Posted to commits@commons.apache.org by tn...@apache.org on 2015/02/25 22:49:29 UTC

[01/18] [math] Remove temporary output.

Repository: commons-math
Updated Branches:
  refs/heads/master 3fd9cf175 -> b28255e1b


Remove temporary output.


Project: http://git-wip-us.apache.org/repos/asf/commons-math/repo
Commit: http://git-wip-us.apache.org/repos/asf/commons-math/commit/c22e7fb6
Tree: http://git-wip-us.apache.org/repos/asf/commons-math/tree/c22e7fb6
Diff: http://git-wip-us.apache.org/repos/asf/commons-math/diff/c22e7fb6

Branch: refs/heads/master
Commit: c22e7fb6f9b5df6f5c3ea9d595214d63bc803a6c
Parents: 3fd9cf1
Author: Thomas Neidhart <th...@gmail.com>
Authored: Wed Feb 25 22:20:33 2015 +0100
Committer: Thomas Neidhart <th...@gmail.com>
Committed: Wed Feb 25 22:20:33 2015 +0100

----------------------------------------------------------------------
 .../commons/math4/stat/regression/SimpleRegressionTest.java     | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/commons-math/blob/c22e7fb6/src/test/java/org/apache/commons/math4/stat/regression/SimpleRegressionTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/regression/SimpleRegressionTest.java b/src/test/java/org/apache/commons/math4/stat/regression/SimpleRegressionTest.java
index 087f5bc..c31b8c3 100644
--- a/src/test/java/org/apache/commons/math4/stat/regression/SimpleRegressionTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/regression/SimpleRegressionTest.java
@@ -555,14 +555,9 @@ public final class SimpleRegressionTest {
     @Test
     public void testPerfect2() {
         SimpleRegression regression = new SimpleRegression();
-        System.out.println("getXSumSquares()=" + regression.getXSumSquares()); // TODO temp check to see why Jenkins H10 is failing
         regression.addData(0, 0);
-        System.out.println("getXSumSquares()=" + regression.getXSumSquares()); // TODO temp check to see why Jenkins H10 is failing
         regression.addData(1, 1);
-        System.out.println("getXSumSquares()=" + regression.getXSumSquares()); // TODO temp check to see why Jenkins H10 is failing
         regression.addData(2, 2);
-        System.out.println("getXSumSquares()=" + regression.getXSumSquares()); // TODO temp check to see why Jenkins H10 is failing
-        System.out.println("getMeanSquareError()=" + regression.getMeanSquareError()); // TODO temp check to see why Jenkins H10/H11 is failing
         Assert.assertEquals(0.0, regression.getSlopeStdErr(), 0.0);
         Assert.assertEquals(0.0, regression.getSignificance(), Double.MIN_VALUE);
         Assert.assertEquals(1, regression.getRSquare(), Double.MIN_VALUE);


[11/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/WeightedObservedPoint.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/WeightedObservedPoint.java b/src/main/java/org/apache/commons/math4/optimization/fitting/WeightedObservedPoint.java
deleted file mode 100644
index 5c2c6d2..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/WeightedObservedPoint.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import java.io.Serializable;
-
-/** This class is a simple container for a weighted observed point in
- * {@link CurveFitter curve fitting}.
- * <p>Instances of this class are guaranteed to be immutable.</p>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class WeightedObservedPoint implements Serializable {
-
-    /** Serializable version id. */
-    private static final long serialVersionUID = 5306874947404636157L;
-
-    /** Weight of the measurement in the fitting process. */
-    private final double weight;
-
-    /** Abscissa of the point. */
-    private final double x;
-
-    /** Observed value of the function at x. */
-    private final double y;
-
-    /** Simple constructor.
-     * @param weight weight of the measurement in the fitting process
-     * @param x abscissa of the measurement
-     * @param y ordinate of the measurement
-     */
-    public WeightedObservedPoint(final double weight, final double x, final double y) {
-        this.weight = weight;
-        this.x      = x;
-        this.y      = y;
-    }
-
-    /** Get the weight of the measurement in the fitting process.
-     * @return weight of the measurement in the fitting process
-     */
-    public double getWeight() {
-        return weight;
-    }
-
-    /** Get the abscissa of the point.
-     * @return abscissa of the point
-     */
-    public double getX() {
-        return x;
-    }
-
-    /** Get the observed value of the function at x.
-     * @return observed value of the function at x
-     */
-    public double getY() {
-        return y;
-    }
-
-}
-
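
For readers skimming the removal, a minimal usage sketch of this container, based only on the constructor and accessors visible in the diff above (it compiles only against the pre-removal sources; the example values are invented):

    import org.apache.commons.math4.optimization.fitting.WeightedObservedPoint;

    public class WeightedObservedPointExample {
        public static void main(String[] args) {
            // One observation at x = 1.5 with measured value y = 2.25,
            // carrying weight 1.0 in the fitting process.
            WeightedObservedPoint obs = new WeightedObservedPoint(1.0, 1.5, 2.25);
            System.out.println(obs.getWeight() + " " + obs.getX() + " " + obs.getY());
        }
    }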

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/package-info.java b/src/main/java/org/apache/commons/math4/optimization/fitting/package-info.java
deleted file mode 100644
index 98683a8..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/package-info.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *
- * This package provides classes to perform curve fitting.
- *
- * <p>Curve fitting is a special case of a least squares problem
- * where the parameters are the coefficients of a function <code>f</code>
- * whose graph <code>y=f(x)</code> should pass through sample points, and
- * where the objective function is the sum of squared residuals
- * <code>f(x<sub>i</sub>)-y<sub>i</sub></code> for observed points
- * (x<sub>i</sub>, y<sub>i</sub>).</p>
- *
- *
- */
-package org.apache.commons.math4.optimization.fitting;
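
Restated as a formula (an editorial restatement of the package description above, not part of the original Javadoc; theta denotes the coefficients of f and w_i the per-point weights carried by WeightedObservedPoint):

    \min_{\theta} \; \sum_{i} w_i \, \bigl( f(x_{i}; \theta) - y_{i} \bigr)^{2}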

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/AbstractDifferentiableOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/AbstractDifferentiableOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/AbstractDifferentiableOptimizer.java
deleted file mode 100644
index 1fbcbcb..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/AbstractDifferentiableOptimizer.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.analysis.differentiation.GradientFunction;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.direct.BaseAbstractMultivariateOptimizer;
-
-/**
- * Base class for implementing optimizers for multivariate scalar
- * differentiable functions.
- * It contains boiler-plate code for dealing with gradient evaluation.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public abstract class AbstractDifferentiableOptimizer
-    extends BaseAbstractMultivariateOptimizer<MultivariateDifferentiableFunction> {
-    /**
-     * Objective function gradient.
-     */
-    private MultivariateVectorFunction gradient;
-
-    /**
-     * @param checker Convergence checker.
-     */
-    protected AbstractDifferentiableOptimizer(ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-    }
-
-    /**
-     * Compute the gradient vector.
-     *
-     * @param evaluationPoint Point at which the gradient must be evaluated.
-     * @return the gradient at the specified point.
-     */
-    protected double[] computeObjectiveGradient(final double[] evaluationPoint) {
-        return gradient.value(evaluationPoint);
-    }
-
-    /**
-     * {@inheritDoc}
-     *
-     * @deprecated In 3.1. Please use
-     * {@link #optimizeInternal(int,MultivariateDifferentiableFunction,GoalType,OptimizationData[])}
-     * instead.
-     */
-    @Override @Deprecated
-    protected PointValuePair optimizeInternal(final int maxEval,
-                                              final MultivariateDifferentiableFunction f,
-                                              final GoalType goalType,
-                                              final double[] startPoint) {
-        return optimizeInternal(maxEval, f, goalType, new InitialGuess(startPoint));
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair optimizeInternal(final int maxEval,
-                                              final MultivariateDifferentiableFunction f,
-                                              final GoalType goalType,
-                                              final OptimizationData... optData) {
-        // Store optimization problem characteristics.
-        gradient = new GradientFunction(f);
-
-        // Perform optimization.
-        return super.optimizeInternal(maxEval, f, goalType, optData);
-    }
-}
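
The gradient boiler-plate that this removed class encapsulated (wrapping a MultivariateDifferentiableFunction in a GradientFunction and evaluating it point-wise) can be sketched standalone as follows. This is a hedged illustration reusing only the math4 types imported in the file above; the quadratic test function and the class name are invented for the example:

    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
    import org.apache.commons.math4.analysis.differentiation.GradientFunction;
    import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;

    public class GradientFunctionExample {
        public static void main(String[] args) {
            // f(x, y) = x^2 + y^2, written so that first derivatives are available.
            MultivariateDifferentiableFunction f = new MultivariateDifferentiableFunction() {
                public double value(double[] p) {
                    return p[0] * p[0] + p[1] * p[1];
                }
                public DerivativeStructure value(DerivativeStructure[] p) {
                    return p[0].multiply(p[0]).add(p[1].multiply(p[1]));
                }
            };

            // Same pattern as the removed optimizer: gradient = new GradientFunction(f).
            MultivariateVectorFunction gradient = new GradientFunction(f);
            double[] g = gradient.value(new double[] { 1.0, 2.0 }); // expected {2.0, 4.0}
            System.out.println(g[0] + ", " + g[1]);
        }
    }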

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizer.java
deleted file mode 100644
index 3bc9a05..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizer.java
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateVectorFunction;
-import org.apache.commons.math4.analysis.FunctionUtils;
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.linear.ArrayRealVector;
-import org.apache.commons.math4.linear.DecompositionSolver;
-import org.apache.commons.math4.linear.DiagonalMatrix;
-import org.apache.commons.math4.linear.EigenDecomposition;
-import org.apache.commons.math4.linear.MatrixUtils;
-import org.apache.commons.math4.linear.QRDecomposition;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.Target;
-import org.apache.commons.math4.optimization.Weight;
-import org.apache.commons.math4.optimization.direct.BaseAbstractMultivariateVectorOptimizer;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Base class for implementing least squares optimizers.
- * It handles the boilerplate methods associated with threshold settings,
- * Jacobian and error estimation.
- * <br/>
- * This class constructs the Jacobian matrix of the function argument in method
- * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
- * org.apache.commons.math4.analysis.MultivariateVectorFunction,OptimizationData[])
- * optimize} and assumes that the rows of that matrix iterate on the model
- * functions while the columns iterate on the parameters; thus, the number
- * of rows is equal to the dimension of the
- * {@link org.apache.commons.math4.optimization.Target Target} while
- * the number of columns is equal to the dimension of the
- * {@link org.apache.commons.math4.optimization.InitialGuess InitialGuess}.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 1.2
- */
-@Deprecated
-public abstract class AbstractLeastSquaresOptimizer
-    extends BaseAbstractMultivariateVectorOptimizer<DifferentiableMultivariateVectorFunction>
-    implements DifferentiableMultivariateVectorOptimizer {
-    /**
-     * Singularity threshold (cf. {@link #getCovariances(double)}).
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    private static final double DEFAULT_SINGULARITY_THRESHOLD = 1e-14;
-    /**
-     * Jacobian matrix of the weighted residuals.
-     * This matrix is in canonical form just after the calls to
-     * {@link #updateJacobian()}, but may be modified by the solver
-     * in the derived class (the {@link LevenbergMarquardtOptimizer
-     * Levenberg-Marquardt optimizer} does this).
-     * @deprecated As of 3.1. To be removed in 4.0. Please use
-     * {@link #computeWeightedJacobian(double[])} instead.
-     */
-    @Deprecated
-    protected double[][] weightedResidualJacobian;
-    /** Number of columns of the jacobian matrix.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected int cols;
-    /** Number of rows of the jacobian matrix.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected int rows;
-    /** Current point.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected double[] point;
-    /** Current objective function value.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected double[] objective;
-    /** Weighted residuals.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected double[] weightedResiduals;
-    /** Cost value (square root of the weighted sum of squared residuals).
-     * @deprecated As of 3.1. Field to become "private" in 4.0.
-     * Please use {@link #setCost(double)}.
-     */
-    @Deprecated
-    protected double cost;
-    /** Objective function derivatives. */
-    private MultivariateDifferentiableVectorFunction jF;
-    /** Number of evaluations of the Jacobian. */
-    private int jacobianEvaluations;
-    /** Square-root of the weight matrix. */
-    private RealMatrix weightMatrixSqrt;
-
-    /**
-     * Simple constructor with default settings.
-     * The convergence check is set to a {@link
-     * org.apache.commons.math4.optimization.SimpleVectorValueChecker}.
-     * @deprecated See {@link org.apache.commons.math4.optimization.SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    protected AbstractLeastSquaresOptimizer() {}
-
-    /**
-     * @param checker Convergence checker.
-     */
-    protected AbstractLeastSquaresOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
-        super(checker);
-    }
-
-    /**
-     * @return the number of evaluations of the Jacobian function.
-     */
-    public int getJacobianEvaluations() {
-        return jacobianEvaluations;
-    }
-
-    /**
-     * Update the jacobian matrix.
-     *
-     * @throws DimensionMismatchException if the Jacobian dimension does not
-     * match problem dimension.
-     * @deprecated As of 3.1. Please use {@link #computeWeightedJacobian(double[])}
-     * instead.
-     */
-    @Deprecated
-    protected void updateJacobian() {
-        final RealMatrix weightedJacobian = computeWeightedJacobian(point);
-        weightedResidualJacobian = weightedJacobian.scalarMultiply(-1).getData();
-    }
-
-    /**
-     * Computes the Jacobian matrix.
-     *
-     * @param params Model parameters at which to compute the Jacobian.
-     * @return the weighted Jacobian: W<sup>1/2</sup> J.
-     * @throws DimensionMismatchException if the Jacobian dimension does not
-     * match problem dimension.
-     * @since 3.1
-     */
-    protected RealMatrix computeWeightedJacobian(double[] params) {
-        ++jacobianEvaluations;
-
-        final DerivativeStructure[] dsPoint = new DerivativeStructure[params.length];
-        final int nC = params.length;
-        for (int i = 0; i < nC; ++i) {
-            dsPoint[i] = new DerivativeStructure(nC, 1, i, params[i]);
-        }
-        final DerivativeStructure[] dsValue = jF.value(dsPoint);
-        final int nR = getTarget().length;
-        if (dsValue.length != nR) {
-            throw new DimensionMismatchException(dsValue.length, nR);
-        }
-        final double[][] jacobianData = new double[nR][nC];
-        for (int i = 0; i < nR; ++i) {
-            int[] orders = new int[nC];
-            for (int j = 0; j < nC; ++j) {
-                orders[j] = 1;
-                jacobianData[i][j] = dsValue[i].getPartialDerivative(orders);
-                orders[j] = 0;
-            }
-        }
-
-        return weightMatrixSqrt.multiply(MatrixUtils.createRealMatrix(jacobianData));
-    }
-
-    /**
-     * Update the residuals array and cost function value.
-     * @throws DimensionMismatchException if the dimension does not match the
-     * problem dimension.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @deprecated As of 3.1. Please use {@link #computeResiduals(double[])},
-     * {@link #computeObjectiveValue(double[])}, {@link #computeCost(double[])}
-     * and {@link #setCost(double)} instead.
-     */
-    @Deprecated
-    protected void updateResidualsAndCost() {
-        objective = computeObjectiveValue(point);
-        final double[] res = computeResiduals(objective);
-
-        // Compute cost.
-        cost = computeCost(res);
-
-        // Compute weighted residuals.
-        final ArrayRealVector residuals = new ArrayRealVector(res);
-        weightedResiduals = weightMatrixSqrt.operate(residuals).toArray();
-    }
-
-    /**
-     * Computes the cost.
-     *
-     * @param residuals Residuals.
-     * @return the cost.
-     * @see #computeResiduals(double[])
-     * @since 3.1
-     */
-    protected double computeCost(double[] residuals) {
-        final ArrayRealVector r = new ArrayRealVector(residuals);
-        return FastMath.sqrt(r.dotProduct(getWeight().operate(r)));
-    }
-
-    /**
-     * Get the Root Mean Square value.
-     * Get the Root Mean Square value, i.e. the root of the arithmetic
-     * mean of the square of all weighted residuals. This is related to the
-     * criterion that is minimized by the optimizer as follows: if
-     * <em>c</em> is the criterion, and <em>n</em> is the number of
-     * measurements, then the RMS is <em>sqrt (c/n)</em>.
-     *
-     * @return RMS value
-     */
-    public double getRMS() {
-        return FastMath.sqrt(getChiSquare() / rows);
-    }
-
-    /**
-     * Get a Chi-Square-like value assuming the N residuals follow N
-     * distinct normal distributions centered on 0 and whose variances are
-     * the reciprocal of the weights.
-     * @return chi-square value
-     */
-    public double getChiSquare() {
-        return cost * cost;
-    }
-
-    /**
-     * Gets the square-root of the weight matrix.
-     *
-     * @return the square-root of the weight matrix.
-     * @since 3.1
-     */
-    public RealMatrix getWeightSquareRoot() {
-        return weightMatrixSqrt.copy();
-    }
-
-    /**
-     * Sets the cost.
-     *
-     * @param cost Cost value.
-     * @since 3.1
-     */
-    protected void setCost(double cost) {
-        this.cost = cost;
-    }
-
-    /**
-     * Get the covariance matrix of the optimized parameters.
-     *
-     * @return the covariance matrix.
-     * @throws org.apache.commons.math4.linear.SingularMatrixException
-     * if the covariance matrix cannot be computed (singular problem).
-     * @see #getCovariances(double)
-     * @deprecated As of 3.1. Please use {@link #computeCovariances(double[],double)}
-     * instead.
-     */
-    @Deprecated
-    public double[][] getCovariances() {
-        return getCovariances(DEFAULT_SINGULARITY_THRESHOLD);
-    }
-
-    /**
-     * Get the covariance matrix of the optimized parameters.
-     * <br/>
-     * Note that this operation involves the inversion of the
-     * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
-     * Jacobian matrix.
-     * The {@code threshold} parameter is a way for the caller to specify
-     * that the result of this computation should be considered meaningless,
-     * and thus trigger an exception.
-     *
-     * @param threshold Singularity threshold.
-     * @return the covariance matrix.
-     * @throws org.apache.commons.math4.linear.SingularMatrixException
-     * if the covariance matrix cannot be computed (singular problem).
-     * @deprecated As of 3.1. Please use {@link #computeCovariances(double[],double)}
-     * instead.
-     */
-    @Deprecated
-    public double[][] getCovariances(double threshold) {
-        return computeCovariances(point, threshold);
-    }
-
-    /**
-     * Get the covariance matrix of the optimized parameters.
-     * <br/>
-     * Note that this operation involves the inversion of the
-     * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
-     * Jacobian matrix.
-     * The {@code threshold} parameter is a way for the caller to specify
-     * that the result of this computation should be considered meaningless,
-     * and thus trigger an exception.
-     *
-     * @param params Model parameters.
-     * @param threshold Singularity threshold.
-     * @return the covariance matrix.
-     * @throws org.apache.commons.math4.linear.SingularMatrixException
-     * if the covariance matrix cannot be computed (singular problem).
-     * @since 3.1
-     */
-    public double[][] computeCovariances(double[] params,
-                                         double threshold) {
-        // Set up the Jacobian.
-        final RealMatrix j = computeWeightedJacobian(params);
-
-        // Compute transpose(J)J.
-        final RealMatrix jTj = j.transpose().multiply(j);
-
-        // Compute the covariances matrix.
-        final DecompositionSolver solver
-            = new QRDecomposition(jTj, threshold).getSolver();
-        return solver.getInverse().getData();
-    }
-
-    /**
-     * <p>
-     * Returns an estimate of the standard deviation of each parameter. The
-     * returned values are the so-called (asymptotic) standard errors on the
-     * parameters, defined as {@code sd(a[i]) = sqrt(S / (n - m) * C[i][i])},
-     * where {@code a[i]} is the optimized value of the {@code i}-th parameter,
-     * {@code S} is the minimized value of the sum of squares objective function
-     * (as returned by {@link #getChiSquare()}), {@code n} is the number of
-     * observations, {@code m} is the number of parameters and {@code C} is the
-     * covariance matrix.
-     * </p>
-     * <p>
-     * See also
-     * <a href="http://en.wikipedia.org/wiki/Least_squares">Wikipedia</a>,
-     * or
-     * <a href="http://mathworld.wolfram.com/LeastSquaresFitting.html">MathWorld</a>,
-     * equations (34) and (35) for a particular case.
-     * </p>
-     *
-     * @return an estimate of the standard deviation of the optimized parameters
-     * @throws org.apache.commons.math4.linear.SingularMatrixException
-     * if the covariance matrix cannot be computed.
-     * @throws NumberIsTooSmallException if the number of degrees of freedom is not
-     * positive, i.e. the number of measurements is less or equal to the number of
-     * parameters.
-     * @deprecated as of version 3.1, {@link #computeSigma(double[],double)} should be used
-     * instead. It should be emphasized that {@code guessParametersErrors} and
-     * {@code computeSigma} are <em>not</em> strictly equivalent.
-     */
-    @Deprecated
-    public double[] guessParametersErrors() {
-        if (rows <= cols) {
-            throw new NumberIsTooSmallException(LocalizedFormats.NO_DEGREES_OF_FREEDOM,
-                                                rows, cols, false);
-        }
-        double[] errors = new double[cols];
-        final double c = FastMath.sqrt(getChiSquare() / (rows - cols));
-        double[][] covar = computeCovariances(point, 1e-14);
-        for (int i = 0; i < errors.length; ++i) {
-            errors[i] = FastMath.sqrt(covar[i][i]) * c;
-        }
-        return errors;
-    }
-
-    /**
-     * Computes an estimate of the standard deviation of the parameters. The
-     * returned values are the square root of the diagonal coefficients of the
-     * covariance matrix, {@code sd(a[i]) ~= sqrt(C[i][i])}, where {@code a[i]}
-     * is the optimized value of the {@code i}-th parameter, and {@code C} is
-     * the covariance matrix.
-     *
-     * @param params Model parameters.
-     * @param covarianceSingularityThreshold Singularity threshold (see
-     * {@link #computeCovariances(double[],double) computeCovariances}).
-     * @return an estimate of the standard deviation of the optimized parameters
-     * @throws org.apache.commons.math4.linear.SingularMatrixException
-     * if the covariance matrix cannot be computed.
-     * @since 3.1
-     */
-    public double[] computeSigma(double[] params,
-                                 double covarianceSingularityThreshold) {
-        final int nC = params.length;
-        final double[] sig = new double[nC];
-        final double[][] cov = computeCovariances(params, covarianceSingularityThreshold);
-        for (int i = 0; i < nC; ++i) {
-            sig[i] = FastMath.sqrt(cov[i][i]);
-        }
-        return sig;
-    }
-
-    /** {@inheritDoc}
-     * @deprecated As of 3.1. Please use
-     * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
-     * org.apache.commons.math4.analysis.MultivariateVectorFunction,OptimizationData[])
-     * optimize(int,MultivariateDifferentiableVectorFunction,OptimizationData...)}
-     * instead.
-     */
-    @Override
-    @Deprecated
-    public PointVectorValuePair optimize(int maxEval,
-                                         final DifferentiableMultivariateVectorFunction f,
-                                         final double[] target, final double[] weights,
-                                         final double[] startPoint) {
-        return optimizeInternal(maxEval,
-                                FunctionUtils.toMultivariateDifferentiableVectorFunction(f),
-                                new Target(target),
-                                new Weight(weights),
-                                new InitialGuess(startPoint));
-    }
-
-    /**
-     * Optimize an objective function.
-     * Optimization is considered to be a weighted least-squares minimization.
-     * The cost function to be minimized is
-     * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
-     *
-     * @param f Objective function.
-     * @param target Target value for the objective functions at optimum.
-     * @param weights Weights for the least squares cost computation.
-     * @param startPoint Start point for optimization.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @param maxEval Maximum number of function evaluations.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     * @deprecated As of 3.1. Please use
-     * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
-     * org.apache.commons.math4.analysis.MultivariateVectorFunction,OptimizationData[])
-     * optimize(int,MultivariateDifferentiableVectorFunction,OptimizationData...)}
-     * instead.
-     */
-    @Deprecated
-    public PointVectorValuePair optimize(final int maxEval,
-                                         final MultivariateDifferentiableVectorFunction f,
-                                         final double[] target, final double[] weights,
-                                         final double[] startPoint) {
-        return optimizeInternal(maxEval, f,
-                                new Target(target),
-                                new Weight(weights),
-                                new InitialGuess(startPoint));
-    }
-
-    /**
-     * Optimize an objective function.
-     * Optimization is considered to be a weighted least-squares minimization.
-     * The cost function to be minimized is
-     * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link Target}</li>
-     *  <li>{@link Weight}</li>
-     *  <li>{@link InitialGuess}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value of the objective
-     * function.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException if
-     * the maximal number of evaluations is exceeded.
-     * @throws DimensionMismatchException if the target, and weight arguments
-     * have inconsistent dimensions.
-     * @see BaseAbstractMultivariateVectorOptimizer#optimizeInternal(int,
-     * org.apache.commons.math4.analysis.MultivariateVectorFunction,OptimizationData[])
-     * @since 3.1
-     * @deprecated As of 3.1. Override is necessary only until this class's generic
-     * argument is changed to {@code MultivariateDifferentiableVectorFunction}.
-     */
-    @Deprecated
-    protected PointVectorValuePair optimizeInternal(final int maxEval,
-                                                    final MultivariateDifferentiableVectorFunction f,
-                                                    OptimizationData... optData) {
-        // XXX Conversion will be removed when the generic argument of the
-        // base class becomes "MultivariateDifferentiableVectorFunction".
-        return super.optimizeInternal(maxEval, FunctionUtils.toDifferentiableMultivariateVectorFunction(f), optData);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected void setUp() {
-        super.setUp();
-
-        // Reset counter.
-        jacobianEvaluations = 0;
-
-        // Square-root of the weight matrix.
-        weightMatrixSqrt = squareRoot(getWeight());
-
-        // Store least squares problem characteristics.
-        // XXX The conversion won't be necessary when the generic argument of
-        // the base class becomes "MultivariateDifferentiableVectorFunction".
-        // XXX "jF" is not strictly necessary anymore but is currently more
-        // efficient than converting the value returned from "getObjectiveFunction()"
-        // every time it is used.
-        jF = FunctionUtils.toMultivariateDifferentiableVectorFunction((DifferentiableMultivariateVectorFunction) getObjectiveFunction());
-
-        // Arrays shared with "private" and "protected" methods.
-        point = getStartPoint();
-        rows = getTarget().length;
-        cols = point.length;
-    }
-
-    /**
-     * Computes the residuals.
-     * The residual is the difference between the observed (target)
-     * values and the model (objective function) value.
-     * There is one residual for each element of the vector-valued
-     * function.
-     *
-     * @param objectiveValue Value of the objective function. This is
-     * the value returned from a call to
-     * {@link #computeObjectiveValue(double[]) computeObjectiveValue}
-     * (whose array argument contains the model parameters).
-     * @return the residuals.
-     * @throws DimensionMismatchException if {@code objectiveValue} has a wrong
-     * length.
-     * @since 3.1
-     */
-    protected double[] computeResiduals(double[] objectiveValue) {
-        final double[] target = getTarget();
-        if (objectiveValue.length != target.length) {
-            throw new DimensionMismatchException(target.length,
-                                                 objectiveValue.length);
-        }
-
-        final double[] residuals = new double[target.length];
-        for (int i = 0; i < target.length; i++) {
-            residuals[i] = target[i] - objectiveValue[i];
-        }
-
-        return residuals;
-    }
-
-    /**
-     * Computes the square-root of the weight matrix.
-     *
-     * @param m Symmetric, positive-definite (weight) matrix.
-     * @return the square-root of the weight matrix.
-     */
-    private RealMatrix squareRoot(RealMatrix m) {
-        if (m instanceof DiagonalMatrix) {
-            final int dim = m.getRowDimension();
-            final RealMatrix sqrtM = new DiagonalMatrix(dim);
-            for (int i = 0; i < dim; i++) {
-               sqrtM.setEntry(i, i, FastMath.sqrt(m.getEntry(i, i)));
-            }
-            return sqrtM;
-        } else {
-            final EigenDecomposition dec = new EigenDecomposition(m);
-            return dec.getSquareRoot();
-        }
-    }
-}
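
To summarize the error-estimation code removed above in formula form (an editorial summary; the notation is mine): with J_w = W^(1/2) J the weighted Jacobian built by computeWeightedJacobian, S the chi-square returned by getChiSquare(), n the number of measurements and m the number of parameters,

    C = \left( J_w^{\mathsf{T}} J_w \right)^{-1}                  % computeCovariances (inverted via QR)
    \sigma_i = \sqrt{C_{ii}}                                      % computeSigma
    \mathrm{sd}(a_i) = \sqrt{\tfrac{S}{\,n - m\,} \, C_{ii}}      % deprecated guessParametersErrors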

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/AbstractScalarDifferentiableOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/AbstractScalarDifferentiableOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/AbstractScalarDifferentiableOptimizer.java
deleted file mode 100644
index 1bb8cc0..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/AbstractScalarDifferentiableOptimizer.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateFunction;
-import org.apache.commons.math4.analysis.FunctionUtils;
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateOptimizer;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.direct.BaseAbstractMultivariateOptimizer;
-
-/**
- * Base class for implementing optimizers for multivariate scalar
- * differentiable functions.
- * It contains boiler-plate code for dealing with gradient evaluation.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public abstract class AbstractScalarDifferentiableOptimizer
-    extends BaseAbstractMultivariateOptimizer<DifferentiableMultivariateFunction>
-    implements DifferentiableMultivariateOptimizer {
-    /**
-     * Objective function gradient.
-     */
-    private MultivariateVectorFunction gradient;
-
-    /**
-     * Simple constructor with default settings.
-     * The convergence check is set to a
-     * {@link org.apache.commons.math4.optimization.SimpleValueChecker
-     * SimpleValueChecker}.
-     * @deprecated See {@link org.apache.commons.math4.optimization.SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    protected AbstractScalarDifferentiableOptimizer() {}
-
-    /**
-     * @param checker Convergence checker.
-     */
-    protected AbstractScalarDifferentiableOptimizer(ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-    }
-
-    /**
-     * Compute the gradient vector.
-     *
-     * @param evaluationPoint Point at which the gradient must be evaluated.
-     * @return the gradient at the specified point.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the allowed number of evaluations is exceeded.
-     */
-    protected double[] computeObjectiveGradient(final double[] evaluationPoint) {
-        return gradient.value(evaluationPoint);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair optimizeInternal(int maxEval,
-                                              final DifferentiableMultivariateFunction f,
-                                              final GoalType goalType,
-                                              final double[] startPoint) {
-        // Store optimization problem characteristics.
-        gradient = f.gradient();
-
-        return super.optimizeInternal(maxEval, f, goalType, startPoint);
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param f Objective function.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param startPoint Start point for optimization.
-     * @param maxEval Maximum number of function evaluations.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     */
-    public PointValuePair optimize(final int maxEval,
-                                   final MultivariateDifferentiableFunction f,
-                                   final GoalType goalType,
-                                   final double[] startPoint) {
-        return optimizeInternal(maxEval,
-                                FunctionUtils.toDifferentiableMultivariateFunction(f),
-                                goalType,
-                                startPoint);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/ConjugateGradientFormula.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/ConjugateGradientFormula.java b/src/main/java/org/apache/commons/math4/optimization/general/ConjugateGradientFormula.java
deleted file mode 100644
index fae7419..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/ConjugateGradientFormula.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-/**
- * Available choices of update formulas for the &beta; parameter
- * in {@link NonLinearConjugateGradientOptimizer}.
- * <p>
- * The &beta; parameter is used to compute the successive conjugate
- * search directions. For non-linear conjugate gradients, there are
- * two formulas to compute &beta;:
- * <ul>
- *   <li>Fletcher-Reeves formula</li>
- *   <li>Polak-Ribi&egrave;re formula</li>
- * </ul>
- * On the one hand, the Fletcher-Reeves formula is guaranteed to converge
- * if the start point is close enough to the optimum, whereas the
- * Polak-Ribi&egrave;re formula may not converge in rare cases. On the
- * other hand, the Polak-Ribi&egrave;re formula is often faster when it
- * does converge. Polak-Ribi&egrave;re is often used.
- * <p>
- * @see NonLinearConjugateGradientOptimizer
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public enum ConjugateGradientFormula {
-
-    /** Fletcher-Reeves formula. */
-    FLETCHER_REEVES,
-
-    /** Polak-Ribi&egrave;re formula. */
-    POLAK_RIBIERE
-
-}
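
For reference (these formulas are standard and are not spelled out in the removed Javadoc), the two beta updates named by this enum are, with g_k the gradient at iteration k:

    \beta_{k+1}^{\mathrm{FR}} = \frac{g_{k+1}^{\mathsf{T}} g_{k+1}}{g_{k}^{\mathsf{T}} g_{k}}
    \qquad
    \beta_{k+1}^{\mathrm{PR}} = \frac{g_{k+1}^{\mathsf{T}} \left( g_{k+1} - g_{k} \right)}{g_{k}^{\mathsf{T}} g_{k}}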

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizer.java
deleted file mode 100644
index 9a44084..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizer.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.MathInternalError;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.linear.ArrayRealVector;
-import org.apache.commons.math4.linear.BlockRealMatrix;
-import org.apache.commons.math4.linear.DecompositionSolver;
-import org.apache.commons.math4.linear.LUDecomposition;
-import org.apache.commons.math4.linear.QRDecomposition;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.linear.SingularMatrixException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-
-/**
- * Gauss-Newton least-squares solver.
- * <p>
- * This class solves a least-squares problem by solving the normal equations
- * of the linearized problem at each iteration. Either LU decomposition or
- * QR decomposition can be used to solve the normal equations. LU decomposition
- * is faster but QR decomposition is more robust for difficult problems.
- * </p>
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- *
- */
-@Deprecated
-public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer {
-    /** Indicator for using LU decomposition. */
-    private final boolean useLU;
-
-    /**
-     * Simple constructor with default settings.
-     * The normal equations will be solved using LU decomposition and the
-     * convergence check is set to a {@link SimpleVectorValueChecker}
-     * with default tolerances.
-     * @deprecated See {@link SimpleVectorValueChecker#SimpleVectorValueChecker()}
-     */
-    @Deprecated
-    public GaussNewtonOptimizer() {
-        this(true);
-    }
-
-    /**
-     * Simple constructor with default settings.
-     * The normal equations will be solved using LU decomposition.
-     *
-     * @param checker Convergence checker.
-     */
-    public GaussNewtonOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
-        this(true, checker);
-    }
-
-    /**
-     * Simple constructor with default settings.
-     * The convergence check is set to a {@link SimpleVectorValueChecker}
-     * with default tolerances.
-     *
-     * @param useLU If {@code true}, the normal equations will be solved
-     * using LU decomposition, otherwise they will be solved using QR
-     * decomposition.
-     * @deprecated See {@link SimpleVectorValueChecker#SimpleVectorValueChecker()}
-     */
-    @Deprecated
-    public GaussNewtonOptimizer(final boolean useLU) {
-        this(useLU, new SimpleVectorValueChecker());
-    }
-
-    /**
-     * @param useLU If {@code true}, the normal equations will be solved
-     * using LU decomposition, otherwise they will be solved using QR
-     * decomposition.
-     * @param checker Convergence checker.
-     */
-    public GaussNewtonOptimizer(final boolean useLU,
-                                ConvergenceChecker<PointVectorValuePair> checker) {
-        super(checker);
-        this.useLU = useLU;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public PointVectorValuePair doOptimize() {
-        final ConvergenceChecker<PointVectorValuePair> checker
-            = getConvergenceChecker();
-
-        // Computation will be useless without a checker (see "for-loop").
-        if (checker == null) {
-            throw new NullArgumentException();
-        }
-
-        final double[] targetValues = getTarget();
-        final int nR = targetValues.length; // Number of observed data.
-
-        final RealMatrix weightMatrix = getWeight();
-        // Diagonal of the weight matrix.
-        final double[] residualsWeights = new double[nR];
-        for (int i = 0; i < nR; i++) {
-            residualsWeights[i] = weightMatrix.getEntry(i, i);
-        }
-
-        final double[] currentPoint = getStartPoint();
-        final int nC = currentPoint.length;
-
-        // iterate until convergence is reached
-        PointVectorValuePair current = null;
-        int iter = 0;
-        for (boolean converged = false; !converged;) {
-            ++iter;
-
-            // evaluate the objective function and its jacobian
-            PointVectorValuePair previous = current;
-            // Value of the objective function at "currentPoint".
-            final double[] currentObjective = computeObjectiveValue(currentPoint);
-            final double[] currentResiduals = computeResiduals(currentObjective);
-            final RealMatrix weightedJacobian = computeWeightedJacobian(currentPoint);
-            current = new PointVectorValuePair(currentPoint, currentObjective);
-
-            // build the linear problem
-            final double[]   b = new double[nC];
-            final double[][] a = new double[nC][nC];
-            for (int i = 0; i < nR; ++i) {
-
-                final double[] grad   = weightedJacobian.getRow(i);
-                final double weight   = residualsWeights[i];
-                final double residual = currentResiduals[i];
-
-                // compute the normal equation
-                final double wr = weight * residual;
-                for (int j = 0; j < nC; ++j) {
-                    b[j] += wr * grad[j];
-                }
-
-                // build the contribution matrix for measurement i
-                for (int k = 0; k < nC; ++k) {
-                    double[] ak = a[k];
-                    double wgk = weight * grad[k];
-                    for (int l = 0; l < nC; ++l) {
-                        ak[l] += wgk * grad[l];
-                    }
-                }
-            }
-
-            try {
-                // solve the linearized least squares problem
-                RealMatrix mA = new BlockRealMatrix(a);
-                DecompositionSolver solver = useLU ?
-                        new LUDecomposition(mA).getSolver() :
-                        new QRDecomposition(mA).getSolver();
-                final double[] dX = solver.solve(new ArrayRealVector(b, false)).toArray();
-                // update the estimated parameters
-                for (int i = 0; i < nC; ++i) {
-                    currentPoint[i] += dX[i];
-                }
-            } catch (SingularMatrixException e) {
-                throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM);
-            }
-
-            // Check convergence.
-            if (previous != null) {
-                converged = checker.converged(iter, previous, current);
-                if (converged) {
-                    cost = computeCost(currentResiduals);
-                    // Update (deprecated) "point" field.
-                    point = current.getPoint();
-                    return current;
-                }
-            }
-        }
-        // Must never happen.
-        throw new MathInternalError();
-    }
-}
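
In textbook notation (an editorial restatement of the step described in the class Javadoc above, not a line-by-line transcription of doOptimize; J is the Jacobian, W the weight matrix, r the residuals and p the parameters), each Gauss-Newton iteration solves the normal equations of the linearized problem and updates the estimate:

    \left( J^{\mathsf{T}} W J \right) \Delta p = J^{\mathsf{T}} W r,
    \qquad p \leftarrow p + \Delta p

The useLU flag of the removed class selects whether this linear system is solved with an LU or a QR decomposition.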


[04/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizerTest.java
deleted file mode 100644
index d9000a8..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizerTest.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.io.Serializable;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.analysis.solvers.BrentSolver;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.linear.BlockRealMatrix;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.optimization.general.ConjugateGradientFormula;
-import org.apache.commons.math4.optimization.general.NonLinearConjugateGradientOptimizer;
-import org.apache.commons.math4.optimization.general.Preconditioner;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public class NonLinearConjugateGradientOptimizerTest {
-    @Test
-    public void testTrivial() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0 });
-        Assert.assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(0.0, optimum.getValue(), 1.0e-10);
-    }
-
-    @Test
-    public void testColumnsPermutation() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 1.0, -1.0 }, { 0.0, 2.0 }, { 1.0, -2.0 } },
-                              new double[] { 4.0, 6.0, 1.0 });
-
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0, 0 });
-        Assert.assertEquals(7.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(0.0, optimum.getValue(), 1.0e-10);
-
-    }
-
-    @Test
-    public void testNoDependency() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 2, 0, 0, 0, 0, 0 },
-                { 0, 2, 0, 0, 0, 0 },
-                { 0, 0, 2, 0, 0, 0 },
-                { 0, 0, 0, 2, 0, 0 },
-                { 0, 0, 0, 0, 2, 0 },
-                { 0, 0, 0, 0, 0, 2 }
-        }, new double[] { 0.0, 1.1, 2.2, 3.3, 4.4, 5.5 });
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0, 0, 0, 0, 0, 0 });
-        for (int i = 0; i < problem.target.length; ++i) {
-            Assert.assertEquals(0.55 * i, optimum.getPoint()[i], 1.0e-10);
-        }
-    }
-
-    @Test
-    public void testOneSet() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  1,  0, 0 },
-                { -1,  1, 0 },
-                {  0, -1, 1 }
-        }, new double[] { 1, 1, 1});
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0, 0, 0 });
-        Assert.assertEquals(1.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(2.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getPoint()[2], 1.0e-10);
-
-    }
-
-    @Test
-    public void testTwoSets() {
-        final double epsilon = 1.0e-7;
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  2,  1,   0,  4,       0, 0 },
-                { -4, -2,   3, -7,       0, 0 },
-                {  4,  1,  -2,  8,       0, 0 },
-                {  0, -3, -12, -1,       0, 0 },
-                {  0,  0,   0,  0, epsilon, 1 },
-                {  0,  0,   0,  0,       1, 1 }
-        }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
-
-        final Preconditioner preconditioner
-            = new Preconditioner() {
-                    public double[] precondition(double[] point, double[] r) {
-                        double[] d = r.clone();
-                        d[0] /=  72.0;
-                        d[1] /=  30.0;
-                        d[2] /= 314.0;
-                        d[3] /= 260.0;
-                        d[4] /= 2 * (1 + epsilon * epsilon);
-                        d[5] /= 4.0;
-                        return d;
-                    }
-                };
-
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-13, 1e-13),
-                                                    new BrentSolver(),
-                                                    preconditioner);
-                                                    
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0, 0, 0, 0, 0, 0 });
-        Assert.assertEquals( 3.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals( 4.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(-1.0, optimum.getPoint()[2], 1.0e-10);
-        Assert.assertEquals(-2.0, optimum.getPoint()[3], 1.0e-10);
-        Assert.assertEquals( 1.0 + epsilon, optimum.getPoint()[4], 1.0e-10);
-        Assert.assertEquals( 1.0 - epsilon, optimum.getPoint()[5], 1.0e-10);
-
-    }
-
-    @Test
-    public void testNonInversible() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  1, 2, -3 },
-                {  2, 1,  3 },
-                { -3, 0, -9 }
-        }, new double[] { 1, 1, 1 });
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-                optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 0, 0, 0 });
-        Assert.assertTrue(optimum.getValue() > 0.5);
-    }
-
-    @Test
-    public void testIllConditioned() {
-        LinearProblem problem1 = new LinearProblem(new double[][] {
-                { 10.0, 7.0,  8.0,  7.0 },
-                {  7.0, 5.0,  6.0,  5.0 },
-                {  8.0, 6.0, 10.0,  9.0 },
-                {  7.0, 5.0,  9.0, 10.0 }
-        }, new double[] { 32, 23, 33, 31 });
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-13, 1e-13),
-                                                    new BrentSolver(1e-15, 1e-15));
-        PointValuePair optimum1 =
-            optimizer.optimize(200, problem1, GoalType.MINIMIZE, new double[] { 0, 1, 2, 3 });
-        Assert.assertEquals(1.0, optimum1.getPoint()[0], 1.0e-4);
-        Assert.assertEquals(1.0, optimum1.getPoint()[1], 1.0e-4);
-        Assert.assertEquals(1.0, optimum1.getPoint()[2], 1.0e-4);
-        Assert.assertEquals(1.0, optimum1.getPoint()[3], 1.0e-4);
-
-        LinearProblem problem2 = new LinearProblem(new double[][] {
-                { 10.00, 7.00, 8.10, 7.20 },
-                {  7.08, 5.04, 6.00, 5.00 },
-                {  8.00, 5.98, 9.89, 9.00 },
-                {  6.99, 4.99, 9.00, 9.98 }
-        }, new double[] { 32, 23, 33, 31 });
-        PointValuePair optimum2 =
-            optimizer.optimize(200, problem2, GoalType.MINIMIZE, new double[] { 0, 1, 2, 3 });
-        Assert.assertEquals(-81.0, optimum2.getPoint()[0], 1.0e-1);
-        Assert.assertEquals(137.0, optimum2.getPoint()[1], 1.0e-1);
-        Assert.assertEquals(-34.0, optimum2.getPoint()[2], 1.0e-1);
-        Assert.assertEquals( 22.0, optimum2.getPoint()[3], 1.0e-1);
-
-    }
-
-    @Test
-    public void testMoreEstimatedParametersSimple() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 3.0, 2.0,  0.0, 0.0 },
-                { 0.0, 1.0, -1.0, 1.0 },
-                { 2.0, 0.0,  1.0, 0.0 }
-        }, new double[] { 7.0, 3.0, 5.0 });
-
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 7, 6, 5, 4 });
-        Assert.assertEquals(0, optimum.getValue(), 1.0e-10);
-
-    }
-
-    @Test
-    public void testMoreEstimatedParametersUnsorted() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                 { 1.0, 1.0,  0.0,  0.0, 0.0,  0.0 },
-                 { 0.0, 0.0,  1.0,  1.0, 1.0,  0.0 },
-                 { 0.0, 0.0,  0.0,  0.0, 1.0, -1.0 },
-                 { 0.0, 0.0, -1.0,  1.0, 0.0,  1.0 },
-                 { 0.0, 0.0,  0.0, -1.0, 1.0,  0.0 }
-        }, new double[] { 3.0, 12.0, -1.0, 7.0, 1.0 });
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 2, 2, 2, 2, 2, 2 });
-        Assert.assertEquals(0, optimum.getValue(), 1.0e-10);
-    }
-
-    @Test
-    public void testRedundantEquations() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 1.0,  1.0 },
-                { 1.0, -1.0 },
-                { 1.0,  3.0 }
-        }, new double[] { 3.0, 1.0, 5.0 });
-
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 1, 1 });
-        Assert.assertEquals(2.0, optimum.getPoint()[0], 1.0e-8);
-        Assert.assertEquals(1.0, optimum.getPoint()[1], 1.0e-8);
-
-    }
-
-    @Test
-    public void testInconsistentEquations() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 1.0,  1.0 },
-                { 1.0, -1.0 },
-                { 1.0,  3.0 }
-        }, new double[] { 3.0, 1.0, 4.0 });
-
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-6, 1e-6));
-        PointValuePair optimum =
-            optimizer.optimize(100, problem, GoalType.MINIMIZE, new double[] { 1, 1 });
-        Assert.assertTrue(optimum.getValue() > 0.1);
-
-    }
-
-    @Test
-    public void testCircleFitting() {
-        CircleScalar circle = new CircleScalar();
-        circle.addPoint( 30.0,  68.0);
-        circle.addPoint( 50.0,  -6.0);
-        circle.addPoint(110.0, -20.0);
-        circle.addPoint( 35.0,  15.0);
-        circle.addPoint( 45.0,  97.0);
-        NonLinearConjugateGradientOptimizer optimizer =
-            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                    new SimpleValueChecker(1e-30, 1e-30),
-                                                    new BrentSolver(1e-15, 1e-13));
-        PointValuePair optimum =
-            optimizer.optimize(100, circle, GoalType.MINIMIZE, new double[] { 98.680, 47.345 });
-        Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
-        Assert.assertEquals(69.960161753, circle.getRadius(center), 1.0e-8);
-        Assert.assertEquals(96.075902096, center.getX(), 1.0e-8);
-        Assert.assertEquals(48.135167894, center.getY(), 1.0e-8);
-    }
-
-    private static class LinearProblem implements MultivariateDifferentiableFunction, Serializable {
-
-        private static final long serialVersionUID = 703247177355019415L;
-        final RealMatrix factors;
-        final double[] target;
-        public LinearProblem(double[][] factors, double[] target) {
-            this.factors = new BlockRealMatrix(factors);
-            this.target  = target;
-        }
-
-        public double value(double[] variables) {
-            double[] y = factors.operate(variables);
-            double sum = 0;
-            for (int i = 0; i < y.length; ++i) {
-                double ri = y[i] - target[i];
-                sum += ri * ri;
-            }
-            return sum;
-        }
-
-        public DerivativeStructure value(DerivativeStructure[] variables) {
-            DerivativeStructure[] y = new DerivativeStructure[factors.getRowDimension()];
-            for (int i = 0; i < y.length; ++i) {
-                y[i] = variables[0].getField().getZero();
-                for (int j = 0; j < factors.getColumnDimension(); ++j) {
-                    y[i] = y[i].add(variables[j].multiply(factors.getEntry(i, j)));
-                }
-            }
-
-            DerivativeStructure sum = variables[0].getField().getZero();
-            for (int i = 0; i < y.length; ++i) {
-                DerivativeStructure ri = y[i].subtract(target[i]);
-                sum = sum.add(ri.multiply(ri));
-            }
-            return sum;
-        }
-
-    }
-}

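The removed test above drives the deprecated optimization.general entry points (ConjugateGradientFormula, optimize(maxEval, problem, goal, start)). As a rough migration sketch only, assuming the replacement org.apache.commons.math4.optim package keeps the same shape as the Commons Math 3 optim API (the class and method names below come from that assumption, not from this commit), the testTrivial() problem would be set up along these lines:

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.SimpleValueChecker;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunctionGradient;
    import org.apache.commons.math4.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer;

    public class ConjugateGradientMigrationSketch {
        public static void main(String[] args) {
            // f(x) = (2x - 3)^2, the single-variable problem from testTrivial().
            MultivariateFunction f = new MultivariateFunction() {
                public double value(double[] x) {
                    final double r = 2 * x[0] - 3;
                    return r * r;
                }
            };
            // Analytical gradient: df/dx = 4 * (2x - 3).
            MultivariateVectorFunction gradient = new MultivariateVectorFunction() {
                public double[] value(double[] x) {
                    return new double[] { 4 * (2 * x[0] - 3) };
                }
            };

            NonLinearConjugateGradientOptimizer optimizer =
                new NonLinearConjugateGradientOptimizer(
                    NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
                    new SimpleValueChecker(1e-6, 1e-6));

            PointValuePair optimum =
                optimizer.optimize(new MaxEval(100),
                                   new ObjectiveFunction(f),
                                   new ObjectiveFunctionGradient(gradient),
                                   GoalType.MINIMIZE,
                                   new InitialGuess(new double[] { 0 }));

            // Expected result, as in the removed test: x close to 1.5 with value close to 0.
            System.out.println(optimum.getPoint()[0] + " -> " + optimum.getValue());
        }
    }

The gradient is passed explicitly here because the optim API, at least in its math3 form, takes separate ObjectiveFunction and ObjectiveFunctionGradient data objects instead of the DerivativeStructure-based MultivariateDifferentiableFunction used by the removed test.
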
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/RandomCirclePointGenerator.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/RandomCirclePointGenerator.java b/src/test/java/org/apache/commons/math4/optimization/general/RandomCirclePointGenerator.java
deleted file mode 100644
index 07ace1f..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/RandomCirclePointGenerator.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.distribution.NormalDistribution;
-import org.apache.commons.math4.distribution.RealDistribution;
-import org.apache.commons.math4.distribution.UniformRealDistribution;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.random.RandomGenerator;
-import org.apache.commons.math4.random.Well44497b;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathUtils;
-
-/**
- * Factory for generating a cloud of points that approximate a circle.
- */
-@Deprecated
-public class RandomCirclePointGenerator {
-    /** RNG for the x-coordinate of the center. */
-    private final RealDistribution cX;
-    /** RNG for the y-coordinate of the center. */
-    private final RealDistribution cY;
-    /** RNG for the parametric position of the point. */
-    private final RealDistribution tP;
-    /** Radius of the circle. */
-    private final double radius;
-
-    /**
-     * @param x Abscissa of the circle center.
-     * @param y Ordinate of the circle center.
-     * @param radius Radius of the circle.
-     * @param xSigma Error on the x-coordinate of the circumference points.
-     * @param ySigma Error on the y-coordinate of the circumference points.
-     * @param seed RNG seed.
-     */
-    public RandomCirclePointGenerator(double x,
-                                      double y,
-                                      double radius,
-                                      double xSigma,
-                                      double ySigma,
-                                      long seed) {
-        final RandomGenerator rng = new Well44497b(seed);
-        this.radius = radius;
-        cX = new NormalDistribution(rng, x, xSigma,
-                                    NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
-        cY = new NormalDistribution(rng, y, ySigma,
-                                    NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
-        tP = new UniformRealDistribution(rng, 0, MathUtils.TWO_PI);
-    }
-
-    /**
-     * Point generator.
-     *
-     * @param n Number of points to create.
-     * @return the cloud of {@code n} points.
-     */
-    public Vector2D[] generate(int n) {
-        final Vector2D[] cloud = new Vector2D[n];
-        for (int i = 0; i < n; i++) {
-            cloud[i] = create();
-        }
-        return cloud;
-    }
-
-    /**
-     * Create one point.
-     *
-     * @return a point.
-     */
-    private Vector2D create() {
-        final double t = tP.sample();
-        final double pX = cX.sample() + radius * FastMath.cos(t);
-        final double pY = cY.sample() + radius * FastMath.sin(t);
-
-        return new Vector2D(pX, pY);
-    }
-}

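The removed generator builds each point parametrically: an angle t is drawn uniformly from [0, 2*pi), the point is placed at radius * (cos t, sin t), and the centre coordinates are perturbed with Gaussian noise. A dependency-free sketch of the same construction, using only java.util.Random and hypothetical class and method names, would be:

    import java.util.Random;

    public class NoisyCirclePoints {
        /** Generates n points scattered around a circle of the given radius and centre. */
        public static double[][] generate(int n, double cx, double cy, double radius,
                                          double xSigma, double ySigma, long seed) {
            final Random rng = new Random(seed);
            final double[][] cloud = new double[n][2];
            for (int i = 0; i < n; i++) {
                final double t = rng.nextDouble() * 2 * Math.PI;  // parametric position on the circle
                cloud[i][0] = cx + xSigma * rng.nextGaussian() + radius * Math.cos(t);
                cloud[i][1] = cy + ySigma * rng.nextGaussian() + radius * Math.sin(t);
            }
            return cloud;
        }
    }
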
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/RandomStraightLinePointGenerator.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/RandomStraightLinePointGenerator.java b/src/test/java/org/apache/commons/math4/optimization/general/RandomStraightLinePointGenerator.java
deleted file mode 100644
index e591962..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/RandomStraightLinePointGenerator.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.awt.geom.Point2D;
-
-import org.apache.commons.math4.distribution.NormalDistribution;
-import org.apache.commons.math4.distribution.RealDistribution;
-import org.apache.commons.math4.distribution.UniformRealDistribution;
-import org.apache.commons.math4.random.RandomGenerator;
-import org.apache.commons.math4.random.Well44497b;
-
-/**
- * Factory for generating a cloud of points that approximate a straight line.
- */
-@Deprecated
-public class RandomStraightLinePointGenerator {
-    /** Slope. */
-    private final double slope;
-    /** Intercept. */
-    private final double intercept;
-    /** RNG for the x-coordinate. */
-    private final RealDistribution x;
-    /** RNG for the error on the y-coordinate. */
-    private final RealDistribution error;
-
-    /**
-     * The generator will create a cloud of points whose x-coordinates
-     * will be randomly sampled between {@code xLo} and {@code xHi}, and
-     * the corresponding y-coordinates will be computed as
-     * <pre><code>
-     *  y = a x + b + N(0, error)
-     * </code></pre>
-     * where {@code N(mean, sigma)} is a Gaussian distribution with the
-     * given mean and standard deviation.
-     *
-     * @param a Slope.
-     * @param b Intercept.
-     * @param sigma Standard deviation on the y-coordinate of the point.
-     * @param lo Lowest value of the x-coordinate.
-     * @param hi Highest value of the x-coordinate.
-     * @param seed RNG seed.
-     */
-    public RandomStraightLinePointGenerator(double a,
-                                            double b,
-                                            double sigma,
-                                            double lo,
-                                            double hi,
-                                            long seed) {
-        final RandomGenerator rng = new Well44497b(seed);
-        slope = a;
-        intercept = b;
-        error = new NormalDistribution(rng, 0, sigma,
-                                       NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
-        x = new UniformRealDistribution(rng, lo, hi);
-    }
-
-    /**
-     * Point generator.
-     *
-     * @param n Number of points to create.
-     * @return the cloud of {@code n} points.
-     */
-    public Point2D.Double[] generate(int n) {
-        final Point2D.Double[] cloud = new Point2D.Double[n];
-        for (int i = 0; i < n; i++) {
-            cloud[i] = create();
-        }
-        return cloud;
-    }
-
-    /**
-     * Create one point.
-     *
-     * @return a point.
-     */
-    private Point2D.Double create() {
-        final double abscissa = x.sample();
-        final double yModel = slope * abscissa + intercept;
-        final double ordinate = yModel + error.sample();
-
-        return new Point2D.Double(abscissa, ordinate);
-    }
-}

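As the removed Javadoc states, the generator samples x uniformly in [lo, hi] and computes y = a * x + b + N(0, sigma). A minimal sketch of that model with plain java.util.Random (hypothetical names; Gaussian noise via nextGaussian()):

    import java.util.Random;

    public class NoisyLinePoints {
        /** y = a * x + b + N(0, sigma), with x drawn uniformly from [lo, hi). */
        public static double[][] generate(int n, double a, double b, double sigma,
                                          double lo, double hi, long seed) {
            final Random rng = new Random(seed);
            final double[][] cloud = new double[n][2];
            for (int i = 0; i < n; i++) {
                final double x = lo + (hi - lo) * rng.nextDouble();
                cloud[i][0] = x;
                cloud[i][1] = a * x + b + sigma * rng.nextGaussian();
            }
            return cloud;
        }
    }
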
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDataset.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDataset.java b/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDataset.java
deleted file mode 100644
index 2b7f6ca..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDataset.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.util.MathArrays;
-
-/**
- * This class gives access to the statistical reference datasets provided by the
- * NIST (available
- * <a href="http://www.itl.nist.gov/div898/strd/general/dataarchive.html">here</a>).
- * Instances of this class can be created by invocation of the
- * {@link StatisticalReferenceDatasetFactory}.
- */
-@Deprecated
-public abstract class StatisticalReferenceDataset {
-
-    /** The name of this dataset. */
-    private final String name;
-
-    /** The total number of observations (data points). */
-    private final int numObservations;
-
-    /** The total number of parameters. */
-    private final int numParameters;
-
-    /** The total number of starting points for the optimizations. */
-    private final int numStartingPoints;
-
-    /** The values of the predictor. */
-    private final double[] x;
-
-    /** The values of the response. */
-    private final double[] y;
-
-    /**
-     * The starting values. {@code startingValues[j][i]} is the value of the
-     * {@code i}-th parameter in the {@code j}-th set of starting values.
-     */
-    private final double[][] startingValues;
-
-    /** The certified values of the parameters. */
-    private final double[] a;
-
-    /** The certified values of the standard deviation of the parameters. */
-    private final double[] sigA;
-
-    /** The certified value of the residual sum of squares. */
-    private double residualSumOfSquares;
-
-    /** The least-squares problem. */
-    private final MultivariateDifferentiableVectorFunction problem;
-
-    /**
-     * Creates a new instance of this class from the specified data file. The
-     * file must follow the StRD format.
-     *
-     * @param in the data file
-     * @throws IOException if an I/O error occurs
-     */
-    public StatisticalReferenceDataset(final BufferedReader in)
-        throws IOException {
-
-        final ArrayList<String> lines = new ArrayList<String>();
-        for (String line = in.readLine(); line != null; line = in.readLine()) {
-            lines.add(line);
-        }
-        int[] index = findLineNumbers("Data", lines);
-        if (index == null) {
-            throw new AssertionError("could not find line indices for data");
-        }
-        this.numObservations = index[1] - index[0] + 1;
-        this.x = new double[this.numObservations];
-        this.y = new double[this.numObservations];
-        for (int i = 0; i < this.numObservations; i++) {
-            final String line = lines.get(index[0] + i - 1);
-            final String[] tokens = line.trim().split(" ++");
-            // Data columns are in reverse order!!!
-            this.y[i] = Double.parseDouble(tokens[0]);
-            this.x[i] = Double.parseDouble(tokens[1]);
-        }
-
-        index = findLineNumbers("Starting Values", lines);
-        if (index == null) {
-            throw new AssertionError(
-                                     "could not find line indices for starting values");
-        }
-        this.numParameters = index[1] - index[0] + 1;
-
-        double[][] start = null;
-        this.a = new double[numParameters];
-        this.sigA = new double[numParameters];
-        for (int i = 0; i < numParameters; i++) {
-            final String line = lines.get(index[0] + i - 1);
-            final String[] tokens = line.trim().split(" ++");
-            if (start == null) {
-                start = new double[tokens.length - 4][numParameters];
-            }
-            for (int j = 2; j < tokens.length - 2; j++) {
-                start[j - 2][i] = Double.parseDouble(tokens[j]);
-            }
-            this.a[i] = Double.parseDouble(tokens[tokens.length - 2]);
-            this.sigA[i] = Double.parseDouble(tokens[tokens.length - 1]);
-        }
-        if (start == null) {
-            throw new IOException("could not find starting values");
-        }
-        this.numStartingPoints = start.length;
-        this.startingValues = start;
-
-        double dummyDouble = Double.NaN;
-        String dummyString = null;
-        for (String line : lines) {
-            if (line.contains("Dataset Name:")) {
-                dummyString = line
-                    .substring(line.indexOf("Dataset Name:") + 13,
-                               line.indexOf("(")).trim();
-            }
-            if (line.contains("Residual Sum of Squares")) {
-                final String[] tokens = line.split(" ++");
-                dummyDouble = Double.parseDouble(tokens[4].trim());
-            }
-        }
-        if (Double.isNaN(dummyDouble)) {
-            throw new IOException(
-                                  "could not find certified value of residual sum of squares");
-        }
-        this.residualSumOfSquares = dummyDouble;
-
-        if (dummyString == null) {
-            throw new IOException("could not find dataset name");
-        }
-        this.name = dummyString;
-
-        this.problem = new MultivariateDifferentiableVectorFunction() {
-
-            public double[] value(final double[] a) {
-                DerivativeStructure[] dsA = new DerivativeStructure[a.length];
-                for (int i = 0; i < a.length; ++i) {
-                    dsA[i] = new DerivativeStructure(a.length, 0, a[i]);
-                }
-                final int n = getNumObservations();
-                final double[] yhat = new double[n];
-                for (int i = 0; i < n; i++) {
-                    yhat[i] = getModelValue(getX(i), dsA).getValue();
-                }
-                return yhat;
-            }
-
-            public DerivativeStructure[] value(final DerivativeStructure[] a) {
-                final int n = getNumObservations();
-                final DerivativeStructure[] yhat = new DerivativeStructure[n];
-                for (int i = 0; i < n; i++) {
-                    yhat[i] = getModelValue(getX(i), a);
-                }
-                return yhat;
-            }
-
-        };
-    }
-
-    /**
-     * Returns the name of this dataset.
-     *
-     * @return the name of the dataset
-     */
-    public String getName() {
-        return name;
-    }
-
-    /**
-     * Returns the total number of observations (data points).
-     *
-     * @return the number of observations
-     */
-    public int getNumObservations() {
-        return numObservations;
-    }
-
-    /**
-     * Returns a copy of the data arrays. The data is laid out as follows:
-     * <ul><li>{@code data[0][i] = x[i]},</li> <li>{@code data[1][i] = y[i]}.</li></ul>
-     *
-     * @return the array of data points.
-     */
-    public double[][] getData() {
-        return new double[][] {
-            MathArrays.copyOf(x), MathArrays.copyOf(y)
-        };
-    }
-
-    /**
-     * Returns the x-value of the {@code i}-th data point.
-     *
-     * @param i the index of the data point
-     * @return the x-value
-     */
-    public double getX(final int i) {
-        return x[i];
-    }
-
-    /**
-     * Returns the y-value of the {@code i}-th data point.
-     *
-     * @param i the index of the data point
-     * @return the y-value
-     */
-    public double getY(final int i) {
-        return y[i];
-    }
-
-    /**
-     * Returns the total number of parameters.
-     *
-     * @return the number of parameters
-     */
-    public int getNumParameters() {
-        return numParameters;
-    }
-
-    /**
-     * Returns the certified values of the parameters.
-     *
-     * @return the values of the parameters
-     */
-    public double[] getParameters() {
-        return MathArrays.copyOf(a);
-    }
-
-    /**
-     * Returns the certified value of the {@code i}-th parameter.
-     *
-     * @param i the index of the parameter
-     * @return the value of the parameter
-     */
-    public double getParameter(final int i) {
-        return a[i];
-    }
-
-    /**
-     * Returns the certified values of the standard deviations of the parameters.
-     *
-     * @return the standard deviations of the parameters
-     */
-    public double[] getParametersStandardDeviations() {
-        return MathArrays.copyOf(sigA);
-    }
-
-    /**
-     * Returns the certified value of the standard deviation of the {@code i}-th
-     * parameter.
-     *
-     * @param i the index of the parameter
-     * @return the standard deviation of the parameter
-     */
-    public double getParameterStandardDeviation(final int i) {
-        return sigA[i];
-    }
-
-    /**
-     * Returns the certified value of the residual sum of squares.
-     *
-     * @return the residual sum of squares
-     */
-    public double getResidualSumOfSquares() {
-        return residualSumOfSquares;
-    }
-
-    /**
-     * Returns the total number of starting points (initial guesses for the
-     * optimization process).
-     *
-     * @return the number of starting points
-     */
-    public int getNumStartingPoints() {
-        return numStartingPoints;
-    }
-
-    /**
-     * Returns the {@code i}-th set of initial values of the parameters.
-     *
-     * @param i the index of the starting point
-     * @return the starting point
-     */
-    public double[] getStartingPoint(final int i) {
-        return MathArrays.copyOf(startingValues[i]);
-    }
-
-    /**
-     * Returns the least-squares problem corresponding to fitting the model to
-     * the specified data.
-     *
-     * @return the least-squares problem
-     */
-    public MultivariateDifferentiableVectorFunction getLeastSquaresProblem() {
-        return problem;
-    }
-
-    /**
-     * Returns the value of the model for the specified values of the predictor
-     * variable and the parameters.
-     *
-     * @param x the predictor variable
-     * @param a the parameters
-     * @return the value of the model
-     */
-    public abstract DerivativeStructure getModelValue(final double x, final DerivativeStructure[] a);
-
-    /**
-     * <p>
-     * Parses the specified text lines, and extracts the indices of the first
-     * and last lines of the data defined by the specified {@code key}. This key
-     * must be one of
-     * </p>
-     * <ul>
-     * <li>{@code "Starting Values"},</li>
-     * <li>{@code "Certified Values"},</li>
-     * <li>{@code "Data"}.</li>
-     * </ul>
-     * <p>
-     * In the NIST data files, the line indices are separated by the keywords
-     * {@code "lines"} and {@code "to"}.
-     * </p>
-     *
-     * @param lines the lines of text to be parsed
-     * @return an array of two {@code int}s. First value is the index of the
-     *         first line, second value is the index of the last line.
-     *         {@code null} if the line could not be parsed.
-     */
-    private static int[] findLineNumbers(final String key,
-                                         final Iterable<String> lines) {
-        for (String text : lines) {
-            boolean flag = text.contains(key) && text.contains("lines") &&
-                           text.contains("to") && text.contains(")");
-            if (flag) {
-                final int[] numbers = new int[2];
-                final String from = text.substring(text.indexOf("lines") + 5,
-                                                   text.indexOf("to"));
-                numbers[0] = Integer.parseInt(from.trim());
-                final String to = text.substring(text.indexOf("to") + 2,
-                                                 text.indexOf(")"));
-                numbers[1] = Integer.parseInt(to.trim());
-                return numbers;
-            }
-        }
-        return null;
-    }
-}

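To make the parsing convention described above concrete: findLineNumbers looks for a header that contains the key together with the words "lines" and "to" and a closing parenthesis, then extracts the two line numbers between those markers. The following self-contained sketch mirrors that substring logic; the header line is illustrative, not copied from an actual NIST file:

    public class StrdHeaderParseSketch {
        /** Mirrors the substring logic of the removed findLineNumbers method. */
        static int[] parse(String header) {
            final String from = header.substring(header.indexOf("lines") + 5, header.indexOf("to"));
            final String to = header.substring(header.indexOf("to") + 2, header.indexOf(")"));
            return new int[] { Integer.parseInt(from.trim()), Integer.parseInt(to.trim()) };
        }

        public static void main(String[] args) {
            // Hypothetical StRD-style header line.
            final int[] range = parse("Data:   (lines  61 to 211)");
            // Prints "61..211"; the dataset would then be read as 211 - 61 + 1 = 151 observations.
            System.out.println(range[0] + ".." + range[1]);
        }
    }
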
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDatasetFactory.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDatasetFactory.java b/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDatasetFactory.java
deleted file mode 100644
index f7fa021..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/StatisticalReferenceDatasetFactory.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-
-/**
- * A factory to create instances of {@link StatisticalReferenceDataset} from
- * available resources.
- */
-@Deprecated
-public class StatisticalReferenceDatasetFactory {
-
-    private StatisticalReferenceDatasetFactory() {
-        // Do nothing
-    }
-
-    /**
-     * Creates a new buffered reader from the specified resource name.
-     *
-     * @param name the name of the resource
-     * @return a buffered reader
-     * @throws IOException if an I/O error occurred
-     */
-    public static BufferedReader createBufferedReaderFromResource(final String name)
-        throws IOException {
-        final InputStream resourceAsStream;
-        resourceAsStream = StatisticalReferenceDatasetFactory.class
-            .getResourceAsStream(name);
-        if (resourceAsStream == null) {
-            throw new IOException("could not find resource " + name);
-        }
-        return new BufferedReader(new InputStreamReader(resourceAsStream));
-    }
-
-    public static StatisticalReferenceDataset createKirby2()
-        throws IOException {
-        final BufferedReader in = createBufferedReaderFromResource("Kirby2.dat");
-        StatisticalReferenceDataset dataset = null;
-        try {
-            dataset = new StatisticalReferenceDataset(in) {
-
-                @Override
-                public DerivativeStructure getModelValue(final double x, final DerivativeStructure[] a) {
-                    final DerivativeStructure p = a[0].add(a[1].add(a[2].multiply(x)).multiply(x));
-                    final DerivativeStructure q = a[3].add(a[4].multiply(x)).multiply(x).add(1.0);
-                    return p.divide(q);
-                }
-
-            };
-        } finally {
-            in.close();
-        }
-        return dataset;
-    }
-
-    public static StatisticalReferenceDataset createHahn1()
-        throws IOException {
-        final BufferedReader in = createBufferedReaderFromResource("Hahn1.dat");
-        StatisticalReferenceDataset dataset = null;
-        try {
-            dataset = new StatisticalReferenceDataset(in) {
-
-                @Override
-                public DerivativeStructure getModelValue(final double x, final DerivativeStructure[] a) {
-                    final DerivativeStructure p = a[0].add(a[1].add(a[2].add(a[3].multiply(x)).multiply(x)).multiply(x));
-                    final DerivativeStructure q = a[4].add(a[5].add(a[6].multiply(x)).multiply(x)).multiply(x).add(1.0);
-                    return p.divide(q);
-                }
-
-            };
-        } finally {
-            in.close();
-        }
-        return dataset;
-    }
-
-    public static StatisticalReferenceDataset createMGH17()
-        throws IOException {
-        final BufferedReader in = createBufferedReaderFromResource("MGH17.dat");
-        StatisticalReferenceDataset dataset = null;
-        try {
-            dataset = new StatisticalReferenceDataset(in) {
-
-                @Override
-                public DerivativeStructure getModelValue(final double x, final DerivativeStructure[] a) {
-                    return a[0].add(a[1].multiply(a[3].multiply(-x).exp())).add(a[2].multiply(a[4].multiply(-x).exp()));
-                }
-
-            };
-        } finally {
-            in.close();
-        }
-        return dataset;
-    }
-
-    public static StatisticalReferenceDataset createLanczos1()
-        throws IOException {
-        final BufferedReader in =
-            createBufferedReaderFromResource("Lanczos1.dat");
-        StatisticalReferenceDataset dataset = null;
-        try {
-            dataset = new StatisticalReferenceDataset(in) {
-
-                @Override
-                public DerivativeStructure getModelValue(final double x, final DerivativeStructure[] a) {
-                    return a[0].multiply(a[3].multiply(-x).exp()).add(
-                                a[1].multiply(a[4].multiply(-x).exp())).add(
-                                     a[2].multiply(a[5].multiply(-x).exp()));
-                }
-
-            };
-        } finally {
-            in.close();
-        }
-        return dataset;
-    }
-
-    /**
-     * Returns an array with all available reference datasets.
-     *
-     * @return the array of datasets
-     * @throws IOException if an I/O error occurs
-     */
-    public StatisticalReferenceDataset[] createAll()
-        throws IOException {
-        return new StatisticalReferenceDataset[] {
-            createKirby2(), createMGH17()
-        };
-    }
-}

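Each factory method above opens a reader from a classpath resource and closes it in a finally block. A small self-contained sketch of the same resource-loading pattern written with Java 7 try-with-resources (hypothetical class and method names, purely a style alternative) would be:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;

    public class ResourceReaderSketch {
        /** Reads the first line of a classpath resource, closing the reader automatically. */
        public static String firstLine(String name) throws IOException {
            final InputStream stream = ResourceReaderSketch.class.getResourceAsStream(name);
            if (stream == null) {
                throw new IOException("could not find resource " + name);
            }
            try (BufferedReader in = new BufferedReader(new InputStreamReader(stream))) {
                return in.readLine();
            }
        }
    }
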
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/StraightLineProblem.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/StraightLineProblem.java b/src/test/java/org/apache/commons/math4/optimization/general/StraightLineProblem.java
deleted file mode 100644
index a81da4c..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/StraightLineProblem.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.util.ArrayList;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.analysis.differentiation.UnivariateDifferentiableFunction;
-import org.apache.commons.math4.stat.regression.SimpleRegression;
-
-/**
- * Class that models a straight line defined as {@code y = a x + b}.
- * The parameters of the problem are:
- * <ul>
- *  <li>{@code a}</li>
- *  <li>{@code b}</li>
- * </ul>
- * The model functions are:
- * <ul>
- *  <li>for each pair (a, b), the y-coordinate of the line.</li>
- * </ul>
- */
-@Deprecated
-class StraightLineProblem implements MultivariateDifferentiableVectorFunction {
-    /** Cloud of points assumed to be fitted by a straight line. */
-    private final ArrayList<double[]> points;
-    /** Error (on the y-coordinate of the points). */
-    private final double sigma;
-
-    /**
-     * @param error Assumed error for the y-coordinate.
-     */
-    public StraightLineProblem(double error) {
-        points = new ArrayList<double[]>();
-        sigma = error;
-    }
-
-    public void addPoint(double px, double py) {
-        points.add(new double[] { px, py });
-    }
-
-    /**
-     * @return the list of x-coordinates.
-     */
-    public double[] x() {
-        final double[] v = new double[points.size()];
-        for (int i = 0; i < points.size(); i++) {
-            final double[] p = points.get(i);
-            v[i] = p[0]; // x-coordinate.
-        }
-
-        return v;
-    }
-
-    /**
-     * @return the list of y-coordinates.
-     */
-    public double[] y() {
-        final double[] v = new double[points.size()];
-        for (int i = 0; i < points.size(); i++) {
-            final double[] p = points.get(i);
-            v[i] = p[1]; // y-coordinate.
-        }
-
-        return v;
-    }
-
-    public double[] target() {
-        return y();
-    }
-
-    public double[] weight() {
-        final double weight = 1 / (sigma * sigma);
-        final double[] w = new double[points.size()];
-        for (int i = 0; i < points.size(); i++) {
-            w[i] = weight;
-        }
-
-        return w;
-    }
-
-    public double[] value(double[] params) {
-        final Model line = new Model(new DerivativeStructure(0, 0, params[0]),
-                                     new DerivativeStructure(0, 0, params[1]));
-
-        final double[] model = new double[points.size()];
-        for (int i = 0; i < points.size(); i++) {
-            final double[] p = points.get(i);
-            model[i] = line.value(p[0]);
-        }
-
-        return model;
-    }
-
-    public DerivativeStructure[] value(DerivativeStructure[] params) {
-        final Model line = new Model(params[0], params[1]);
-
-        final DerivativeStructure[] model = new DerivativeStructure[points.size()];
-        for (int i = 0; i < points.size(); i++) {
-            final DerivativeStructure p0 = params[0].getField().getZero().add(points.get(i)[0]);
-            model[i] = line.value(p0);
-        }
-
-        return model;
-    }
-
-    /**
-     * Directly solve the linear problem, using the {@link SimpleRegression}
-     * class.
-     */
-    public double[] solve() {
-        final SimpleRegression regress = new SimpleRegression(true);
-        for (double[] d : points) {
-            regress.addData(d[0], d[1]);
-        }
-
-        final double[] result = { regress.getSlope(), regress.getIntercept() };
-        return result;
-    }
-
-    /**
-     * Linear function.
-     */
-    public static class Model implements UnivariateDifferentiableFunction {
-        final DerivativeStructure a;
-        final DerivativeStructure b;
-
-        public Model(DerivativeStructure a,
-                     DerivativeStructure b) {
-            this.a = a;
-            this.b = b;
-        }
-
-        public double value(double x) {
-            return a.getValue() * x + b.getValue();
-        }
-
-        public DerivativeStructure value(DerivativeStructure x) {
-            return x.multiply(a).add(b);
-        }
-
-    }
-}

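The removed solve() method sidesteps the optimizer and fits the line directly with SimpleRegression, which lives in org.apache.commons.math4.stat.regression and is not part of this removal. A minimal standalone usage sketch with illustrative data points:

    import org.apache.commons.math4.stat.regression.SimpleRegression;

    public class DirectLineFit {
        public static void main(String[] args) {
            // Fit y = a x + b by ordinary least squares; the points are illustrative only.
            final double[][] points = { { 0, 1.1 }, { 1, 2.9 }, { 2, 5.2 }, { 3, 6.8 } };

            final SimpleRegression regression = new SimpleRegression(true); // true = estimate an intercept
            for (double[] p : points) {
                regression.addData(p[0], p[1]);
            }

            System.out.println("slope     = " + regression.getSlope());
            System.out.println("intercept = " + regression.getIntercept());
            System.out.println("R^2       = " + regression.getRSquare());
        }
    }
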

[17/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
Remove deprecated optimization package.


Project: http://git-wip-us.apache.org/repos/asf/commons-math/repo
Commit: http://git-wip-us.apache.org/repos/asf/commons-math/commit/b4669aad
Tree: http://git-wip-us.apache.org/repos/asf/commons-math/tree/b4669aad
Diff: http://git-wip-us.apache.org/repos/asf/commons-math/diff/b4669aad

Branch: refs/heads/master
Commit: b4669aad3f2185894db7d4fb84cbcc311c32e34d
Parents: 35b688b
Author: Thomas Neidhart <th...@gmail.com>
Authored: Wed Feb 25 22:34:53 2015 +0100
Committer: Thomas Neidhart <th...@gmail.com>
Committed: Wed Feb 25 22:34:53 2015 +0100

----------------------------------------------------------------------
 findbugs-exclude-filter.xml                     |   57 +-
 .../AbstractConvergenceChecker.java             |  102 -
 .../BaseMultivariateMultiStartOptimizer.java    |  192 --
 .../optimization/BaseMultivariateOptimizer.java |   61 -
 .../BaseMultivariateSimpleBoundsOptimizer.java  |   65 -
 ...seMultivariateVectorMultiStartOptimizer.java |  204 --
 .../BaseMultivariateVectorOptimizer.java        |   63 -
 .../math4/optimization/BaseOptimizer.java       |   61 -
 .../math4/optimization/ConvergenceChecker.java  |   57 -
 ...entiableMultivariateMultiStartOptimizer.java |   52 -
 .../DifferentiableMultivariateOptimizer.java    |   37 -
 ...leMultivariateVectorMultiStartOptimizer.java |   53 -
 ...fferentiableMultivariateVectorOptimizer.java |   32 -
 .../commons/math4/optimization/GoalType.java    |   37 -
 .../math4/optimization/InitialGuess.java        |   48 -
 .../optimization/LeastSquaresConverter.java     |  182 --
 ...ariateDifferentiableMultiStartOptimizer.java |   52 -
 .../MultivariateDifferentiableOptimizer.java    |   37 -
 ...DifferentiableVectorMultiStartOptimizer.java |   53 -
 ...ltivariateDifferentiableVectorOptimizer.java |   32 -
 .../MultivariateMultiStartOptimizer.java        |   52 -
 .../optimization/MultivariateOptimizer.java     |   35 -
 .../math4/optimization/OptimizationData.java    |   30 -
 .../math4/optimization/PointValuePair.java      |  128 -
 .../optimization/PointVectorValuePair.java      |  151 --
 .../math4/optimization/SimpleBounds.java        |   63 -
 .../math4/optimization/SimplePointChecker.java  |  145 --
 .../math4/optimization/SimpleValueChecker.java  |  136 -
 .../optimization/SimpleVectorValueChecker.java  |  145 --
 .../commons/math4/optimization/Target.java      |   50 -
 .../commons/math4/optimization/Weight.java      |   68 -
 .../optimization/direct/AbstractSimplex.java    |  347 ---
 .../optimization/direct/BOBYQAOptimizer.java    | 2465 ------------------
 .../BaseAbstractMultivariateOptimizer.java      |  318 ---
 ...stractMultivariateSimpleBoundsOptimizer.java |   82 -
 ...BaseAbstractMultivariateVectorOptimizer.java |  370 ---
 .../optimization/direct/CMAESOptimizer.java     | 1441 ----------
 .../direct/MultiDirectionalSimplex.java         |  218 --
 .../MultivariateFunctionMappingAdapter.java     |  301 ---
 .../MultivariateFunctionPenaltyAdapter.java     |  190 --
 .../optimization/direct/NelderMeadSimplex.java  |  283 --
 .../optimization/direct/PowellOptimizer.java    |  352 ---
 .../optimization/direct/SimplexOptimizer.java   |  233 --
 .../math4/optimization/direct/package-info.java |   24 -
 .../math4/optimization/fitting/CurveFitter.java |  298 ---
 .../optimization/fitting/GaussianFitter.java    |  365 ---
 .../optimization/fitting/HarmonicFitter.java    |  384 ---
 .../optimization/fitting/PolynomialFitter.java  |  111 -
 .../fitting/WeightedObservedPoint.java          |   76 -
 .../optimization/fitting/package-info.java      |   30 -
 .../AbstractDifferentiableOptimizer.java        |   90 -
 .../general/AbstractLeastSquaresOptimizer.java  |  577 ----
 .../AbstractScalarDifferentiableOptimizer.java  |  114 -
 .../general/ConjugateGradientFormula.java       |   50 -
 .../general/GaussNewtonOptimizer.java           |  194 --
 .../general/LevenbergMarquardtOptimizer.java    |  943 -------
 .../NonLinearConjugateGradientOptimizer.java    |  311 ---
 .../optimization/general/Preconditioner.java    |   46 -
 .../optimization/general/package-info.java      |   22 -
 .../linear/AbstractLinearOptimizer.java         |  162 --
 .../optimization/linear/LinearConstraint.java   |  234 --
 .../linear/LinearObjectiveFunction.java         |  148 --
 .../optimization/linear/LinearOptimizer.java    |   92 -
 .../linear/NoFeasibleSolutionException.java     |   42 -
 .../math4/optimization/linear/Relationship.java |   67 -
 .../optimization/linear/SimplexSolver.java      |  238 --
 .../optimization/linear/SimplexTableau.java     |  635 -----
 .../linear/UnboundedSolutionException.java      |   42 -
 .../math4/optimization/linear/package-info.java |   22 -
 .../math4/optimization/package-info.java        |   79 -
 .../BaseAbstractUnivariateOptimizer.java        |  162 --
 .../univariate/BaseUnivariateOptimizer.java     |   86 -
 .../optimization/univariate/BracketFinder.java  |  289 --
 .../optimization/univariate/BrentOptimizer.java |  316 ---
 .../SimpleUnivariateValueChecker.java           |  139 -
 .../UnivariateMultiStartOptimizer.java          |  202 --
 .../univariate/UnivariateOptimizer.java         |   29 -
 .../univariate/UnivariatePointValuePair.java    |   68 -
 .../optimization/univariate/package-info.java   |   22 -
 ...teDifferentiableMultiStartOptimizerTest.java |  100 -
 ...erentiableVectorMultiStartOptimizerTest.java |  246 --
 .../MultivariateMultiStartOptimizerTest.java    |   79 -
 .../math4/optimization/PointValuePairTest.java  |   40 -
 .../optimization/PointVectorValuePairTest.java  |   44 -
 .../optimization/SimplePointCheckerTest.java    |   57 -
 .../optimization/SimpleValueCheckerTest.java    |   55 -
 .../SimpleVectorValueCheckerTest.java           |   57 -
 .../direct/BOBYQAOptimizerTest.java             |  631 -----
 .../optimization/direct/CMAESOptimizerTest.java |  761 ------
 .../MultivariateFunctionMappingAdapterTest.java |  194 --
 .../MultivariateFunctionPenaltyAdapterTest.java |  196 --
 .../direct/PowellOptimizerTest.java             |  239 --
 .../SimplexOptimizerMultiDirectionalTest.java   |  207 --
 .../direct/SimplexOptimizerNelderMeadTest.java  |  268 --
 .../optimization/fitting/CurveFitterTest.java   |  154 --
 .../fitting/GaussianFitterTest.java             |  365 ---
 .../fitting/HarmonicFitterTest.java             |  203 --
 .../fitting/PolynomialFitterTest.java           |  288 --
 ...stractLeastSquaresOptimizerAbstractTest.java |  524 ----
 .../AbstractLeastSquaresOptimizerTest.java      |  100 -
 ...ractLeastSquaresOptimizerTestValidation.java |  322 ---
 .../optimization/general/CircleProblem.java     |  139 -
 .../optimization/general/CircleScalar.java      |   89 -
 .../optimization/general/CircleVectorial.java   |   91 -
 .../general/GaussNewtonOptimizerTest.java       |  154 --
 .../LevenbergMarquardtOptimizerTest.java        |  388 ---
 .../math4/optimization/general/MinpackTest.java | 1212 ---------
 ...NonLinearConjugateGradientOptimizerTest.java |  388 ---
 .../general/RandomCirclePointGenerator.java     |   92 -
 .../RandomStraightLinePointGenerator.java       |   99 -
 .../general/StatisticalReferenceDataset.java    |  367 ---
 .../StatisticalReferenceDatasetFactory.java     |  150 --
 .../general/StraightLineProblem.java            |  159 --
 .../optimization/linear/SimplexSolverTest.java  |  646 -----
 .../optimization/linear/SimplexTableauTest.java |  116 -
 .../univariate/BracketFinderTest.java           |  119 -
 .../univariate/BrentOptimizerTest.java          |  256 --
 .../SimpleUnivariateValueCheckerTest.java       |   55 -
 .../UnivariateMultiStartOptimizerTest.java      |  111 -
 119 files changed, 2 insertions(+), 25548 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/findbugs-exclude-filter.xml
----------------------------------------------------------------------
diff --git a/findbugs-exclude-filter.xml b/findbugs-exclude-filter.xml
index db03c27..a99e76f 100644
--- a/findbugs-exclude-filter.xml
+++ b/findbugs-exclude-filter.xml
@@ -39,11 +39,6 @@
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.univariate.BrentOptimizer" />
-    <Method name="localMin" params="boolean,double,double,double,double,double" returns="double" />
-    <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.univariate.BrentOptimizer" />
     <Method name="localMin" params="boolean,double,double,double,double,double" returns="double" />
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
@@ -73,10 +68,7 @@
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
   </Match>
   <Match>
-    <Or>
-      <Class name="org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer" />
-      <Class name="org.apache.commons.math4.optimization.direct.BOBYQAOptimizer" />
-    </Or>
+    <Class name="org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer" />
     <Method name="altmov" params="int,double" returns="double[]" />
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
   </Match>
@@ -97,11 +89,6 @@
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.linear.LinearConstraint" />
-    <Method name="equals" params="java.lang.Object" returns="boolean" />
-    <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.linear.LinearConstraint" />
     <Method name="equals" params="java.lang.Object" returns="boolean" />
     <Bug pattern="FE_FLOATING_POINT_EQUALITY" />
@@ -133,10 +120,7 @@
        In the original code, this is sequential and fall-through is expected
    -->
   <Match>
-    <Or>
-      <Class name="org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer" />
-      <Class name="org.apache.commons.math4.optimization.direct.BOBYQAOptimizer" />
-    </Or>
+    <Class name="org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer" />
     <Or>
       <Method name="bobyqb" params="double[],double[]" returns="double" />
       <Method name="trsbox" />
@@ -144,18 +128,6 @@
     <Bug pattern="SF_SWITCH_FALLTHROUGH" />
   </Match>
   
-  <!-- Spurious: The fields are deprecated and not used anymore
-       (to be removed in 4.0)
-    -->
-  <Match>
-    <Class name="org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer" />
-    <Or>
-      <Field name="weightedResidualJacobian" />
-      <Field name="weightedResiduals" />
-      </Or>
-    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
-  </Match>
-
   <!-- Spurious: Findbugs confused by final local variables -->
   <Match>
     <Class name="org.apache.commons.math4.util.FastMath" />
@@ -175,21 +147,11 @@
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.PointValuePair"/>
-    <Method name="getPointRef" params="" returns="double[]" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.PointValuePair"/>
     <Method name="getPointRef" params="" returns="double[]" />
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.PointValuePair"/>
-    <Method name="&lt;init>" params="double[],double,boolean" returns="void" />
-    <Bug pattern="EI_EXPOSE_REP2" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.PointValuePair"/>
     <Method name="&lt;init>" params="double[],double,boolean" returns="void" />
     <Bug pattern="EI_EXPOSE_REP2" />
@@ -199,20 +161,10 @@
     <Or>
       <Class name="org.apache.commons.math4.optim.PointValuePair"/>
       <Class name="org.apache.commons.math4.optim.PointVectorValuePair"/>
-      <Class name="org.apache.commons.math4.optimization.PointValuePair"/>
-      <Class name="org.apache.commons.math4.optimization.PointVectorValuePair"/>      
     </Or>
     <Bug pattern="SE_NO_SUITABLE_CONSTRUCTOR" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.PointVectorValuePair"/>
-    <Or>
-      <Method name="getPointRef" params="" returns="double[]" />
-      <Method name="getValueRef" params="" returns="double[]" />
-    </Or>
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.PointVectorValuePair"/>
     <Or>
       <Method name="getPointRef" params="" returns="double[]" />
@@ -221,11 +173,6 @@
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
   <Match>
-    <Class name="org.apache.commons.math4.optimization.PointVectorValuePair"/>
-    <Method name="&lt;init>" params="double[],double[][],boolean" returns="void" />
-    <Bug pattern="EI_EXPOSE_REP2" />
-  </Match>
-  <Match>
     <Class name="org.apache.commons.math4.optim.PointVectorValuePair"/>
     <Method name="&lt;init>" params="double[],double[][],boolean" returns="void" />
     <Bug pattern="EI_EXPOSE_REP2" />

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/AbstractConvergenceChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/AbstractConvergenceChecker.java b/src/main/java/org/apache/commons/math4/optimization/AbstractConvergenceChecker.java
deleted file mode 100644
index 9f57533..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/AbstractConvergenceChecker.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.util.Precision;
-
-/**
- * Base class for all convergence checker implementations.
- *
- * @param <PAIR> Type of (point, value) pair.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public abstract class AbstractConvergenceChecker<PAIR>
-    implements ConvergenceChecker<PAIR> {
-    /**
-     * Default relative threshold.
-     * @deprecated in 3.1 (to be removed in 4.0) because this value is too small
-     * to be useful as a default (cf. MATH-798).
-     */
-    @Deprecated
-    private static final double DEFAULT_RELATIVE_THRESHOLD = 100 * Precision.EPSILON;
-    /**
-     * Default absolute threshold.
-     * @deprecated in 3.1 (to be removed in 4.0) because this value is too small
-     * to be useful as a default (cf. MATH-798).
-     */
-    @Deprecated
-    private static final double DEFAULT_ABSOLUTE_THRESHOLD = 100 * Precision.SAFE_MIN;
-    /**
-     * Relative tolerance threshold.
-     */
-    private final double relativeThreshold;
-    /**
-     * Absolute tolerance threshold.
-     */
-    private final double absoluteThreshold;
-
-    /**
-     * Build an instance with default thresholds.
-     * @deprecated in 3.1 (to be removed in 4.0). Convergence thresholds are
-     * problem-dependent. As this class is intended for users who want to set
-     * their own convergence criterion instead of relying on an algorithm's
-     * default procedure, they should also set the thresholds appropriately
-     * (cf. MATH-798).
-     */
-    @Deprecated
-    public AbstractConvergenceChecker() {
-        this.relativeThreshold = DEFAULT_RELATIVE_THRESHOLD;
-        this.absoluteThreshold = DEFAULT_ABSOLUTE_THRESHOLD;
-    }
-
-    /**
-     * Build an instance with the specified thresholds.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     */
-    public AbstractConvergenceChecker(final double relativeThreshold,
-                                      final double absoluteThreshold) {
-        this.relativeThreshold = relativeThreshold;
-        this.absoluteThreshold = absoluteThreshold;
-    }
-
-    /**
-     * @return the relative threshold.
-     */
-    public double getRelativeThreshold() {
-        return relativeThreshold;
-    }
-
-    /**
-     * @return the absolute threshold.
-     */
-    public double getAbsoluteThreshold() {
-        return absoluteThreshold;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public abstract boolean converged(int iteration,
-                                      PAIR previous,
-                                      PAIR current);
-}
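
The removed base class above only stores the two tolerance thresholds; the actual convergence test lives in concrete subclasses such as SimpleValueChecker. A minimal sketch of that kind of threshold test, written against the removed classes purely for illustration (the class name ValueStallChecker is hypothetical and this is not the verbatim SimpleValueChecker code):

    import org.apache.commons.math4.optimization.AbstractConvergenceChecker;
    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.util.FastMath;

    /** Hypothetical checker: converged once the objective value stalls. */
    public class ValueStallChecker extends AbstractConvergenceChecker<PointValuePair> {
        public ValueStallChecker(double relativeThreshold, double absoluteThreshold) {
            super(relativeThreshold, absoluteThreshold);
        }

        @Override
        public boolean converged(int iteration, PointValuePair previous, PointValuePair current) {
            final double p = previous.getValue();
            final double c = current.getValue();
            final double difference = FastMath.abs(p - c);
            final double size = FastMath.max(FastMath.abs(p), FastMath.abs(c));
            // Converged when the change is small relative to the values, or absolutely small.
            return difference <= size * getRelativeThreshold()
                || difference <= getAbsoluteThreshold();
        }
    }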

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateMultiStartOptimizer.java
deleted file mode 100644
index 59b8277..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateMultiStartOptimizer.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import java.util.Arrays;
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Base class for all implementations of a multi-start optimizer.
- *
- * This class is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * {@link MultivariateMultiStartOptimizer} or on
- * {@link DifferentiableMultivariateMultiStartOptimizer}.
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class BaseMultivariateMultiStartOptimizer<FUNC extends MultivariateFunction>
-    implements BaseMultivariateOptimizer<FUNC> {
-    /** Underlying classical optimizer. */
-    private final BaseMultivariateOptimizer<FUNC> optimizer;
-    /** Maximal number of evaluations allowed. */
-    private int maxEvaluations;
-    /** Number of evaluations already performed for all starts. */
-    private int totalEvaluations;
-    /** Number of starts to go. */
-    private int starts;
-    /** Random generator for multi-start. */
-    private RandomVectorGenerator generator;
-    /** Found optima. */
-    private PointValuePair[] optima;
-
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform. If {@code starts == 1},
-     * the {@link #optimize(int,MultivariateFunction,GoalType,double[])
-     * optimize} method will return the same solution as {@code optimizer} would.
-     * @param generator Random vector generator to use for restarts.
-     * @throws NullArgumentException if {@code optimizer} or {@code generator}
-     * is {@code null}.
-     * @throws NotStrictlyPositiveException if {@code starts < 1}.
-     */
-    protected BaseMultivariateMultiStartOptimizer(final BaseMultivariateOptimizer<FUNC> optimizer,
-                                                      final int starts,
-                                                      final RandomVectorGenerator generator) {
-        if (optimizer == null ||
-            generator == null) {
-            throw new NullArgumentException();
-        }
-        if (starts < 1) {
-            throw new NotStrictlyPositiveException(starts);
-        }
-
-        this.optimizer = optimizer;
-        this.starts = starts;
-        this.generator = generator;
-    }
-
-    /**
-     * Get all the optima found during the last call to {@link
-     * #optimize(int,MultivariateFunction,GoalType,double[]) optimize}.
-     * The optimizer stores all the optima found during a set of
-     * restarts. The {@link #optimize(int,MultivariateFunction,GoalType,double[])
-     * optimize} method returns the best point only. This method
-     * returns all the points found at the end of each start,
-     * including the best one already returned by the {@link
-     * #optimize(int,MultivariateFunction,GoalType,double[]) optimize} method.
-     * <br/>
-     * The returned array has one element for each start as specified
-     * in the constructor. It is ordered with the results from the
-     * runs that did converge first, sorted from best to worst
-     * objective value (i.e. in ascending order if minimizing and in
-     * descending order if maximizing), followed by {@code null} elements
-     * corresponding to the runs that did not converge. This means all
-     * elements will be {@code null} if the {@link #optimize(int,MultivariateFunction,GoalType,double[])
-     * optimize} method did throw an exception.
-     * This also means that if the first element is not {@code null}, it
-     * is the best point found across all starts.
-     *
-     * @return an array containing the optima.
-     * @throws MathIllegalStateException if {@link
-     * #optimize(int,MultivariateFunction,GoalType,double[]) optimize}
-     * has not been called.
-     */
-    public PointValuePair[] getOptima() {
-        if (optima == null) {
-            throw new MathIllegalStateException(LocalizedFormats.NO_OPTIMUM_COMPUTED_YET);
-        }
-        return optima.clone();
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return maxEvaluations;
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return totalEvaluations;
-    }
-
-    /** {@inheritDoc} */
-    public ConvergenceChecker<PointValuePair> getConvergenceChecker() {
-        return optimizer.getConvergenceChecker();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public PointValuePair optimize(int maxEval, final FUNC f,
-                                       final GoalType goal,
-                                       double[] startPoint) {
-        maxEvaluations = maxEval;
-        RuntimeException lastException = null;
-        optima = new PointValuePair[starts];
-        totalEvaluations = 0;
-
-        // Multi-start loop.
-        for (int i = 0; i < starts; ++i) {
-            // CHECKSTYLE: stop IllegalCatch
-            try {
-                optima[i] = optimizer.optimize(maxEval - totalEvaluations, f, goal,
-                                               i == 0 ? startPoint : generator.nextVector());
-            } catch (RuntimeException mue) {
-                lastException = mue;
-                optima[i] = null;
-            }
-            // CHECKSTYLE: resume IllegalCatch
-
-            totalEvaluations += optimizer.getEvaluations();
-        }
-
-        sortPairs(goal);
-
-        if (optima[0] == null) {
-            throw lastException; // cannot be null if starts >=1
-        }
-
-        // Return the found point given the best objective function value.
-        return optima[0];
-    }
-
-    /**
-     * Sort the optima from best to worst, followed by {@code null} elements.
-     *
-     * @param goal Goal type.
-     */
-    private void sortPairs(final GoalType goal) {
-        Arrays.sort(optima, new Comparator<PointValuePair>() {
-                public int compare(final PointValuePair o1,
-                                   final PointValuePair o2) {
-                    if (o1 == null) {
-                        return (o2 == null) ? 0 : 1;
-                    } else if (o2 == null) {
-                        return -1;
-                    }
-                    final double v1 = o1.getValue();
-                    final double v2 = o2.getValue();
-                    return (goal == GoalType.MINIMIZE) ?
-                        Double.compare(v1, v2) : Double.compare(v2, v1);
-                }
-            });
-    }
-}
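
The multi-start pattern removed here lives on in the org.apache.commons.math4.optim packages. A hedged usage sketch, assuming the math4 optim and random packages mirror their commons-math3 counterparts (MultiStartMultivariateOptimizer, SimplexOptimizer, UncorrelatedRandomVectorGenerator and the OptimizationData classes below come from that assumption, not from this commit):

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.MultiStartMultivariateOptimizer;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.SimplexOptimizer;
    import org.apache.commons.math4.random.GaussianRandomGenerator;
    import org.apache.commons.math4.random.JDKRandomGenerator;
    import org.apache.commons.math4.random.UncorrelatedRandomVectorGenerator;

    public class MultiStartExample {
        public static void main(String[] args) {
            // Simple bowl-shaped objective with its minimum at (1, -2).
            MultivariateFunction f = new MultivariateFunction() {
                public double value(double[] point) {
                    final double dx = point[0] - 1;
                    final double dy = point[1] + 2;
                    return dx * dx + dy * dy;
                }
            };

            // Random restart points drawn around the origin.
            UncorrelatedRandomVectorGenerator restarts =
                new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(new JDKRandomGenerator()));

            MultiStartMultivariateOptimizer optimizer =
                new MultiStartMultivariateOptimizer(new SimplexOptimizer(1e-10, 1e-12), 5, restarts);

            PointValuePair best = optimizer.optimize(new MaxEval(10000),
                                                     new ObjectiveFunction(f),
                                                     GoalType.MINIMIZE,
                                                     new InitialGuess(new double[] { 10, 10 }),
                                                     new NelderMeadSimplex(2));

            // As in the removed class, getOptima() reports every run, best first.
            System.out.println("best = " + java.util.Arrays.toString(best.getPoint()));
            System.out.println("runs = " + optimizer.getOptima().length);
        }
    }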

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateOptimizer.java
deleted file mode 100644
index ce156a0..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateOptimizer.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-
-/**
- * This interface is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * the following interfaces:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateOptimizer}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableOptimizer}</li>
- * </ul>
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface BaseMultivariateOptimizer<FUNC extends MultivariateFunction>
-    extends BaseOptimizer<PointValuePair> {
-    /**
-     * Optimize an objective function.
-     *
-     * @param f Objective function.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param startPoint Start point for optimization.
-     * @param maxEval Maximum number of function evaluations.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     * @deprecated As of 3.1. In 4.0, it will be replaced by the declaration
-     * corresponding to this {@link org.apache.commons.math4.optimization.direct.BaseAbstractMultivariateOptimizer#optimize(int,MultivariateFunction,GoalType,OptimizationData[]) method}.
-     */
-    @Deprecated
-    PointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                            double[] startPoint);
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateSimpleBoundsOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateSimpleBoundsOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateSimpleBoundsOptimizer.java
deleted file mode 100644
index b237dee..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateSimpleBoundsOptimizer.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-
-/**
- * This interface is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * the following interfaces:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateOptimizer}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableOptimizer}</li>
- * </ul>
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface BaseMultivariateSimpleBoundsOptimizer<FUNC extends MultivariateFunction>
-    extends BaseMultivariateOptimizer<FUNC> {
-    /**
-     * Optimize an objective function.
-     *
-     * @param f Objective function.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param startPoint Start point for optimization.
-     * @param maxEval Maximum number of function evaluations.
-     * @param lowerBound Lower bound for each of the parameters.
-     * @param upperBound Upper bound for each of the parameters.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the array sizes are wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * {@code f}, {@code goalType} or {@code startPoint} is {@code null}.
-     * @throws org.apache.commons.math4.exception.NumberIsTooSmallException if any
-     * of the initial values is less than its lower bound.
-     * @throws org.apache.commons.math4.exception.NumberIsTooLargeException if any
-     * of the initial values is greater than its upper bound.
-     */
-    PointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                double[] startPoint,
-                                double[] lowerBound, double[] upperBound);
-}
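
The simple-bounds interface above only adds lowerBound/upperBound arrays to the optimize call; one way the library supports bounds with unbounded algorithms is the mapping adapter listed in the diffstat. A small sketch, assuming the surviving org.apache.commons.math4.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter keeps the same constructor and boundedToUnbounded/unboundedToBounded methods as its math3 ancestor:

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter;

    public class BoundsAdapterSketch {
        public static void main(String[] args) {
            // Objective defined on the box [0, 5] x [0, 5].
            MultivariateFunction bounded = new MultivariateFunction() {
                public double value(double[] p) {
                    return (p[0] - 2) * (p[0] - 2) + (p[1] - 3) * (p[1] - 3);
                }
            };

            // Adapter maps the box onto R^2 so an unbounded optimizer can be used.
            MultivariateFunctionMappingAdapter adapter =
                new MultivariateFunctionMappingAdapter(bounded,
                                                       new double[] { 0, 0 },
                                                       new double[] { 5, 5 });

            // The start point is converted to the unbounded space before optimizing;
            // the optimizer's result goes back through unboundedToBounded(...).
            double[] unboundedStart = adapter.boundedToUnbounded(new double[] { 1, 1 });
            System.out.println("unbounded start = " + java.util.Arrays.toString(unboundedStart));
        }
    }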

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorMultiStartOptimizer.java
deleted file mode 100644
index f3048d1..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorMultiStartOptimizer.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import java.util.Arrays;
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Base class for all implementations of a multi-start optimizer.
- *
- * This class is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * {@link DifferentiableMultivariateVectorMultiStartOptimizer}.
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class BaseMultivariateVectorMultiStartOptimizer<FUNC extends MultivariateVectorFunction>
-    implements BaseMultivariateVectorOptimizer<FUNC> {
-    /** Underlying classical optimizer. */
-    private final BaseMultivariateVectorOptimizer<FUNC> optimizer;
-    /** Maximal number of evaluations allowed. */
-    private int maxEvaluations;
-    /** Number of evaluations already performed for all starts. */
-    private int totalEvaluations;
-    /** Number of starts to go. */
-    private int starts;
-    /** Random generator for multi-start. */
-    private RandomVectorGenerator generator;
-    /** Found optima. */
-    private PointVectorValuePair[] optima;
-
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform. If {@code starts == 1},
-     * the {@link #optimize(int,MultivariateVectorFunction,double[],double[],double[])
-     * optimize} method will return the same solution as {@code optimizer} would.
-     * @param generator Random vector generator to use for restarts.
-     * @throws NullArgumentException if {@code optimizer} or {@code generator}
-     * is {@code null}.
-     * @throws NotStrictlyPositiveException if {@code starts < 1}.
-     */
-    protected BaseMultivariateVectorMultiStartOptimizer(final BaseMultivariateVectorOptimizer<FUNC> optimizer,
-                                                           final int starts,
-                                                           final RandomVectorGenerator generator) {
-        if (optimizer == null ||
-            generator == null) {
-            throw new NullArgumentException();
-        }
-        if (starts < 1) {
-            throw new NotStrictlyPositiveException(starts);
-        }
-
-        this.optimizer = optimizer;
-        this.starts = starts;
-        this.generator = generator;
-    }
-
-    /**
-     * Get all the optima found during the last call to {@link
-     * #optimize(int,MultivariateVectorFunction,double[],double[],double[]) optimize}.
-     * The optimizer stores all the optima found during a set of
-     * restarts. The {@link #optimize(int,MultivariateVectorFunction,double[],double[],double[])
-     * optimize} method returns the best point only. This method
-     * returns all the points found at the end of each start, including
-     * the best one already returned by the {@link
-     * #optimize(int,MultivariateVectorFunction,double[],double[],double[]) optimize} method.
-     * <br/>
-     * The returned array has one element for each start as specified
-     * in the constructor. It is ordered with the results from the
-     * runs that did converge first, sorted from best to worst
-     * objective value (i.e. in ascending order if minimizing and in
-     * descending order if maximizing), followed by {@code null} elements
-     * corresponding to the runs that did not converge. This means all
-     * elements will be {@code null} if the {@link
-     * #optimize(int,MultivariateVectorFunction,double[],double[],double[]) optimize} method did
-     * throw a {@link ConvergenceException}. This also means that if
-     * the first element is not {@code null}, it is the best point found
-     * across all starts.
-     *
-     * @return array containing the optima
-     * @throws MathIllegalStateException if {@link
-     * #optimize(int,MultivariateVectorFunction,double[],double[],double[]) optimize} has not been
-     * called.
-     */
-    public PointVectorValuePair[] getOptima() {
-        if (optima == null) {
-            throw new MathIllegalStateException(LocalizedFormats.NO_OPTIMUM_COMPUTED_YET);
-        }
-        return optima.clone();
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return maxEvaluations;
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return totalEvaluations;
-    }
-
-    /** {@inheritDoc} */
-    public ConvergenceChecker<PointVectorValuePair> getConvergenceChecker() {
-        return optimizer.getConvergenceChecker();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public PointVectorValuePair optimize(int maxEval, final FUNC f,
-                                            double[] target, double[] weights,
-                                            double[] startPoint) {
-        maxEvaluations = maxEval;
-        RuntimeException lastException = null;
-        optima = new PointVectorValuePair[starts];
-        totalEvaluations = 0;
-
-        // Multi-start loop.
-        for (int i = 0; i < starts; ++i) {
-
-            // CHECKSTYLE: stop IllegalCatch
-            try {
-                optima[i] = optimizer.optimize(maxEval - totalEvaluations, f, target, weights,
-                                               i == 0 ? startPoint : generator.nextVector());
-            } catch (ConvergenceException oe) {
-                optima[i] = null;
-            } catch (RuntimeException mue) {
-                lastException = mue;
-                optima[i] = null;
-            }
-            // CHECKSTYLE: resume IllegalCatch
-
-            totalEvaluations += optimizer.getEvaluations();
-        }
-
-        sortPairs(target, weights);
-
-        if (optima[0] == null) {
-            throw lastException; // cannot be null if starts >=1
-        }
-
-        // Return the found point given the best objective function value.
-        return optima[0];
-    }
-
-    /**
-     * Sort the optima from best to worst, followed by {@code null} elements.
-     *
-     * @param target Target value for the objective functions at optimum.
-     * @param weights Weights for the least-squares cost computation.
-     */
-    private void sortPairs(final double[] target,
-                           final double[] weights) {
-        Arrays.sort(optima, new Comparator<PointVectorValuePair>() {
-                public int compare(final PointVectorValuePair o1,
-                                   final PointVectorValuePair o2) {
-                    if (o1 == null) {
-                        return (o2 == null) ? 0 : 1;
-                    } else if (o2 == null) {
-                        return -1;
-                    }
-                    return Double.compare(weightedResidual(o1), weightedResidual(o2));
-                }
-                private double weightedResidual(final PointVectorValuePair pv) {
-                    final double[] value = pv.getValueRef();
-                    double sum = 0;
-                    for (int i = 0; i < value.length; ++i) {
-                        final double ri = value[i] - target[i];
-                        sum += weights[i] * ri * ri;
-                    }
-                    return sum;
-                }
-            });
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorOptimizer.java
deleted file mode 100644
index 34908ec..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseMultivariateVectorOptimizer.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-
-/**
- * This interface is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * the following interfaces:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer}</li>
- * </ul>
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface BaseMultivariateVectorOptimizer<FUNC extends MultivariateVectorFunction>
-    extends BaseOptimizer<PointVectorValuePair> {
-    /**
-     * Optimize an objective function.
-     * Optimization is considered to be a weighted least-squares minimization.
-     * The cost function to be minimized is
-     * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
-     *
-     * @param maxEval Maximum number of function evaluations.
-     * @param f Objective function.
-     * @param target Target value for the objective functions at optimum.
-     * @param weight Weights for the least-squares cost computation.
-     * @param startPoint Start point for optimization.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     * @deprecated As of 3.1. In 4.0, this will be replaced by the declaration
-     * corresponding to this {@link org.apache.commons.math4.optimization.direct.BaseAbstractMultivariateVectorOptimizer#optimize(int,MultivariateVectorFunction,OptimizationData[]) method}.
-     */
-    @Deprecated
-    PointVectorValuePair optimize(int maxEval, FUNC f, double[] target,
-                                     double[] weight, double[] startPoint);
-}
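
The cost function quoted in the Javadoc above is a plain weighted sum of squared residuals; spelled out in Java for clarity (WeightedCost is just an illustrative helper, not part of the library):

    public final class WeightedCost {
        /** Weighted least-squares cost: sum_i weight[i] * (objective[i] - target[i])^2. */
        public static double of(double[] objective, double[] target, double[] weight) {
            double sum = 0;
            for (int i = 0; i < objective.length; i++) {
                final double residual = objective[i] - target[i];
                sum += weight[i] * residual * residual;
            }
            return sum;
        }

        private WeightedCost() {}
    }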

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/BaseOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/BaseOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/BaseOptimizer.java
deleted file mode 100644
index 68c1f87..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/BaseOptimizer.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-/**
- * This interface is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * the following interfaces:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateOptimizer}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableOptimizer}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableVectorOptimizer}</li>
- *  <li>{@link org.apache.commons.math4.optimization.univariate.UnivariateOptimizer}</li>
- * </ul>
- *
- * @param <PAIR> Type of the point/objective pair.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface BaseOptimizer<PAIR> {
-    /**
-     * Get the maximal number of function evaluations.
-     *
-     * @return the maximal number of function evaluations.
-     */
-    int getMaxEvaluations();
-
-    /**
-     * Get the number of evaluations of the objective function.
-     * The number of evaluations corresponds to the last call to the
-     * {@code optimize} method. It is 0 if the method has not been
-     * called yet.
-     *
-     * @return the number of evaluations of the objective function.
-     */
-    int getEvaluations();
-
-    /**
-     * Get the convergence checker.
-     *
-     * @return the object used to check for convergence.
-     */
-    ConvergenceChecker<PAIR> getConvergenceChecker();
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/ConvergenceChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/ConvergenceChecker.java b/src/main/java/org/apache/commons/math4/optimization/ConvergenceChecker.java
deleted file mode 100644
index 3c157dc..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/ConvergenceChecker.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-/**
- * This interface specifies how to check if an optimization algorithm has
- * converged.
- * <br/>
- * Deciding if convergence has been reached is a problem-dependent issue. The
- * user should provide a class implementing this interface to allow the
- * optimization algorithm to stop its search according to the problem at hand.
- * <br/>
- * For convenience, three implementations that fit simple needs are already
- * provided: {@link SimpleValueChecker}, {@link SimpleVectorValueChecker} and
- * {@link SimplePointChecker}. The first two consider that convergence is
- * reached when the objective function value does not change much anymore;
- * they do not use the point set at all.
- * The third one considers that convergence is reached when the input point
- * set does not change much anymore; it does not use the objective function
- * value at all.
- *
- * @param <PAIR> Type of the (point, objective value) pair.
- *
- * @see org.apache.commons.math4.optimization.SimplePointChecker
- * @see org.apache.commons.math4.optimization.SimpleValueChecker
- * @see org.apache.commons.math4.optimization.SimpleVectorValueChecker
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface ConvergenceChecker<PAIR> {
-    /**
-     * Check if the optimization algorithm has converged.
-     *
-     * @param iteration Current iteration.
-     * @param previous Best point in the previous iteration.
-     * @param current Best point in the current iteration.
-     * @return {@code true} if the algorithm is considered to have converged.
-     */
-    boolean converged(int iteration, PAIR previous, PAIR current);
-}
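
As the Javadoc above notes, convergence is problem-dependent and users supply their own checker. An illustrative point-based implementation of the removed interface (the class name PointStallChecker is hypothetical):

    import org.apache.commons.math4.optimization.ConvergenceChecker;
    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.util.FastMath;

    /** Stops once every coordinate of the best point moves by less than a fixed tolerance. */
    public class PointStallChecker implements ConvergenceChecker<PointValuePair> {
        private final double tolerance;

        public PointStallChecker(double tolerance) {
            this.tolerance = tolerance;
        }

        public boolean converged(int iteration, PointValuePair previous, PointValuePair current) {
            final double[] p = previous.getPoint();
            final double[] c = current.getPoint();
            for (int i = 0; i < p.length; i++) {
                if (FastMath.abs(p[i] - c[i]) > tolerance) {
                    return false;
                }
            }
            return true;
        }
    }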

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateMultiStartOptimizer.java
deleted file mode 100644
index 27d2f8c..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateMultiStartOptimizer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateFunction;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Special implementation of the {@link DifferentiableMultivariateOptimizer}
- * interface adding multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class DifferentiableMultivariateMultiStartOptimizer
-    extends BaseMultivariateMultiStartOptimizer<DifferentiableMultivariateFunction>
-    implements DifferentiableMultivariateOptimizer {
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform (including the
-     * first one), multi-start is disabled if value is less than or
-     * equal to 1.
-     * @param generator Random vector generator to use for restarts.
-     */
-    public DifferentiableMultivariateMultiStartOptimizer(final DifferentiableMultivariateOptimizer optimizer,
-                                                         final int starts,
-                                                         final RandomVectorGenerator generator) {
-        super(optimizer, starts, generator);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateOptimizer.java
deleted file mode 100644
index f1d8da2..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateOptimizer.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateFunction;
-
-/**
- * This interface represents an optimization algorithm for
- * {@link DifferentiableMultivariateFunction scalar differentiable objective
- * functions}.
- * Optimization algorithms find the input point set that either {@link GoalType
- * maximize or minimize} an objective function.
- *
- * @see MultivariateOptimizer
- * @see DifferentiableMultivariateVectorOptimizer
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public interface DifferentiableMultivariateOptimizer
-    extends BaseMultivariateOptimizer<DifferentiableMultivariateFunction> {}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorMultiStartOptimizer.java
deleted file mode 100644
index b76365e..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorMultiStartOptimizer.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateVectorFunction;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Special implementation of the {@link DifferentiableMultivariateVectorOptimizer}
- * interface adding multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class DifferentiableMultivariateVectorMultiStartOptimizer
-    extends BaseMultivariateVectorMultiStartOptimizer<DifferentiableMultivariateVectorFunction>
-    implements DifferentiableMultivariateVectorOptimizer {
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform (including the
-     * first one), multi-start is disabled if value is less than or
-     * equal to 1.
-     * @param generator Random vector generator to use for restarts.
-     */
-    public DifferentiableMultivariateVectorMultiStartOptimizer(
-                final DifferentiableMultivariateVectorOptimizer optimizer,
-                final int starts,
-                final RandomVectorGenerator generator) {
-        super(optimizer, starts, generator);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorOptimizer.java
deleted file mode 100644
index d4ecdf5..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/DifferentiableMultivariateVectorOptimizer.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateVectorFunction;
-
-/**
- * This interface represents an optimization algorithm for
- * {@link DifferentiableMultivariateVectorFunction vectorial differentiable
- * objective functions}.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface DifferentiableMultivariateVectorOptimizer
-    extends BaseMultivariateVectorOptimizer<DifferentiableMultivariateVectorFunction> {}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/GoalType.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/GoalType.java b/src/main/java/org/apache/commons/math4/optimization/GoalType.java
deleted file mode 100644
index d61072f..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/GoalType.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import java.io.Serializable;
-
-/**
- * Goal type for an optimization problem.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public enum GoalType implements Serializable {
-
-    /** Maximization goal. */
-    MAXIMIZE,
-
-    /** Minimization goal. */
-    MINIMIZE
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/InitialGuess.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/InitialGuess.java b/src/main/java/org/apache/commons/math4/optimization/InitialGuess.java
deleted file mode 100644
index b12680c..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/InitialGuess.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-/**
- * Starting point (first guess) of the optimization procedure.
- * <br/>
- * Immutable class.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class InitialGuess implements OptimizationData {
-    /** Initial guess. */
-    private final double[] init;
-
-    /**
-     * @param startPoint Initial guess.
-     */
-    public InitialGuess(double[] startPoint) {
-        init = startPoint.clone();
-    }
-
-    /**
-     * Gets the initial guess.
-     *
-     * @return the initial guess.
-     */
-    public double[] getInitialGuess() {
-        return init.clone();
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/LeastSquaresConverter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/LeastSquaresConverter.java b/src/main/java/org/apache/commons/math4/optimization/LeastSquaresConverter.java
deleted file mode 100644
index 74ca4ee..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/LeastSquaresConverter.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.linear.RealMatrix;
-
-/** This class converts {@link MultivariateVectorFunction vectorial
- * objective functions} to {@link MultivariateFunction scalar objective functions}
- * when the goal is to minimize them.
- * <p>
- * This class is mostly used when the vectorial objective function represents
- * a theoretical result computed from a point set applied to a model, and
- * the model's input point must be adjusted to fit the theoretical result to some
- * reference observations. The observations may be obtained, for example, from
- * physical measurements, whereas the model is built from theoretical
- * considerations.
- * </p>
- * <p>
- * This class computes a possibly weighted squared sum of the residuals, which is
- * a scalar value. The residuals are the difference between the theoretical model
- * (i.e. the output of the vectorial objective function) and the observations. The
- * class implements the {@link MultivariateFunction} interface and can therefore be
- * minimized by any optimizer supporting scalar objective functions. This is one way
- * to perform a least squares estimation. There are other ways to do this without using
- * this converter, as some optimization algorithms directly support vectorial objective
- * functions.
- * </p>
- * <p>
- * This class supports combining residuals with or without weights and correlations.
- * </p>
- *
- * @see MultivariateFunction
- * @see MultivariateVectorFunction
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-
-@Deprecated
-public class LeastSquaresConverter implements MultivariateFunction {
-
-    /** Underlying vectorial function. */
-    private final MultivariateVectorFunction function;
-
-    /** Observations to be compared to objective function to compute residuals. */
-    private final double[] observations;
-
-    /** Optional weights for the residuals. */
-    private final double[] weights;
-
-    /** Optional scaling matrix (weight and correlations) for the residuals. */
-    private final RealMatrix scale;
-
-    /** Build a simple converter for uncorrelated residuals with the same weight.
-     * @param function vectorial residuals function to wrap
-     * @param observations observations to be compared to objective function to compute residuals
-     */
-    public LeastSquaresConverter(final MultivariateVectorFunction function,
-                                 final double[] observations) {
-        this.function     = function;
-        this.observations = observations.clone();
-        this.weights      = null;
-        this.scale        = null;
-    }
-
-    /** Build a simple converter for uncorrelated residuals with the specific weights.
-     * <p>
-     * The scalar objective function value is computed as:
-     * <pre>
-     * objective = &sum;weight<sub>i</sub>(observation<sub>i</sub>-objective<sub>i</sub>)<sup>2</sup>
-     * </pre>
-     * </p>
-     * <p>
-     * Weights can be used for example to combine residuals with different standard
-     * deviations. As an example, consider a residuals array in which even elements
-     * are angular measurements in degrees with a 0.01&deg; standard deviation and
-     * odd elements are distance measurements in meters with a 15m standard deviation.
-     * In this case, the weights array should be initialized with value
-     * 1.0/(0.01<sup>2</sup>) in the even elements and 1.0/(15.0<sup>2</sup>) in the
-     * odd elements (i.e. reciprocals of variances).
-     * </p>
-     * <p>
-     * The array computed by the objective function, the observations array and the
-     * weights array must have consistent sizes or a {@link DimensionMismatchException}
-     * will be triggered while computing the scalar objective.
-     * </p>
-     * @param function vectorial residuals function to wrap
-     * @param observations observations to be compared to objective function to compute residuals
-     * @param weights weights to apply to the residuals
-     * @exception DimensionMismatchException if the observations vector and the weights
-     * vector dimensions do not match (objective function dimension is checked only when
-     * the {@link #value(double[])} method is called)
-     */
-    public LeastSquaresConverter(final MultivariateVectorFunction function,
-                                 final double[] observations, final double[] weights) {
-        if (observations.length != weights.length) {
-            throw new DimensionMismatchException(observations.length, weights.length);
-        }
-        this.function     = function;
-        this.observations = observations.clone();
-        this.weights      = weights.clone();
-        this.scale        = null;
-    }
-
-    /** Build a simple converter for correlated residuals with the specific weights.
-     * <p>
-     * The scalar objective function value is computed as:
-     * <pre>
-     * objective = y<sup>T</sup>y with y = scale&times;(observation-objective)
-     * </pre>
-     * </p>
-     * <p>
-     * The array computed by the objective function, the observations array and
-     * the scaling matrix must have consistent sizes or a {@link DimensionMismatchException}
-     * will be triggered while computing the scalar objective.
-     * </p>
-     * @param function vectorial residuals function to wrap
-     * @param observations observations to be compared to objective function to compute residuals
-     * @param scale scaling matrix
-     * @throws DimensionMismatchException if the observations vector and the scale
-     * matrix dimensions do not match (objective function dimension is checked only when
-     * the {@link #value(double[])} method is called)
-     */
-    public LeastSquaresConverter(final MultivariateVectorFunction function,
-                                 final double[] observations, final RealMatrix scale) {
-        if (observations.length != scale.getColumnDimension()) {
-            throw new DimensionMismatchException(observations.length, scale.getColumnDimension());
-        }
-        this.function     = function;
-        this.observations = observations.clone();
-        this.weights      = null;
-        this.scale        = scale.copy();
-    }
-
-    /** {@inheritDoc} */
-    public double value(final double[] point) {
-        // compute residuals
-        final double[] residuals = function.value(point);
-        if (residuals.length != observations.length) {
-            throw new DimensionMismatchException(residuals.length, observations.length);
-        }
-        for (int i = 0; i < residuals.length; ++i) {
-            residuals[i] -= observations[i];
-        }
-
-        // compute sum of squares
-        double sumSquares = 0;
-        if (weights != null) {
-            for (int i = 0; i < residuals.length; ++i) {
-                final double ri = residuals[i];
-                sumSquares +=  weights[i] * ri * ri;
-            }
-        } else if (scale != null) {
-            for (final double yi : scale.operate(residuals)) {
-                sumSquares += yi * yi;
-            }
-        } else {
-            for (final double ri : residuals) {
-                sumSquares += ri * ri;
-            }
-        }
-
-        return sumSquares;
-    }
-}
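For reference, a minimal usage sketch of the LeastSquaresConverter removed above, based only on the constructors and the value(double[]) method shown in the deleted source. It compiles only against a release that still ships the deprecated org.apache.commons.math4.optimization package; the model, observation and weight values are made up for illustration.

    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.optimization.LeastSquaresConverter;

    public class LeastSquaresConverterSketch {
        public static void main(String[] args) {
            // Hypothetical model: two theoretical values computed from one parameter p.
            MultivariateVectorFunction model = new MultivariateVectorFunction() {
                public double[] value(double[] p) {
                    return new double[] { 2 * p[0], 3 * p[0] };
                }
            };
            double[] observations = { 4.0, 6.0 };   // reference observations to fit
            double[] weights      = { 1.0, 0.25 };  // e.g. reciprocals of variances

            // Scalar objective: sum_i weight_i * (model_i(p) - observation_i)^2
            LeastSquaresConverter objective =
                new LeastSquaresConverter(model, observations, weights);

            System.out.println(objective.value(new double[] { 1.0 })); // prints 6.25
        }
    }

With p = 1.0 the residuals are (2 - 4, 3 - 6), so the weighted sum of squares is 1.0*4 + 0.25*9 = 6.25. As the Javadoc above notes, the weights would typically be the reciprocals of the measurement variances; the third constructor instead applies a scaling matrix and returns y'y with y = scale x (observation - objective).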

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizer.java
deleted file mode 100644
index ca558f0..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Special implementation of the {@link MultivariateDifferentiableOptimizer}
- * interface adding multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class MultivariateDifferentiableMultiStartOptimizer
-    extends BaseMultivariateMultiStartOptimizer<MultivariateDifferentiableFunction>
-    implements MultivariateDifferentiableOptimizer {
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform (including the
-     * first one), multi-start is disabled if value is less than or
-     * equal to 1.
-     * @param generator Random vector generator to use for restarts.
-     */
-    public MultivariateDifferentiableMultiStartOptimizer(final MultivariateDifferentiableOptimizer optimizer,
-                                                         final int starts,
-                                                         final RandomVectorGenerator generator) {
-        super(optimizer, starts, generator);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableOptimizer.java
deleted file mode 100644
index 67e894e..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableOptimizer.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-
-/**
- * This interface represents an optimization algorithm for
- * {@link MultivariateDifferentiableFunction scalar differentiable objective
- * functions}.
- * Optimization algorithms find the input point set that either {@link GoalType
- * maximize or minimize} an objective function.
- *
- * @see MultivariateOptimizer
- * @see MultivariateDifferentiableVectorOptimizer
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public interface MultivariateDifferentiableOptimizer
-    extends BaseMultivariateOptimizer<MultivariateDifferentiableFunction> {}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizer.java
deleted file mode 100644
index 63e8953..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizer.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Special implementation of the {@link MultivariateDifferentiableVectorOptimizer}
- * interface adding multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class MultivariateDifferentiableVectorMultiStartOptimizer
-    extends BaseMultivariateVectorMultiStartOptimizer<MultivariateDifferentiableVectorFunction>
-    implements MultivariateDifferentiableVectorOptimizer {
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform (including the
-     * first one), multi-start is disabled if value is less than or
-     * equal to 1.
-     * @param generator Random vector generator to use for restarts.
-     */
-    public MultivariateDifferentiableVectorMultiStartOptimizer(
-                final MultivariateDifferentiableVectorOptimizer optimizer,
-                final int starts,
-                final RandomVectorGenerator generator) {
-        super(optimizer, starts, generator);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorOptimizer.java
deleted file mode 100644
index 569624d..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorOptimizer.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-
-/**
- * This interface represents an optimization algorithm for
- * {@link MultivariateDifferentiableVectorFunction differentiable vectorial
- * objective functions}.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public interface MultivariateDifferentiableVectorOptimizer
-    extends BaseMultivariateVectorOptimizer<MultivariateDifferentiableVectorFunction> {}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizer.java
deleted file mode 100644
index 8c0df54..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-
-/**
- * Special implementation of the {@link MultivariateOptimizer} interface adding
- * multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class MultivariateMultiStartOptimizer
-    extends BaseMultivariateMultiStartOptimizer<MultivariateFunction>
-    implements MultivariateOptimizer {
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform (including the
-     * first one), multi-start is disabled if value is less than or
-     * equal to 1.
-     * @param generator Random vector generator to use for restarts.
-     */
-    public MultivariateMultiStartOptimizer(final MultivariateOptimizer optimizer,
-                                               final int starts,
-                                               final RandomVectorGenerator generator) {
-        super(optimizer, starts, generator);
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/MultivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/MultivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/MultivariateOptimizer.java
deleted file mode 100644
index e0d2715..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/MultivariateOptimizer.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-
-/**
- * This interface represents an optimization algorithm for {@link MultivariateFunction
- * scalar objective functions}.
- * <p>Optimization algorithms find the input point set that either {@link GoalType
- * maximize or minimize} an objective function.</p>
- *
- * @see MultivariateDifferentiableOptimizer
- * @see MultivariateDifferentiableVectorOptimizer
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public interface MultivariateOptimizer
-    extends BaseMultivariateOptimizer<MultivariateFunction> {}


[08/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizer.java
deleted file mode 100644
index cbf73c5..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizer.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import java.util.Arrays;
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.random.RandomGenerator;
-
-/**
- * Special implementation of the {@link UnivariateOptimizer} interface
- * adding multi-start features to an existing optimizer.
- *
- * This class wraps a classical optimizer to use it several times in
- * turn with different starting points in order to avoid being trapped
- * into a local extremum when looking for a global one.
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class UnivariateMultiStartOptimizer<FUNC extends UnivariateFunction>
-    implements BaseUnivariateOptimizer<FUNC> {
-    /** Underlying classical optimizer. */
-    private final BaseUnivariateOptimizer<FUNC> optimizer;
-    /** Maximal number of evaluations allowed. */
-    private int maxEvaluations;
-    /** Number of evaluations already performed for all starts. */
-    private int totalEvaluations;
-    /** Number of starts to go. */
-    private int starts;
-    /** Random generator for multi-start. */
-    private RandomGenerator generator;
-    /** Found optima. */
-    private UnivariatePointValuePair[] optima;
-
-    /**
-     * Create a multi-start optimizer from a single-start optimizer.
-     *
-     * @param optimizer Single-start optimizer to wrap.
-     * @param starts Number of starts to perform. If {@code starts == 1},
-     * the {@code optimize} methods will return the same solution as
-     * {@code optimizer} would.
-     * @param generator Random generator to use for restarts.
-     * @throws NullArgumentException if {@code optimizer} or {@code generator}
-     * is {@code null}.
-     * @throws NotStrictlyPositiveException if {@code starts < 1}.
-     */
-    public UnivariateMultiStartOptimizer(final BaseUnivariateOptimizer<FUNC> optimizer,
-                                             final int starts,
-                                             final RandomGenerator generator) {
-        if (optimizer == null ||
-                generator == null) {
-                throw new NullArgumentException();
-        }
-        if (starts < 1) {
-            throw new NotStrictlyPositiveException(starts);
-        }
-
-        this.optimizer = optimizer;
-        this.starts = starts;
-        this.generator = generator;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public ConvergenceChecker<UnivariatePointValuePair> getConvergenceChecker() {
-        return optimizer.getConvergenceChecker();
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return maxEvaluations;
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return totalEvaluations;
-    }
-
-    /**
-     * Get all the optima found during the last call to {@link
-     * #optimize(int,UnivariateFunction,GoalType,double,double) optimize}.
-     * The optimizer stores all the optima found during a set of
-     * restarts. The {@link #optimize(int,UnivariateFunction,GoalType,double,double) optimize}
-     * method returns the best point only. This method returns all the points
-     * found at the end of each start, including the best one already
-     * returned by the {@link #optimize(int,UnivariateFunction,GoalType,double,double) optimize}
-     * method.
-     * <br/>
-     * The returned array has one element for each start as specified
-     * in the constructor. It is ordered with the results from the
-     * runs that did converge first, sorted from best to worst
-     * objective value (i.e. in ascending order if minimizing and in
-     * descending order if maximizing), followed by {@code null} elements
-     * corresponding to the runs that did not converge. This means all
-     * elements will be {@code null} if the {@link
-     * #optimize(int,UnivariateFunction,GoalType,double,double) optimize}
-     * method did throw an exception.
-     * This also means that if the first element is not {@code null}, it is
-     * the best point found across all starts.
-     *
-     * @return an array containing the optima.
-     * @throws MathIllegalStateException if {@link
-     * #optimize(int,UnivariateFunction,GoalType,double,double) optimize}
-     * has not been called.
-     */
-    public UnivariatePointValuePair[] getOptima() {
-        if (optima == null) {
-            throw new MathIllegalStateException(LocalizedFormats.NO_OPTIMUM_COMPUTED_YET);
-        }
-        return optima.clone();
-    }
-
-    /** {@inheritDoc} */
-    public UnivariatePointValuePair optimize(int maxEval, final FUNC f,
-                                                 final GoalType goal,
-                                                 final double min, final double max) {
-        return optimize(maxEval, f, goal, min, max, min + 0.5 * (max - min));
-    }
-
-    /** {@inheritDoc} */
-    public UnivariatePointValuePair optimize(int maxEval, final FUNC f,
-                                                 final GoalType goal,
-                                                 final double min, final double max,
-                                                 final double startValue) {
-        RuntimeException lastException = null;
-        optima = new UnivariatePointValuePair[starts];
-        totalEvaluations = 0;
-
-        // Multi-start loop.
-        for (int i = 0; i < starts; ++i) {
-            // CHECKSTYLE: stop IllegalCatch
-            try {
-                final double s = (i == 0) ? startValue : min + generator.nextDouble() * (max - min);
-                optima[i] = optimizer.optimize(maxEval - totalEvaluations, f, goal, min, max, s);
-            } catch (RuntimeException mue) {
-                lastException = mue;
-                optima[i] = null;
-            }
-            // CHECKSTYLE: resume IllegalCatch
-
-            totalEvaluations += optimizer.getEvaluations();
-        }
-
-        sortPairs(goal);
-
-        if (optima[0] == null) {
-            throw lastException; // cannot be null if starts >=1
-        }
-
-        // Return the point with the best objective function value.
-        return optima[0];
-    }
-
-    /**
-     * Sort the optima from best to worst, followed by {@code null} elements.
-     *
-     * @param goal Goal type.
-     */
-    private void sortPairs(final GoalType goal) {
-        Arrays.sort(optima, new Comparator<UnivariatePointValuePair>() {
-                public int compare(final UnivariatePointValuePair o1,
-                                   final UnivariatePointValuePair o2) {
-                    if (o1 == null) {
-                        return (o2 == null) ? 0 : 1;
-                    } else if (o2 == null) {
-                        return -1;
-                    }
-                    final double v1 = o1.getValue();
-                    final double v2 = o2.getValue();
-                    return (goal == GoalType.MINIMIZE) ?
-                        Double.compare(v1, v2) : Double.compare(v2, v1);
-                }
-            });
-    }
-}
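The multi-start wrappers deleted in this commit all follow the pattern spelled out in the loop of UnivariateMultiStartOptimizer above: run the wrapped single-start search once from the supplied start value, then from random points, and keep the best result. Below is a library-free sketch of that pattern; the names multiStartMinimize and localSearch are illustrative only and are not part of any Commons Math API.

    import java.util.Random;
    import java.util.function.DoubleUnaryOperator;

    public class MultiStartSketch {
        // f: objective to minimize on [min, max].
        // localSearch: maps a start point to the local minimum a single-start search reaches.
        static double multiStartMinimize(DoubleUnaryOperator f,
                                         DoubleUnaryOperator localSearch,
                                         double min, double max,
                                         double startValue, int starts, long seed) {
            Random random = new Random(seed);
            double bestPoint = Double.NaN;
            double bestValue = Double.POSITIVE_INFINITY;
            for (int i = 0; i < starts; i++) {
                // First run from the supplied start value, later runs from random points,
                // mirroring the loop in the deleted optimizer.
                double start = (i == 0) ? startValue : min + random.nextDouble() * (max - min);
                double point = localSearch.applyAsDouble(start);
                double value = f.applyAsDouble(point);
                if (value < bestValue) { // keep the smallest objective value seen so far
                    bestValue = value;
                    bestPoint = point;
                }
            }
            return bestPoint;
        }
    }

The deleted class additionally records the result of every run so that getOptima() can report them sorted from best to worst, with null entries for the runs that failed.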

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateOptimizer.java
deleted file mode 100644
index b621c8b..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariateOptimizer.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-
-/**
- * Interface for univariate optimization algorithms.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface UnivariateOptimizer
-    extends BaseUnivariateOptimizer<UnivariateFunction> {}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariatePointValuePair.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariatePointValuePair.java b/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariatePointValuePair.java
deleted file mode 100644
index 6f5c450..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/UnivariatePointValuePair.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import java.io.Serializable;
-
-/**
- * This class holds a point and the value of an objective function at this
- * point.
- * This is a simple immutable container.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class UnivariatePointValuePair implements Serializable {
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = 1003888396256744753L;
-    /** Point. */
-    private final double point;
-    /** Value of the objective function at the point. */
-    private final double value;
-
-    /**
-     * Build a point/objective function value pair.
-     *
-     * @param point Point.
-     * @param value Value of an objective function at the point
-     */
-    public UnivariatePointValuePair(final double point,
-                                    final double value) {
-        this.point = point;
-        this.value = value;
-    }
-
-    /**
-     * Get the point.
-     *
-     * @return the point.
-     */
-    public double getPoint() {
-        return point;
-    }
-
-    /**
-     * Get the value of the objective function.
-     *
-     * @return the stored value of the objective function.
-     */
-    public double getValue() {
-        return value;
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/package-info.java b/src/main/java/org/apache/commons/math4/optimization/univariate/package-info.java
deleted file mode 100644
index 97258e3..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *
- *     Minimum-finding algorithms for univariate real functions.
- *
- */
-package org.apache.commons.math4.optimization.univariate;

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizerTest.java
deleted file mode 100644
index 60c412e..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableMultiStartOptimizerTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateDifferentiableMultiStartOptimizer;
-import org.apache.commons.math4.optimization.MultivariateDifferentiableOptimizer;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.optimization.general.CircleScalar;
-import org.apache.commons.math4.optimization.general.ConjugateGradientFormula;
-import org.apache.commons.math4.optimization.general.NonLinearConjugateGradientOptimizer;
-import org.apache.commons.math4.random.GaussianRandomGenerator;
-import org.apache.commons.math4.random.JDKRandomGenerator;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-import org.apache.commons.math4.random.UncorrelatedRandomVectorGenerator;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class MultivariateDifferentiableMultiStartOptimizerTest {
-
-    @Test
-    public void testCircleFitting() {
-        CircleScalar circle = new CircleScalar();
-        circle.addPoint( 30.0,  68.0);
-        circle.addPoint( 50.0,  -6.0);
-        circle.addPoint(110.0, -20.0);
-        circle.addPoint( 35.0,  15.0);
-        circle.addPoint( 45.0,  97.0);
-        // TODO: the wrapper around NonLinearConjugateGradientOptimizer is a temporary hack for
-        // version 3.1 of the library. It should be removed when NonLinearConjugateGradientOptimizer
-        // will officially be declared as implementing MultivariateDifferentiableOptimizer
-        MultivariateDifferentiableOptimizer underlying =
-                new MultivariateDifferentiableOptimizer() {
-
-            private final NonLinearConjugateGradientOptimizer cg =
-                    new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
-                                                            new SimpleValueChecker(1.0e-10, 1.0e-10));
-            public PointValuePair optimize(int maxEval,
-                                           MultivariateDifferentiableFunction f,
-                                           GoalType goalType,
-                                           double[] startPoint) {
-                return cg.optimize(maxEval, f, goalType, startPoint);
-            }
-
-            public int getMaxEvaluations() {
-                return cg.getMaxEvaluations();
-            }
-
-            public int getEvaluations() {
-                return cg.getEvaluations();
-            }
-
-            public ConvergenceChecker<PointValuePair> getConvergenceChecker() {
-                return cg.getConvergenceChecker();
-            }
-        };
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(753289573253l);
-        RandomVectorGenerator generator =
-            new UncorrelatedRandomVectorGenerator(new double[] { 50.0, 50.0 }, new double[] { 10.0, 10.0 },
-                                                  new GaussianRandomGenerator(g));
-        MultivariateDifferentiableMultiStartOptimizer optimizer =
-            new MultivariateDifferentiableMultiStartOptimizer(underlying, 10, generator);
-        PointValuePair optimum =
-            optimizer.optimize(200, circle, GoalType.MINIMIZE, new double[] { 98.680, 47.345 });
-        Assert.assertEquals(200, optimizer.getMaxEvaluations());
-        PointValuePair[] optima = optimizer.getOptima();
-        for (PointValuePair o : optima) {
-            Vector2D center = new Vector2D(o.getPointRef()[0], o.getPointRef()[1]);
-            Assert.assertEquals(69.960161753, circle.getRadius(center), 1.0e-8);
-            Assert.assertEquals(96.075902096, center.getX(), 1.0e-8);
-            Assert.assertEquals(48.135167894, center.getY(), 1.0e-8);
-        }
-        Assert.assertTrue(optimizer.getEvaluations() > 70);
-        Assert.assertTrue(optimizer.getEvaluations() < 90);
-        Assert.assertEquals(3.1267527, optimum.getValue(), 1.0e-8);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizerTest.java
deleted file mode 100644
index f36d364..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/MultivariateDifferentiableVectorMultiStartOptimizerTest.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.linear.BlockRealMatrix;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.MultivariateDifferentiableVectorMultiStartOptimizer;
-import org.apache.commons.math4.optimization.MultivariateDifferentiableVectorOptimizer;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-import org.apache.commons.math4.optimization.general.GaussNewtonOptimizer;
-import org.apache.commons.math4.random.GaussianRandomGenerator;
-import org.apache.commons.math4.random.JDKRandomGenerator;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-import org.apache.commons.math4.random.UncorrelatedRandomVectorGenerator;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public class MultivariateDifferentiableVectorMultiStartOptimizerTest {
-
-    @Test
-    public void testTrivial() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
-        // TODO: the wrapper around GaussNewtonOptimizer is a temporary hack for
-        // version 3.1 of the library. It should be removed when GaussNewtonOptimizer
-        // will officially be declared as implementing MultivariateDifferentiableVectorOptimizer
-        MultivariateDifferentiableVectorOptimizer underlyingOptimizer =
-                new MultivariateDifferentiableVectorOptimizer() {
-            private GaussNewtonOptimizer gn =
-                    new GaussNewtonOptimizer(true,
-                                             new SimpleVectorValueChecker(1.0e-6, 1.0e-6));
-
-            public PointVectorValuePair optimize(int maxEval,
-                                                 MultivariateDifferentiableVectorFunction f,
-                                                 double[] target,
-                                                 double[] weight,
-                                                 double[] startPoint) {
-                return gn.optimize(maxEval, f, target, weight, startPoint);
-            }
-
-            public int getMaxEvaluations() {
-                return gn.getMaxEvaluations();
-            }
-
-            public int getEvaluations() {
-                return gn.getEvaluations();
-            }
-
-            public ConvergenceChecker<PointVectorValuePair> getConvergenceChecker() {
-                return gn.getConvergenceChecker();
-            }
-        };
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(16069223052l);
-        RandomVectorGenerator generator =
-            new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
-        MultivariateDifferentiableVectorMultiStartOptimizer optimizer =
-            new MultivariateDifferentiableVectorMultiStartOptimizer(underlyingOptimizer,
-                                                                       10, generator);
-
-        // no optima before first optimization attempt
-        try {
-            optimizer.getOptima();
-            Assert.fail("an exception should have been thrown");
-        } catch (MathIllegalStateException ise) {
-            // expected
-        }
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1 }, new double[] { 0 });
-        Assert.assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getValue()[0], 1.0e-10);
-        PointVectorValuePair[] optima = optimizer.getOptima();
-        Assert.assertEquals(10, optima.length);
-        for (int i = 0; i < optima.length; ++i) {
-            Assert.assertEquals(1.5, optima[i].getPoint()[0], 1.0e-10);
-            Assert.assertEquals(3.0, optima[i].getValue()[0], 1.0e-10);
-        }
-        Assert.assertTrue(optimizer.getEvaluations() > 20);
-        Assert.assertTrue(optimizer.getEvaluations() < 50);
-        Assert.assertEquals(100, optimizer.getMaxEvaluations());
-    }
-
-    @Test(expected=TestException.class)
-    public void testNoOptimum() {
-
-        // TODO: the wrapper around GaussNewtonOptimizer is a temporary hack for
-        // version 3.1 of the library. It should be removed once GaussNewtonOptimizer
-        // is officially declared as implementing MultivariateDifferentiableVectorOptimizer
-        MultivariateDifferentiableVectorOptimizer underlyingOptimizer =
-                new MultivariateDifferentiableVectorOptimizer() {
-            private GaussNewtonOptimizer gn =
-                    new GaussNewtonOptimizer(true,
-                                             new SimpleVectorValueChecker(1.0e-6, 1.0e-6));
-
-            public PointVectorValuePair optimize(int maxEval,
-                                                 MultivariateDifferentiableVectorFunction f,
-                                                 double[] target,
-                                                 double[] weight,
-                                                 double[] startPoint) {
-                return gn.optimize(maxEval, f, target, weight, startPoint);
-            }
-
-            public int getMaxEvaluations() {
-                return gn.getMaxEvaluations();
-            }
-
-            public int getEvaluations() {
-                return gn.getEvaluations();
-            }
-
-            public ConvergenceChecker<PointVectorValuePair> getConvergenceChecker() {
-                return gn.getConvergenceChecker();
-            }
-        };
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(12373523445l);
-        RandomVectorGenerator generator =
-            new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
-        MultivariateDifferentiableVectorMultiStartOptimizer optimizer =
-            new MultivariateDifferentiableVectorMultiStartOptimizer(underlyingOptimizer,
-                                                                       10, generator);
-        optimizer.optimize(100, new MultivariateDifferentiableVectorFunction() {
-            public double[] value(double[] point) {
-                throw new TestException();
-            }
-            public DerivativeStructure[] value(DerivativeStructure[] point) {
-                return point;
-            }
-            }, new double[] { 2 }, new double[] { 1 }, new double[] { 0 });
-    }
-
-    private static class TestException extends RuntimeException {
-        private static final long serialVersionUID = -7809988995389067683L;
-    }
-
-    private static class LinearProblem implements MultivariateDifferentiableVectorFunction {
-
-        final RealMatrix factors;
-        final double[] target;
-        public LinearProblem(double[][] factors, double[] target) {
-            this.factors = new BlockRealMatrix(factors);
-            this.target  = target;
-        }
-
-        public double[] value(double[] variables) {
-            return factors.operate(variables);
-        }
-
-        public DerivativeStructure[] value(DerivativeStructure[] variables) {
-            DerivativeStructure[] y = new DerivativeStructure[factors.getRowDimension()];
-            for (int i = 0; i < y.length; ++i) {
-                y[i] = variables[0].getField().getZero();
-                for (int j = 0; j < factors.getColumnDimension(); ++j) {
-                    y[i] = y[i].add(variables[j].multiply(factors.getEntry(i, j)));
-                }
-            }
-            return y;
-        }
-
-    }
-
-}
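
The two tests above exercise the multi-start pattern: the underlying (local) optimizer is restarted from several randomly generated start points, and every result is recorded and exposed through getOptima(). As a reading aid only, here is a minimal, library-independent sketch of that bookkeeping; the class and method names are invented for the example and are not part of Commons Math.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Random;
import java.util.function.Function;

public class MultiStartSketch {

    /** One local result: a point and its objective value. */
    static final class Result {
        final double[] point;
        final double value;
        Result(double[] point, double value) {
            this.point = point;
            this.value = value;
        }
    }

    /**
     * Restarts the given local solver from several Gaussian random start
     * points and returns every result, best (lowest value) first.
     */
    static List<Result> multiStart(Function<double[], Result> localSolver,
                                   int dim, int starts, long seed) {
        final Random rng = new Random(seed);
        final List<Result> optima = new ArrayList<>();
        for (int i = 0; i < starts; i++) {
            final double[] start = new double[dim];
            for (int j = 0; j < dim; j++) {
                start[j] = rng.nextGaussian();   // mirrors the Gaussian start-point generator used in the tests
            }
            optima.add(localSolver.apply(start));
        }
        optima.sort(Comparator.comparingDouble((Result r) -> r.value));
        return optima;                           // element 0 is the best local optimum found
    }
}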

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizerTest.java
deleted file mode 100644
index f3f4461..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/MultivariateMultiStartOptimizerTest.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateMultiStartOptimizer;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;
-import org.apache.commons.math4.optimization.direct.SimplexOptimizer;
-import org.apache.commons.math4.random.GaussianRandomGenerator;
-import org.apache.commons.math4.random.JDKRandomGenerator;
-import org.apache.commons.math4.random.RandomVectorGenerator;
-import org.apache.commons.math4.random.UncorrelatedRandomVectorGenerator;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class MultivariateMultiStartOptimizerTest {
-    @Test
-    public void testRosenbrock() {
-        Rosenbrock rosenbrock = new Rosenbrock();
-        SimplexOptimizer underlying
-            = new SimplexOptimizer(new SimpleValueChecker(-1, 1.0e-3));
-        NelderMeadSimplex simplex = new NelderMeadSimplex(new double[][] {
-                { -1.2,  1.0 }, { 0.9, 1.2 } , {  3.5, -2.3 }
-            });
-        underlying.setSimplex(simplex);
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(16069223052l);
-        RandomVectorGenerator generator =
-            new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
-        MultivariateMultiStartOptimizer optimizer =
-            new MultivariateMultiStartOptimizer(underlying, 10, generator);
-        PointValuePair optimum =
-            optimizer.optimize(1100, rosenbrock, GoalType.MINIMIZE, new double[] { -1.2, 1.0 });
-
-        Assert.assertEquals(rosenbrock.getCount(), optimizer.getEvaluations());
-        Assert.assertTrue(optimizer.getEvaluations() > 900);
-        Assert.assertTrue(optimizer.getEvaluations() < 1200);
-        Assert.assertTrue(optimum.getValue() < 8.0e-4);
-    }
-
-    private static class Rosenbrock implements MultivariateFunction {
-        private int count;
-
-        public Rosenbrock() {
-            count = 0;
-        }
-
-        public double value(double[] x) {
-            ++count;
-            double a = x[1] - x[0] * x[0];
-            double b = 1.0 - x[0];
-            return 100 * a * a + b * b;
-        }
-
-        public int getCount() {
-            return count;
-        }
-    }
-}
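
For reference, the Rosenbrock function implemented above is f(x, y) = 100 * (y - x^2)^2 + (1 - x)^2, whose global minimum is f = 0 at (1, 1). At the classical start point (-1.2, 1.0) used by the test, f = 100 * (1 - 1.44)^2 + (2.2)^2 = 19.36 + 4.84 = 24.2, and the optimizer has to follow a narrow curved valley from there to the minimum, which is what makes the final assertion optimum.getValue() < 8.0e-4 a meaningful convergence check.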

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/PointValuePairTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/PointValuePairTest.java b/src/test/java/org/apache/commons/math4/optimization/PointValuePairTest.java
deleted file mode 100644
index 558541f..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/PointValuePairTest.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-
-import org.apache.commons.math4.TestUtils;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class PointValuePairTest {
-
-    @Test
-    public void testSerial() {
-        PointValuePair pv1 = new PointValuePair(new double[] { 1.0, 2.0, 3.0 }, 4.0);
-        PointValuePair pv2 = (PointValuePair) TestUtils.serializeAndRecover(pv1);
-        Assert.assertEquals(pv1.getKey().length, pv2.getKey().length);
-        for (int i = 0; i < pv1.getKey().length; ++i) {
-            Assert.assertEquals(pv1.getKey()[i], pv2.getKey()[i], 1.0e-15);
-        }
-        Assert.assertEquals(pv1.getValue(), pv2.getValue(), 1.0e-15);
-    }
-
-}
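
This test and the PointVectorValuePairTest that follows both rely on TestUtils.serializeAndRecover, which (as far as these tests are concerned) is a plain Java serialization round trip. Below is a self-contained sketch of such a helper, written only so the tests can be read on their own; the class name is invented and this is not the Commons Math utility itself.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

final class SerializationSketch {
    /** Serializes an object to a byte array and reads back a fresh copy. */
    static Object serializeAndRecover(Object o) throws IOException, ClassNotFoundException {
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(o);
        }
        try (ObjectInputStream in = new ObjectInputStream(
                 new ByteArrayInputStream(bytes.toByteArray()))) {
            return in.readObject();
        }
    }
}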

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/PointVectorValuePairTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/PointVectorValuePairTest.java b/src/test/java/org/apache/commons/math4/optimization/PointVectorValuePairTest.java
deleted file mode 100644
index 9d59f73..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/PointVectorValuePairTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-
-import org.apache.commons.math4.TestUtils;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class PointVectorValuePairTest {
-
-    @Test
-    public void testSerial() {
-        PointVectorValuePair pv1 = new PointVectorValuePair(new double[] { 1.0, 2.0, 3.0 },
-                                                            new double[] { 4.0, 5.0 });
-        PointVectorValuePair pv2 = (PointVectorValuePair) TestUtils.serializeAndRecover(pv1);
-        Assert.assertEquals(pv1.getKey().length, pv2.getKey().length);
-        for (int i = 0; i < pv1.getKey().length; ++i) {
-            Assert.assertEquals(pv1.getKey()[i], pv2.getKey()[i], 1.0e-15);
-        }
-        Assert.assertEquals(pv1.getValue().length, pv2.getValue().length);
-        for (int i = 0; i < pv1.getValue().length; ++i) {
-            Assert.assertEquals(pv1.getValue()[i], pv2.getValue()[i], 1.0e-15);
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/SimplePointCheckerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/SimplePointCheckerTest.java b/src/test/java/org/apache/commons/math4/optimization/SimplePointCheckerTest.java
deleted file mode 100644
index 44238ca..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/SimplePointCheckerTest.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimplePointChecker;
-import org.junit.Test;
-import org.junit.Assert;
-
-@Deprecated
-public class SimplePointCheckerTest {
-    @Test(expected=NotStrictlyPositiveException.class)
-    public void testIterationCheckPrecondition() {
-        new SimplePointChecker<PointValuePair>(1e-1, 1e-2, 0);
-    }
-
-    @Test
-    public void testIterationCheck() {
-        final int max = 10;
-        final SimplePointChecker<PointValuePair> checker
-            = new SimplePointChecker<PointValuePair>(1e-1, 1e-2, max);
-        Assert.assertTrue(checker.converged(max, null, null)); 
-        Assert.assertTrue(checker.converged(max + 1, null, null));
-    }
-
-    @Test
-    public void testIterationCheckDisabled() {
-        final SimplePointChecker<PointValuePair> checker
-            = new SimplePointChecker<PointValuePair>(1e-8, 1e-8);
-
-        final PointValuePair a = new PointValuePair(new double[] { 1d }, 1d);
-        final PointValuePair b = new PointValuePair(new double[] { 10d }, 10d);
-
-        Assert.assertFalse(checker.converged(-1, a, b));
-        Assert.assertFalse(checker.converged(0, a, b));
-        Assert.assertFalse(checker.converged(1000000, a, b));
-
-        Assert.assertTrue(checker.converged(-1, a, a));
-        Assert.assertTrue(checker.converged(-1, b, b));
-    }
-
-}
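
This test and the two checker tests that follow (SimpleValueCheckerTest and SimpleVectorValueCheckerTest) all exercise the same contract: an optional iteration cap that forces convergence once reached, plus a relative/absolute tolerance comparison between successive iterates, with the cap disabled when the two-argument constructor is used. Here is a minimal sketch of that rule, assuming the usual "relative or absolute tolerance" form; it is an illustration, not the library source.

final class ConvergenceSketch {
    private final double relThreshold;
    private final double absThreshold;
    private final int maxIterations;   // non-positive means "no iteration cap"

    ConvergenceSketch(double relThreshold, double absThreshold, int maxIterations) {
        this.relThreshold = relThreshold;
        this.absThreshold = absThreshold;
        this.maxIterations = maxIterations;
    }

    /** True once the iteration cap is hit, or when both points agree within tolerance. */
    boolean converged(int iteration, double[] previous, double[] current) {
        if (maxIterations > 0 && iteration >= maxIterations) {
            return true;               // behaviour asserted by testIterationCheck
        }
        for (int i = 0; i < previous.length; i++) {
            final double diff = Math.abs(previous[i] - current[i]);
            final double size = Math.max(Math.abs(previous[i]), Math.abs(current[i]));
            if (diff > size * relThreshold && diff > absThreshold) {
                return false;          // this component still moved too much
            }
        }
        return true;
    }
}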

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/SimpleValueCheckerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/SimpleValueCheckerTest.java b/src/test/java/org/apache/commons/math4/optimization/SimpleValueCheckerTest.java
deleted file mode 100644
index 53b0d13..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/SimpleValueCheckerTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.junit.Test;
-import org.junit.Assert;
-
-@Deprecated
-public class SimpleValueCheckerTest {
-    @Test(expected=NotStrictlyPositiveException.class)
-    public void testIterationCheckPrecondition() {
-        new SimpleValueChecker(1e-1, 1e-2, 0);
-    }
-
-    @Test
-    public void testIterationCheck() {
-        final int max = 10;
-        final SimpleValueChecker checker = new SimpleValueChecker(1e-1, 1e-2, max);
-        Assert.assertTrue(checker.converged(max, null, null)); 
-        Assert.assertTrue(checker.converged(max + 1, null, null));
-    }
-
-    @Test
-    public void testIterationCheckDisabled() {
-        final SimpleValueChecker checker = new SimpleValueChecker(1e-8, 1e-8);
-
-        final PointValuePair a = new PointValuePair(new double[] { 1d }, 1d);
-        final PointValuePair b = new PointValuePair(new double[] { 10d }, 10d);
-
-        Assert.assertFalse(checker.converged(-1, a, b));
-        Assert.assertFalse(checker.converged(0, a, b));
-        Assert.assertFalse(checker.converged(1000000, a, b));
-
-        Assert.assertTrue(checker.converged(-1, a, a));
-        Assert.assertTrue(checker.converged(-1, b, b));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/SimpleVectorValueCheckerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/SimpleVectorValueCheckerTest.java b/src/test/java/org/apache/commons/math4/optimization/SimpleVectorValueCheckerTest.java
deleted file mode 100644
index abe807a..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/SimpleVectorValueCheckerTest.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-import org.junit.Test;
-import org.junit.Assert;
-
-@Deprecated
-public class SimpleVectorValueCheckerTest {
-    @Test(expected=NotStrictlyPositiveException.class)
-    public void testIterationCheckPrecondition() {
-        new SimpleVectorValueChecker(1e-1, 1e-2, 0);
-    }
-
-    @Test
-    public void testIterationCheck() {
-        final int max = 10;
-        final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(1e-1, 1e-2, max);
-        Assert.assertTrue(checker.converged(max, null, null));
-        Assert.assertTrue(checker.converged(max + 1, null, null));
-    }
-
-    @Test
-    public void testIterationCheckDisabled() {
-        final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(1e-8, 1e-8);
-
-        final PointVectorValuePair a = new PointVectorValuePair(new double[] { 1d },
-                                                                new double[] { 1d });
-        final PointVectorValuePair b = new PointVectorValuePair(new double[] { 10d },
-                                                                new double[] { 10d });
-
-        Assert.assertFalse(checker.converged(-1, a, b));
-        Assert.assertFalse(checker.converged(0, a, b));
-        Assert.assertFalse(checker.converged(1000000, a, b));
-
-        Assert.assertTrue(checker.converged(-1, a, a));
-        Assert.assertTrue(checker.converged(-1, b, b));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizerTest.java
deleted file mode 100644
index add96f3..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizerTest.java
+++ /dev/null
@@ -1,631 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NumberIsTooLargeException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleBounds;
-import org.apache.commons.math4.optimization.direct.BOBYQAOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * Test for {@link BOBYQAOptimizer}.
- */
-@Deprecated
-public class BOBYQAOptimizerTest {
-
-    static final int DIM = 13;
-   
-    @Test(expected=NumberIsTooLargeException.class)
-    public void testInitOutOfBounds() {
-        double[] startPoint = point(DIM, 3);
-        double[][] boundaries = boundaries(DIM, -1, 2);
-        doTest(new Rosen(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 2000, null);
-    }
-    
-    @Test(expected=DimensionMismatchException.class)
-    public void testBoundariesDimensionMismatch() {
-        double[] startPoint = point(DIM, 0.5);
-        double[][] boundaries = boundaries(DIM + 1, -1, 2);
-        doTest(new Rosen(), startPoint, boundaries,
-               GoalType.MINIMIZE, 
-               1e-13, 1e-6, 2000, null);
-    }
-
-    @Test(expected=NumberIsTooSmallException.class)
-    public void testProblemDimensionTooSmall() {
-        double[] startPoint = point(1, 0.5);
-        doTest(new Rosen(), startPoint, null,
-               GoalType.MINIMIZE,
-               1e-13, 1e-6, 2000, null);
-    }
-
-    @Test(expected=TooManyEvaluationsException.class)
-    public void testMaxEvaluations() {
-        final int lowMaxEval = 2;
-        double[] startPoint = point(DIM, 0.1);
-        double[][] boundaries = null;
-        doTest(new Rosen(), startPoint, boundaries,
-               GoalType.MINIMIZE, 
-               1e-13, 1e-6, lowMaxEval, null);
-     }
-
-    @Test
-    public void testRosen() {
-        double[] startPoint = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected = new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 2000, expected);
-     }
-
-    @Test
-    public void testMaximize() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected = new PointValuePair(point(DIM,0.0),1.0);
-        doTest(new MinusElli(), startPoint, boundaries,
-                GoalType.MAXIMIZE, 
-                2e-10, 5e-6, 1000, expected);
-        boundaries = boundaries(DIM,-0.3,0.3); 
-        startPoint = point(DIM,0.1);
-        doTest(new MinusElli(), startPoint, boundaries,
-                GoalType.MAXIMIZE, 
-                2e-10, 5e-6, 1000, expected);
-    }
-
-    @Test
-    public void testEllipse() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Elli(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 1000, expected);
-     }
-
-    @Test
-    public void testElliRotated() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new ElliRotated(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-12, 1e-6, 10000, expected);
-    }
-
-    @Test
-    public void testCigar() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Cigar(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 100, expected);
-    }
-
-    @Test
-    public void testTwoAxes() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new TwoAxes(), startPoint, boundaries,
-                GoalType.MINIMIZE,
-                2 * 1e-13, 1e-6, 100, expected);
-     }
-
-    @Test
-    public void testCigTab() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new CigTab(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 5e-5, 100, expected);
-     }
-
-    @Test
-    public void testSphere() {
-        double[] startPoint = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Sphere(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 100, expected);
-    }
-
-    @Test
-    public void testTablet() {
-        double[] startPoint = point(DIM,1.0); 
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Tablet(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 100, expected);
-    }
-
-    @Test
-    public void testDiffPow() {
-        double[] startPoint = point(DIM/2,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM/2,0.0),0.0);
-        doTest(new DiffPow(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-8, 1e-1, 12000, expected);
-    }
-
-    @Test
-    public void testSsDiffPow() {
-        double[] startPoint = point(DIM/2,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM/2,0.0),0.0);
-        doTest(new SsDiffPow(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-2, 1.3e-1, 50000, expected);
-    }
-
-    @Test
-    public void testAckley() {
-        double[] startPoint = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Ackley(), startPoint, boundaries,
-                GoalType.MINIMIZE,
-                1e-7, 1e-5, 1000, expected);
-    }
-
-    @Test
-    public void testRastrigin() {
-        double[] startPoint = point(DIM,1.0);
-
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Rastrigin(), startPoint, boundaries,
-                GoalType.MINIMIZE, 
-                1e-13, 1e-6, 1000, expected);
-    }
-
-    @Test
-    public void testConstrainedRosen() {
-        double[] startPoint = point(DIM,0.1);
-
-        double[][] boundaries = boundaries(DIM,-1,2);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, boundaries,
-                GoalType.MINIMIZE,
-                1e-13, 1e-6, 2000, expected);
-    }
-
-    // See MATH-728
-    // TODO: this test is temporarily disabled for 3.2 release as a bug in Cobertura
-    //       makes it run for several hours before completing
-    @Ignore @Test
-    public void testConstrainedRosenWithMoreInterpolationPoints() {
-        final double[] startPoint = point(DIM, 0.1);
-        final double[][] boundaries = boundaries(DIM, -1, 2);
-        final PointValuePair expected = new PointValuePair(point(DIM, 1.0), 0.0);
-
-        // This should have been 78 because in the code the hard limit is
-        // said to be
-        //   ((DIM + 1) * (DIM + 2)) / 2 - (2 * DIM + 1)
-        // i.e. 78 in this case, but the test fails for 48, 59, 62, 63, 64,
-        // 65, 66, ...
-        final int maxAdditionalPoints = 47;
-
-        for (int num = 1; num <= maxAdditionalPoints; num++) {
-            doTest(new Rosen(), startPoint, boundaries,
-                   GoalType.MINIMIZE,
-                   1e-12, 1e-6, 2000,
-                   num,
-                   expected,
-                   "num=" + num);
-        }
-    }
-
-    /**
-     * @param func Function to optimize.
-     * @param startPoint Starting point.
-     * @param boundaries Upper / lower point limit.
-     * @param goal Minimization or maximization.
-     * @param fTol Relative error tolerance on the objective function.
-     * @param pointTol Tolerance for checking that the optimum is correct.
-     * @param maxEvaluations Maximum number of evaluations.
-     * @param expected Expected point / value.
-     */
-    private void doTest(MultivariateFunction func,
-                        double[] startPoint,
-                        double[][] boundaries,
-                        GoalType goal,
-                        double fTol,
-                        double pointTol,
-                        int maxEvaluations,
-                        PointValuePair expected) {
-        doTest(func,
-               startPoint,
-               boundaries,
-               goal,
-               fTol,
-               pointTol,
-               maxEvaluations,
-               0,
-               expected,
-               "");
-    }
-
-    /**
-     * @param func Function to optimize.
-     * @param startPoint Starting point.
-     * @param boundaries Upper / lower point limit.
-     * @param goal Minimization or maximization.
-     * @param fTol Relative error tolerance on the objective function.
-     * @param pointTol Tolerance for checking that the optimum is correct.
-     * @param maxEvaluations Maximum number of evaluations.
-     * @param additionalInterpolationPoints Number of interpolation points to use
-     * in addition to the default (2 * dim + 1).
-     * @param expected Expected point / value.
-     */
-    private void doTest(MultivariateFunction func,
-                        double[] startPoint,
-                        double[][] boundaries,
-                        GoalType goal,
-                        double fTol,
-                        double pointTol,
-                        int maxEvaluations,
-                        int additionalInterpolationPoints,
-                        PointValuePair expected,
-                        String assertMsg) {
-
-//         System.out.println(func.getClass().getName() + " BEGIN"); // XXX
-
-        int dim = startPoint.length;
-//        MultivariateOptimizer optim =
-//            new PowellOptimizer(1e-13, FastMath.ulp(1d));
-//        PointValuePair result = optim.optimize(100000, func, goal, startPoint);
-        final double[] lB = boundaries == null ? null : boundaries[0];
-        final double[] uB = boundaries == null ? null : boundaries[1];
-        final int numInterpolationPoints = 2 * dim + 1 + additionalInterpolationPoints;
-        BOBYQAOptimizer optim = new BOBYQAOptimizer(numInterpolationPoints);
-        PointValuePair result = boundaries == null ?
-            optim.optimize(maxEvaluations, func, goal,
-                           new InitialGuess(startPoint)) :
-            optim.optimize(maxEvaluations, func, goal,
-                           new InitialGuess(startPoint),
-                           new SimpleBounds(lB, uB));
-//        System.out.println(func.getClass().getName() + " = " 
-//              + optim.getEvaluations() + " f(");
-//        for (double x: result.getPoint())  System.out.print(x + " ");
-//        System.out.println(") = " +  result.getValue());
-        Assert.assertEquals(assertMsg, expected.getValue(), result.getValue(), fTol);
-        for (int i = 0; i < dim; i++) {
-            Assert.assertEquals(expected.getPoint()[i],
-                                result.getPoint()[i], pointTol);
-        }
-
-//         System.out.println(func.getClass().getName() + " END"); // XXX
-    }
-
-    private static double[] point(int n, double value) {
-        double[] ds = new double[n];
-        Arrays.fill(ds, value);
-        return ds;
-    }
-
-    private static double[][] boundaries(int dim,
-            double lower, double upper) {
-        double[][] boundaries = new double[2][dim];
-        for (int i = 0; i < dim; i++)
-            boundaries[0][i] = lower;
-        for (int i = 0; i < dim; i++)
-            boundaries[1][i] = upper;
-        return boundaries;
-    }
-
-    private static class Sphere implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Cigar implements MultivariateFunction {
-        private double factor;
-
-        Cigar() {
-            this(1e3);
-        }
-
-        Cigar(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = x[0] * x[0];
-            for (int i = 1; i < x.length; ++i)
-                f += factor * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Tablet implements MultivariateFunction {
-        private double factor;
-
-        Tablet() {
-            this(1e3);
-        }
-
-        Tablet(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = factor * x[0] * x[0];
-            for (int i = 1; i < x.length; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class CigTab implements MultivariateFunction {
-        private double factor;
-
-        CigTab() {
-            this(1e4);
-        }
-
-        CigTab(double axisratio) {
-            factor = axisratio;
-        }
-
-        public double value(double[] x) {
-            int end = x.length - 1;
-            double f = x[0] * x[0] / factor + factor * x[end] * x[end];
-            for (int i = 1; i < end; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class TwoAxes implements MultivariateFunction {
-
-        private double factor;
-
-        TwoAxes() {
-            this(1e6);
-        }
-
-        TwoAxes(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += (i < x.length / 2 ? factor : 1) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class ElliRotated implements MultivariateFunction {
-        private Basis B = new Basis();
-        private double factor;
-
-        ElliRotated() {
-            this(1e3);
-        }
-
-        ElliRotated(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            x = B.Rotate(x);
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Elli implements MultivariateFunction {
-
-        private double factor;
-
-        Elli() {
-            this(1e3);
-        }
-
-        Elli(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class MinusElli implements MultivariateFunction {
-        private final Elli elli = new Elli();
-        public double value(double[] x) {
-            return 1.0 - elli.value(x);
-        }
-    }
-
-    private static class DiffPow implements MultivariateFunction {
-//        private int fcount = 0;
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(FastMath.abs(x[i]), 2. + 10 * (double) i
-                        / (x.length - 1.));
-//            System.out.print("" + (fcount++) + ") ");
-//            for (int i = 0; i < x.length; i++)
-//                System.out.print(x[i] +  " ");
-//            System.out.println(" = " + f);
-            return f;
-        }
-    }
-
-    private static class SsDiffPow implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = FastMath.pow(new DiffPow().value(x), 0.25);
-            return f;
-        }
-    }
-
-    private static class Rosen implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length - 1; ++i)
-                f += 1e2 * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1])
-                + (x[i] - 1.) * (x[i] - 1.);
-            return f;
-        }
-    }
-
-    private static class Ackley implements MultivariateFunction {
-        private double axisratio;
-
-        Ackley(double axra) {
-            axisratio = axra;
-        }
-
-        public Ackley() {
-            this(1);
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            double res2 = 0;
-            double fac = 0;
-            for (int i = 0; i < x.length; ++i) {
-                fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
-                f += fac * fac * x[i] * x[i];
-                res2 += FastMath.cos(2. * FastMath.PI * fac * x[i]);
-            }
-            f = (20. - 20. * FastMath.exp(-0.2 * FastMath.sqrt(f / x.length))
-                    + FastMath.exp(1.) - FastMath.exp(res2 / x.length));
-            return f;
-        }
-    }
-
-    private static class Rastrigin implements MultivariateFunction {
-
-        private double axisratio;
-        private double amplitude;
-
-        Rastrigin() {
-            this(1, 10);
-        }
-
-        Rastrigin(double axisratio, double amplitude) {
-            this.axisratio = axisratio;
-            this.amplitude = amplitude;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            double fac;
-            for (int i = 0; i < x.length; ++i) {
-                fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
-                if (i == 0 && x[i] < 0)
-                    fac *= 1.;
-                f += fac * fac * x[i] * x[i] + amplitude
-                * (1. - FastMath.cos(2. * FastMath.PI * fac * x[i]));
-            }
-            return f;
-        }
-    }
-
-    private static class Basis {
-        double[][] basis;
-        Random rand = new Random(2); // do not always use the same basis
-
-        double[] Rotate(double[] x) {
-            GenBasis(x.length);
-            double[] y = new double[x.length];
-            for (int i = 0; i < x.length; ++i) {
-                y[i] = 0;
-                for (int j = 0; j < x.length; ++j)
-                    y[i] += basis[i][j] * x[j];
-            }
-            return y;
-        }
-
-        void GenBasis(int DIM) {
-            if (basis != null ? basis.length == DIM : false)
-                return;
-
-            double sp;
-            int i, j, k;
-
-            /* generate orthogonal basis */
-            basis = new double[DIM][DIM];
-            for (i = 0; i < DIM; ++i) {
-                /* sample components gaussian */
-                for (j = 0; j < DIM; ++j)
-                    basis[i][j] = rand.nextGaussian();
-                /* subtract projections onto previous vectors */
-                for (j = i - 1; j >= 0; --j) {
-                    for (sp = 0., k = 0; k < DIM; ++k)
-                        sp += basis[i][k] * basis[j][k]; /* scalar product */
-                    for (k = 0; k < DIM; ++k)
-                        basis[i][k] -= sp * basis[j][k]; /* subtract */
-                }
-                /* normalize */
-                for (sp = 0., k = 0; k < DIM; ++k)
-                    sp += basis[i][k] * basis[i][k]; /* squared norm */
-                for (k = 0; k < DIM; ++k)
-                    basis[i][k] /= FastMath.sqrt(sp);
-            }
-        }
-    }
-}
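
Two reading notes on the test removed above: the Basis helper builds a random orthonormal basis by Gram-Schmidt orthogonalization (subtract the projections onto previous vectors, then normalize), and the doTest helper always hands BOBYQA 2 * dim + 1 interpolation points plus an optional surplus. The non-deprecated optimizer lives in the optim package hierarchy; assuming the 3.x-style org.apache.commons.math4.optim API (the package and class names below are that assumption, not something shown in this commit), an equivalent minimal invocation would look roughly like this sketch.

import java.util.Arrays;

import org.apache.commons.math4.analysis.MultivariateFunction;
import org.apache.commons.math4.optim.InitialGuess;
import org.apache.commons.math4.optim.MaxEval;
import org.apache.commons.math4.optim.PointValuePair;
import org.apache.commons.math4.optim.SimpleBounds;
import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer;

public class BobyqaUsageSketch {
    public static void main(String[] args) {
        final int dim = 13;
        // Simple sphere objective: f(x) = sum of x_i^2, minimum 0 at the origin.
        final MultivariateFunction sphere = new MultivariateFunction() {
            public double value(double[] x) {
                double f = 0;
                for (double xi : x) {
                    f += xi * xi;
                }
                return f;
            }
        };
        final double[] start = new double[dim];
        Arrays.fill(start, 1.0);
        final double[] lower = new double[dim];
        final double[] upper = new double[dim];
        Arrays.fill(lower, -2.0);
        Arrays.fill(upper, 2.0);
        // 2 * dim + 1 interpolation points, as in the deleted doTest helper.
        final BOBYQAOptimizer optimizer = new BOBYQAOptimizer(2 * dim + 1);
        final PointValuePair result =
            optimizer.optimize(new MaxEval(1000),
                               new ObjectiveFunction(sphere),
                               GoalType.MINIMIZE,
                               new InitialGuess(start),
                               new SimpleBounds(lower, upper));
        System.out.println("f(min) = " + result.getValue());
    }
}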


[15/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizer.java
deleted file mode 100644
index 487aad6..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/BOBYQAOptimizer.java
+++ /dev/null
@@ -1,2465 +0,0 @@
-// CHECKSTYLE: stop all
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.OutOfRangeException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.linear.Array2DRowRealMatrix;
-import org.apache.commons.math4.linear.ArrayRealVector;
-import org.apache.commons.math4.linear.RealVector;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateOptimizer;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Powell's BOBYQA algorithm. This implementation is translated and
- * adapted from the Fortran version available
- * <a href="http://plato.asu.edu/ftp/other_software/bobyqa.zip">here</a>.
- * See <a href="http://www.optimization-online.org/DB_HTML/2010/05/2616.html">
- * this paper</a> for an introduction.
- * <br/>
- * BOBYQA is particularly well suited for high dimensional problems
- * where derivatives are not available. In most cases it outperforms the
- * {@link PowellOptimizer} significantly. Stochastic algorithms like
- * {@link CMAESOptimizer} succeed more often than BOBYQA, but are more
- * expensive. BOBYQA could also be considered as a replacement of any
- * derivative-based optimizer when the derivatives are approximated by
- * finite differences.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class BOBYQAOptimizer
-    extends BaseAbstractMultivariateSimpleBoundsOptimizer<MultivariateFunction>
-    implements MultivariateOptimizer {
-    /** Minimum dimension of the problem: {@value} */
-    public static final int MINIMUM_PROBLEM_DIMENSION = 2;
-    /** Default value for {@link #initialTrustRegionRadius}: {@value} . */
-    public static final double DEFAULT_INITIAL_RADIUS = 10.0;
-    /** Default value for {@link #stoppingTrustRegionRadius}: {@value} . */
-    public static final double DEFAULT_STOPPING_RADIUS = 1E-8;
-
-    private static final double ZERO = 0d;
-    private static final double ONE = 1d;
-    private static final double TWO = 2d;
-    private static final double TEN = 10d;
-    private static final double SIXTEEN = 16d;
-    private static final double TWO_HUNDRED_FIFTY = 250d;
-    private static final double MINUS_ONE = -ONE;
-    private static final double HALF = ONE / 2;
-    private static final double ONE_OVER_FOUR = ONE / 4;
-    private static final double ONE_OVER_EIGHT = ONE / 8;
-    private static final double ONE_OVER_TEN = ONE / 10;
-    private static final double ONE_OVER_A_THOUSAND = ONE / 1000;
-
-    /**
-     * numberOfInterpolationPoints XXX
-     */
-    private final int numberOfInterpolationPoints;
-    /**
-     * initialTrustRegionRadius XXX
-     */
-    private double initialTrustRegionRadius;
-    /**
-     * stoppingTrustRegionRadius XXX
-     */
-    private final double stoppingTrustRegionRadius;
-    /** Goal type (minimize or maximize). */
-    private boolean isMinimize;
-    /**
-     * Current best values for the variables to be optimized.
-     * The vector will be changed in-place to contain the values of the least
-     * calculated objective function values.
-     */
-    private ArrayRealVector currentBest;
-    /** Differences between the upper and lower bounds. */
-    private double[] boundDifference;
-    /**
-     * Index of the interpolation point at the trust region center.
-     */
-    private int trustRegionCenterInterpolationPointIndex;
-    /**
-     * Last <em>n</em> columns of matrix H (where <em>n</em> is the dimension
-     * of the problem).
-     * XXX "bmat" in the original code.
-     */
-    private Array2DRowRealMatrix bMatrix;
-    /**
-     * Factorization of the leading <em>npt</em> square submatrix of H, this
-     * factorization being Z Z<sup>T</sup>, which provides both the correct
-     * rank and positive semi-definiteness.
-     * XXX "zmat" in the original code.
-     */
-    private Array2DRowRealMatrix zMatrix;
-    /**
-     * Coordinates of the interpolation points relative to {@link #originShift}.
-     * XXX "xpt" in the original code.
-     */
-    private Array2DRowRealMatrix interpolationPoints;
-    /**
-     * Shift of origin that should reduce the contributions from rounding
-     * errors to values of the model and Lagrange functions.
-     * XXX "xbase" in the original code.
-     */
-    private ArrayRealVector originShift;
-    /**
-     * Values of the objective function at the interpolation points.
-     * XXX "fval" in the original code.
-     */
-    private ArrayRealVector fAtInterpolationPoints;
-    /**
-     * Displacement from {@link #originShift} of the trust region center.
-     * XXX "xopt" in the original code.
-     */
-    private ArrayRealVector trustRegionCenterOffset;
-    /**
-     * Gradient of the quadratic model at {@link #originShift} +
-     * {@link #trustRegionCenterOffset}.
-     * XXX "gopt" in the original code.
-     */
-    private ArrayRealVector gradientAtTrustRegionCenter;
-    /**
-     * Differences {@link #getLowerBound()} - {@link #originShift}.
-     * All the components of every {@link #trustRegionCenterOffset} are going
-     * to satisfy the bounds<br/>
-     * {@link #getLowerBound() lowerBound}<sub>i</sub> &le;
-     * {@link #trustRegionCenterOffset}<sub>i</sub>,<br/>
-     * with appropriate equalities when {@link #trustRegionCenterOffset} is
-     * on a constraint boundary.
-     * XXX "sl" in the original code.
-     */
-    private ArrayRealVector lowerDifference;
-    /**
-     * Differences {@link #getUpperBound()} - {@link #originShift}
-     * All the components of every {@link #trustRegionCenterOffset} are going
-     * to satisfy the bounds<br/>
-     *  {@link #trustRegionCenterOffset}<sub>i</sub> &le;
-     *  {@link #getUpperBound() upperBound}<sub>i</sub>,<br/>
-     * with appropriate equalities when {@link #trustRegionCenterOffset} is
-     * on a constraint boundary.
-     * XXX "su" in the original code.
-     */
-    private ArrayRealVector upperDifference;
-    /**
-     * Parameters of the implicit second derivatives of the quadratic model.
-     * XXX "pq" in the original code.
-     */
-    private ArrayRealVector modelSecondDerivativesParameters;
-    /**
-     * Point chosen by function {@link #trsbox(double,ArrayRealVector,
-     * ArrayRealVector, ArrayRealVector,ArrayRealVector,ArrayRealVector) trsbox}
-     * or {@link #altmov(int,double) altmov}.
-     * Usually {@link #originShift} + {@link #newPoint} is the vector of
-     * variables for the next evaluation of the objective function.
-     * It also satisfies the constraints indicated in {@link #lowerDifference}
-     * and {@link #upperDifference}.
-     * XXX "xnew" in the original code.
-     */
-    private ArrayRealVector newPoint;
-    /**
-     * Alternative to {@link #newPoint}, chosen by
-     * {@link #altmov(int,double) altmov}.
-     * It may replace {@link #newPoint} in order to increase the denominator
-     * in the {@link #update(double, double, int) updating procedure}.
-     * XXX "xalt" in the original code.
-     */
-    private ArrayRealVector alternativeNewPoint;
-    /**
-     * Trial step from {@link #trustRegionCenterOffset} which is usually
-     * {@link #newPoint} - {@link #trustRegionCenterOffset}.
-     * XXX "d__" in the original code.
-     */
-    private ArrayRealVector trialStepPoint;
-    /**
-     * Values of the Lagrange functions at a new point.
-     * XXX "vlag" in the original code.
-     */
-    private ArrayRealVector lagrangeValuesAtNewPoint;
-    /**
-     * Explicit second derivatives of the quadratic model.
-     * XXX "hq" in the original code.
-     */
-    private ArrayRealVector modelSecondDerivativesValues;
-
-    /**
-     * @param numberOfInterpolationPoints Number of interpolation conditions.
-     * For a problem of dimension {@code n}, its value must be in the interval
-     * {@code [n+2, (n+1)(n+2)/2]}.
-     * Choices that exceed {@code 2n+1} are not recommended.
-     */
-    public BOBYQAOptimizer(int numberOfInterpolationPoints) {
-        this(numberOfInterpolationPoints,
-             DEFAULT_INITIAL_RADIUS,
-             DEFAULT_STOPPING_RADIUS);
-    }
-
-    /**
-     * @param numberOfInterpolationPoints Number of interpolation conditions.
-     * For a problem of dimension {@code n}, its value must be in the interval
-     * {@code [n+2, (n+1)(n+2)/2]}.
-     * Choices that exceed {@code 2n+1} are not recommended.
-     * @param initialTrustRegionRadius Initial trust region radius.
-     * @param stoppingTrustRegionRadius Stopping trust region radius.
-     */
-    public BOBYQAOptimizer(int numberOfInterpolationPoints,
-                           double initialTrustRegionRadius,
-                           double stoppingTrustRegionRadius) {
-        super(null); // No custom convergence criterion.
-        this.numberOfInterpolationPoints = numberOfInterpolationPoints;
-        this.initialTrustRegionRadius = initialTrustRegionRadius;
-        this.stoppingTrustRegionRadius = stoppingTrustRegionRadius;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair doOptimize() {
-        final double[] lowerBound = getLowerBound();
-        final double[] upperBound = getUpperBound();
-
-        // Validity checks.
-        setup(lowerBound, upperBound);
-
-        isMinimize = (getGoalType() == GoalType.MINIMIZE);
-        currentBest = new ArrayRealVector(getStartPoint());
-
-        final double value = bobyqa(lowerBound, upperBound);
-
-        return new PointValuePair(currentBest.getDataRef(),
-                                      isMinimize ? value : -value);
-    }
-
-    /**
-     *     This subroutine seeks the least value of a function of many variables,
-     *     by applying a trust region method that forms quadratic models by
-     *     interpolation. There is usually some freedom in the interpolation
-     *     conditions, which is taken up by minimizing the Frobenius norm of
-     *     the change to the second derivative of the model, beginning with the
-     *     zero matrix. The values of the variables are constrained by upper and
-     *     lower bounds. The arguments of the subroutine are as follows.
-     *
-     *     N must be set to the number of variables and must be at least two.
-     *     NPT is the number of interpolation conditions. Its value must be in
-     *       the interval [N+2,(N+1)(N+2)/2]. Choices that exceed 2*N+1 are not
-     *       recommended.
-     *     Initial values of the variables must be set in X(1),X(2),...,X(N). They
-     *       will be changed to the values that give the least calculated F.
-     *     For I=1,2,...,N, XL(I) and XU(I) must provide the lower and upper
-     *       bounds, respectively, on X(I). The construction of quadratic models
-     *       requires XL(I) to be strictly less than XU(I) for each I. Further,
-     *       the contribution to a model from changes to the I-th variable is
-     *       damaged severely by rounding errors if XU(I)-XL(I) is too small.
-     *     RHOBEG and RHOEND must be set to the initial and final values of a trust
-     *       region radius, so both must be positive with RHOEND no greater than
-     *       RHOBEG. Typically, RHOBEG should be about one tenth of the greatest
-     *       expected change to a variable, while RHOEND should indicate the
-     *       accuracy that is required in the final values of the variables. An
-     *       error return occurs if any of the differences XU(I)-XL(I), I=1,...,N,
-     *       is less than 2*RHOBEG.
-     *     MAXFUN must be set to an upper bound on the number of calls of CALFUN.
-     *     The array W will be used for working space. Its length must be at least
-     *       (NPT+5)*(NPT+N)+3*N*(N+5)/2.
-     *
-     * @param lowerBound Lower bounds.
-     * @param upperBound Upper bounds.
-     * @return the value of the objective at the optimum.
-     */
-    private double bobyqa(double[] lowerBound,
-                          double[] upperBound) {
-        printMethod(); // XXX
-
-        final int n = currentBest.getDimension();
-
-        // Return if there is insufficient space between the bounds. Modify the
-        // initial X if necessary in order to avoid conflicts between the bounds
-        // and the construction of the first quadratic model. The lower and upper
-        // bounds on moves from the updated X are set now, in the ISL and ISU
-        // partitions of W, in order to provide useful and exact information about
-        // components of X that become within distance RHOBEG from their bounds.
-
-        for (int j = 0; j < n; j++) {
-            final double boundDiff = boundDifference[j];
-            lowerDifference.setEntry(j, lowerBound[j] - currentBest.getEntry(j));
-            upperDifference.setEntry(j, upperBound[j] - currentBest.getEntry(j));
-            if (lowerDifference.getEntry(j) >= -initialTrustRegionRadius) {
-                if (lowerDifference.getEntry(j) >= ZERO) {
-                    currentBest.setEntry(j, lowerBound[j]);
-                    lowerDifference.setEntry(j, ZERO);
-                    upperDifference.setEntry(j, boundDiff);
-                } else {
-                    currentBest.setEntry(j, lowerBound[j] + initialTrustRegionRadius);
-                    lowerDifference.setEntry(j, -initialTrustRegionRadius);
-                    // Computing MAX
-                    final double deltaOne = upperBound[j] - currentBest.getEntry(j);
-                    upperDifference.setEntry(j, FastMath.max(deltaOne, initialTrustRegionRadius));
-                }
-            } else if (upperDifference.getEntry(j) <= initialTrustRegionRadius) {
-                if (upperDifference.getEntry(j) <= ZERO) {
-                    currentBest.setEntry(j, upperBound[j]);
-                    lowerDifference.setEntry(j, -boundDiff);
-                    upperDifference.setEntry(j, ZERO);
-                } else {
-                    currentBest.setEntry(j, upperBound[j] - initialTrustRegionRadius);
-                    // Computing MIN
-                    final double deltaOne = lowerBound[j] - currentBest.getEntry(j);
-                    final double deltaTwo = -initialTrustRegionRadius;
-                    lowerDifference.setEntry(j, FastMath.min(deltaOne, deltaTwo));
-                    upperDifference.setEntry(j, initialTrustRegionRadius);
-                }
-            }
-        }
-
-        // Make the call of BOBYQB.
-
-        return bobyqb(lowerBound, upperBound);
-    } // bobyqa
-
-    // ----------------------------------------------------------------------------------------
-
-    /**
-     *     The arguments N, NPT, X, XL, XU, RHOBEG, RHOEND, IPRINT and MAXFUN
-     *       are identical to the corresponding arguments in SUBROUTINE BOBYQA.
-     *     XBASE holds a shift of origin that should reduce the contributions
-     *       from rounding errors to values of the model and Lagrange functions.
-     *     XPT is a two-dimensional array that holds the coordinates of the
-     *       interpolation points relative to XBASE.
-     *     FVAL holds the values of F at the interpolation points.
-     *     XOPT is set to the displacement from XBASE of the trust region centre.
-     *     GOPT holds the gradient of the quadratic model at XBASE+XOPT.
-     *     HQ holds the explicit second derivatives of the quadratic model.
-     *     PQ contains the parameters of the implicit second derivatives of the
-     *       quadratic model.
-     *     BMAT holds the last N columns of H.
-     *     ZMAT holds the factorization of the leading NPT by NPT submatrix of H,
-     *       this factorization being ZMAT times ZMAT^T, which provides both the
-     *       correct rank and positive semi-definiteness.
-     *     NDIM is the first dimension of BMAT and has the value NPT+N.
-     *     SL and SU hold the differences XL-XBASE and XU-XBASE, respectively.
-     *       All the components of every XOPT are going to satisfy the bounds
-     *       SL(I) .LEQ. XOPT(I) .LEQ. SU(I), with appropriate equalities when
-     *       XOPT is on a constraint boundary.
-     *     XNEW is chosen by SUBROUTINE TRSBOX or ALTMOV. Usually XBASE+XNEW is the
-     *       vector of variables for the next call of CALFUN. XNEW also satisfies
-     *       the SL and SU constraints in the way that has just been mentioned.
-     *     XALT is an alternative to XNEW, chosen by ALTMOV, that may replace XNEW
-     *       in order to increase the denominator in the updating of UPDATE.
-     *     D is reserved for a trial step from XOPT, which is usually XNEW-XOPT.
-     *     VLAG contains the values of the Lagrange functions at a new point X.
-     *       They are part of a product that requires VLAG to be of length NDIM.
-     *     W is a one-dimensional array that is used for working space. Its length
-     *       must be at least 3*NDIM = 3*(NPT+N).
-     *
-     * @param lowerBound Lower bounds.
-     * @param upperBound Upper bounds.
-     * @return the value of the objective at the optimum.
-     */
-    private double bobyqb(double[] lowerBound,
-                          double[] upperBound) {
-        printMethod(); // XXX
-
-        final int n = currentBest.getDimension();
-        final int npt = numberOfInterpolationPoints;
-        final int np = n + 1;
-        final int nptm = npt - np;
-        final int nh = n * np / 2;
-
-        final ArrayRealVector work1 = new ArrayRealVector(n);
-        final ArrayRealVector work2 = new ArrayRealVector(npt);
-        final ArrayRealVector work3 = new ArrayRealVector(npt);
-
-        double cauchy = Double.NaN;
-        double alpha = Double.NaN;
-        double dsq = Double.NaN;
-        double crvmin = Double.NaN;
-
-        // Set some constants.
-        // Parameter adjustments
-
-        // Function Body
-
-        // The call of PRELIM sets the elements of XBASE, XPT, FVAL, GOPT, HQ, PQ,
-        // BMAT and ZMAT for the first iteration, with the corresponding values of
-        // NF and KOPT, which are the number of calls of CALFUN so far and the
-        // index of the interpolation point at the trust region centre. Then the
-        // initial XOPT is set too. The branch to label 720 occurs if MAXFUN is
-        // less than NPT. GOPT will be updated if KOPT is different from KBASE.
-
-        trustRegionCenterInterpolationPointIndex = 0;
-
-        prelim(lowerBound, upperBound);
-        double xoptsq = ZERO;
-        for (int i = 0; i < n; i++) {
-            trustRegionCenterOffset.setEntry(i, interpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex, i));
-            // Computing 2nd power
-            final double deltaOne = trustRegionCenterOffset.getEntry(i);
-            xoptsq += deltaOne * deltaOne;
-        }
-        double fsave = fAtInterpolationPoints.getEntry(0);
-        final int kbase = 0;
-
-        // Complete the settings that are required for the iterative procedure.
-
-        int ntrits = 0;
-        int itest = 0;
-        int knew = 0;
-        int nfsav = getEvaluations();
-        double rho = initialTrustRegionRadius;
-        double delta = rho;
-        double diffa = ZERO;
-        double diffb = ZERO;
-        double diffc = ZERO;
-        double f = ZERO;
-        double beta = ZERO;
-        double adelt = ZERO;
-        double denom = ZERO;
-        double ratio = ZERO;
-        double dnorm = ZERO;
-        double scaden = ZERO;
-        double biglsq = ZERO;
-        double distsq = ZERO;
-
-        // Update GOPT if necessary before the first iteration and after each
-        // call of RESCUE that makes a call of CALFUN.
-
-        int state = 20;
-        for(;;) switch (state) {
-        case 20: {
-            printState(20); // XXX
-            if (trustRegionCenterInterpolationPointIndex != kbase) {
-                int ih = 0;
-                for (int j = 0; j < n; j++) {
-                    for (int i = 0; i <= j; i++) {
-                        if (i < j) {
-                            gradientAtTrustRegionCenter.setEntry(j,  gradientAtTrustRegionCenter.getEntry(j) + modelSecondDerivativesValues.getEntry(ih) * trustRegionCenterOffset.getEntry(i));
-                        }
-                        gradientAtTrustRegionCenter.setEntry(i,  gradientAtTrustRegionCenter.getEntry(i) + modelSecondDerivativesValues.getEntry(ih) * trustRegionCenterOffset.getEntry(j));
-                        ih++;
-                    }
-                }
-                if (getEvaluations() > npt) {
-                    for (int k = 0; k < npt; k++) {
-                        double temp = ZERO;
-                        for (int j = 0; j < n; j++) {
-                            temp += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
-                        }
-                        temp *= modelSecondDerivativesParameters.getEntry(k);
-                        for (int i = 0; i < n; i++) {
-                            gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
-                        }
-                    }
-                    // throw new PathIsExploredException(); // XXX
-                }
-            }
-
-            // Generate the next point in the trust region that provides a small value
-            // of the quadratic model subject to the constraints on the variables.
-            // The int NTRITS is set to the number "trust region" iterations that
-            // have occurred since the last "alternative" iteration. If the length
-            // of XNEW-XOPT is less than HALF*RHO, however, then there is a branch to
-            // label 650 or 680 with NTRITS=-1, instead of calculating F at XNEW.
-
-        }
-        case 60: {
-            printState(60); // XXX
-            final ArrayRealVector gnew = new ArrayRealVector(n);
-            final ArrayRealVector xbdi = new ArrayRealVector(n);
-            final ArrayRealVector s = new ArrayRealVector(n);
-            final ArrayRealVector hs = new ArrayRealVector(n);
-            final ArrayRealVector hred = new ArrayRealVector(n);
-
-            final double[] dsqCrvmin = trsbox(delta, gnew, xbdi, s,
-                                              hs, hred);
-            dsq = dsqCrvmin[0];
-            crvmin = dsqCrvmin[1];
-
-            // Computing MIN
-            double deltaOne = delta;
-            double deltaTwo = FastMath.sqrt(dsq);
-            dnorm = FastMath.min(deltaOne, deltaTwo);
-            if (dnorm < HALF * rho) {
-                ntrits = -1;
-                // Computing 2nd power
-                deltaOne = TEN * rho;
-                distsq = deltaOne * deltaOne;
-                if (getEvaluations() <= nfsav + 2) {
-                    state = 650; break;
-                }
-
-                // The following choice between labels 650 and 680 depends on whether or
-                // not our work with the current RHO seems to be complete. Either RHO is
-                // decreased or termination occurs if the errors in the quadratic model at
-                // the last three interpolation points compare favourably with predictions
-                // of likely improvements to the model within distance HALF*RHO of XOPT.
-
-                // Computing MAX
-                deltaOne = FastMath.max(diffa, diffb);
-                final double errbig = FastMath.max(deltaOne, diffc);
-                final double frhosq = rho * ONE_OVER_EIGHT * rho;
-                if (crvmin > ZERO &&
-                    errbig > frhosq * crvmin) {
-                    state = 650; break;
-                }
-                final double bdtol = errbig / rho;
-                for (int j = 0; j < n; j++) {
-                    double bdtest = bdtol;
-                    if (newPoint.getEntry(j) == lowerDifference.getEntry(j)) {
-                        bdtest = work1.getEntry(j);
-                    }
-                    if (newPoint.getEntry(j) == upperDifference.getEntry(j)) {
-                        bdtest = -work1.getEntry(j);
-                    }
-                    if (bdtest < bdtol) {
-                        double curv = modelSecondDerivativesValues.getEntry((j + j * j) / 2);
-                        for (int k = 0; k < npt; k++) {
-                            // Computing 2nd power
-                            final double d1 = interpolationPoints.getEntry(k, j);
-                            curv += modelSecondDerivativesParameters.getEntry(k) * (d1 * d1);
-                        }
-                        bdtest += HALF * curv * rho;
-                        if (bdtest < bdtol) {
-                            state = 650; break;
-                        }
-                        // throw new PathIsExploredException(); // XXX
-                    }
-                }
-                state = 680; break;
-            }
-            ++ntrits;
-
-            // Severe cancellation is likely to occur if XOPT is too far from XBASE.
-            // If the following test holds, then XBASE is shifted so that XOPT becomes
-            // zero. The appropriate changes are made to BMAT and to the second
-            // derivatives of the current model, beginning with the changes to BMAT
-            // that do not depend on ZMAT. VLAG is used temporarily for working space.
-
-        }
-        case 90: {
-            printState(90); // XXX
-            if (dsq <= xoptsq * ONE_OVER_A_THOUSAND) {
-                final double fracsq = xoptsq * ONE_OVER_FOUR;
-                double sumpq = ZERO;
-                // final RealVector sumVector
-                //     = new ArrayRealVector(npt, -HALF * xoptsq).add(interpolationPoints.operate(trustRegionCenter));
-                for (int k = 0; k < npt; k++) {
-                    sumpq += modelSecondDerivativesParameters.getEntry(k);
-                    double sum = -HALF * xoptsq;
-                    for (int i = 0; i < n; i++) {
-                        sum += interpolationPoints.getEntry(k, i) * trustRegionCenterOffset.getEntry(i);
-                    }
-                    // sum = sumVector.getEntry(k); // XXX "testAckley" and "testDiffPow" fail.
-                    work2.setEntry(k, sum);
-                    final double temp = fracsq - HALF * sum;
-                    for (int i = 0; i < n; i++) {
-                        work1.setEntry(i, bMatrix.getEntry(k, i));
-                        lagrangeValuesAtNewPoint.setEntry(i, sum * interpolationPoints.getEntry(k, i) + temp * trustRegionCenterOffset.getEntry(i));
-                        final int ip = npt + i;
-                        for (int j = 0; j <= i; j++) {
-                            bMatrix.setEntry(ip, j,
-                                          bMatrix.getEntry(ip, j)
-                                          + work1.getEntry(i) * lagrangeValuesAtNewPoint.getEntry(j)
-                                          + lagrangeValuesAtNewPoint.getEntry(i) * work1.getEntry(j));
-                        }
-                    }
-                }
-
-                // Then the revisions of BMAT that depend on ZMAT are calculated.
-
-                for (int m = 0; m < nptm; m++) {
-                    double sumz = ZERO;
-                    double sumw = ZERO;
-                    for (int k = 0; k < npt; k++) {
-                        sumz += zMatrix.getEntry(k, m);
-                        lagrangeValuesAtNewPoint.setEntry(k, work2.getEntry(k) * zMatrix.getEntry(k, m));
-                        sumw += lagrangeValuesAtNewPoint.getEntry(k);
-                    }
-                    for (int j = 0; j < n; j++) {
-                        double sum = (fracsq * sumz - HALF * sumw) * trustRegionCenterOffset.getEntry(j);
-                        for (int k = 0; k < npt; k++) {
-                            sum += lagrangeValuesAtNewPoint.getEntry(k) * interpolationPoints.getEntry(k, j);
-                        }
-                        work1.setEntry(j, sum);
-                        for (int k = 0; k < npt; k++) {
-                            bMatrix.setEntry(k, j,
-                                          bMatrix.getEntry(k, j)
-                                          + sum * zMatrix.getEntry(k, m));
-                        }
-                    }
-                    for (int i = 0; i < n; i++) {
-                        final int ip = i + npt;
-                        final double temp = work1.getEntry(i);
-                        for (int j = 0; j <= i; j++) {
-                            bMatrix.setEntry(ip, j,
-                                          bMatrix.getEntry(ip, j)
-                                          + temp * work1.getEntry(j));
-                        }
-                    }
-                }
-
-                // The following instructions complete the shift, including the changes
-                // to the second derivative parameters of the quadratic model.
-
-                int ih = 0;
-                for (int j = 0; j < n; j++) {
-                    work1.setEntry(j, -HALF * sumpq * trustRegionCenterOffset.getEntry(j));
-                    for (int k = 0; k < npt; k++) {
-                        work1.setEntry(j, work1.getEntry(j) + modelSecondDerivativesParameters.getEntry(k) * interpolationPoints.getEntry(k, j));
-                        interpolationPoints.setEntry(k, j, interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j));
-                    }
-                    for (int i = 0; i <= j; i++) {
-                         modelSecondDerivativesValues.setEntry(ih,
-                                    modelSecondDerivativesValues.getEntry(ih)
-                                    + work1.getEntry(i) * trustRegionCenterOffset.getEntry(j)
-                                    + trustRegionCenterOffset.getEntry(i) * work1.getEntry(j));
-                        bMatrix.setEntry(npt + i, j, bMatrix.getEntry(npt + j, i));
-                        ih++;
-                    }
-                }
-                for (int i = 0; i < n; i++) {
-                    originShift.setEntry(i, originShift.getEntry(i) + trustRegionCenterOffset.getEntry(i));
-                    newPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                    lowerDifference.setEntry(i, lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                    upperDifference.setEntry(i, upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                    trustRegionCenterOffset.setEntry(i, ZERO);
-                }
-                xoptsq = ZERO;
-            }
-            if (ntrits == 0) {
-                state = 210; break;
-            }
-            state = 230; break;
-
-            // XBASE is also moved to XOPT by a call of RESCUE. This calculation is
-            // more expensive than the previous shift, because new matrices BMAT and
-            // ZMAT are generated from scratch, which may include the replacement of
-            // interpolation points whose positions seem to be causing near linear
-            // dependence in the interpolation conditions. Therefore RESCUE is called
-            // only if rounding errors have reduced by at least a factor of two the
-            // denominator of the formula for updating the H matrix. It provides a
-            // useful safeguard, but is not invoked in most applications of BOBYQA.
-
-        }
-        case 210: {
-            printState(210); // XXX
-            // Pick two alternative vectors of variables, relative to XBASE, that
-            // are suitable as new positions of the KNEW-th interpolation point.
-            // Firstly, XNEW is set to the point on a line through XOPT and another
-            // interpolation point that minimizes the predicted value of the next
-            // denominator, subject to ||XNEW - XOPT|| .LEQ. ADELT and to the SL
-            // and SU bounds. Secondly, XALT is set to the best feasible point on
-            // a constrained version of the Cauchy step of the KNEW-th Lagrange
-            // function, the corresponding value of the square of this function
-            // being returned in CAUCHY. The choice between these alternatives is
-            // going to be made when the denominator is calculated.
-
-            final double[] alphaCauchy = altmov(knew, adelt);
-            alpha = alphaCauchy[0];
-            cauchy = alphaCauchy[1];
-
-            for (int i = 0; i < n; i++) {
-                trialStepPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-            }
-
-            // Calculate VLAG and BETA for the current choice of D. The scalar
-            // product of D with XPT(K,.) is going to be held in W(NPT+K) for
-            // use when VQUAD is calculated.
-
-        }
-        case 230: {
-            printState(230); // XXX
-            for (int k = 0; k < npt; k++) {
-                double suma = ZERO;
-                double sumb = ZERO;
-                double sum = ZERO;
-                for (int j = 0; j < n; j++) {
-                    suma += interpolationPoints.getEntry(k, j) * trialStepPoint.getEntry(j);
-                    sumb += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
-                    sum += bMatrix.getEntry(k, j) * trialStepPoint.getEntry(j);
-                }
-                work3.setEntry(k, suma * (HALF * suma + sumb));
-                lagrangeValuesAtNewPoint.setEntry(k, sum);
-                work2.setEntry(k, suma);
-            }
-            beta = ZERO;
-            for (int m = 0; m < nptm; m++) {
-                double sum = ZERO;
-                for (int k = 0; k < npt; k++) {
-                    sum += zMatrix.getEntry(k, m) * work3.getEntry(k);
-                }
-                beta -= sum * sum;
-                for (int k = 0; k < npt; k++) {
-                    lagrangeValuesAtNewPoint.setEntry(k, lagrangeValuesAtNewPoint.getEntry(k) + sum * zMatrix.getEntry(k, m));
-                }
-            }
-            dsq = ZERO;
-            double bsum = ZERO;
-            double dx = ZERO;
-            for (int j = 0; j < n; j++) {
-                // Computing 2nd power
-                final double d1 = trialStepPoint.getEntry(j);
-                dsq += d1 * d1;
-                double sum = ZERO;
-                for (int k = 0; k < npt; k++) {
-                    sum += work3.getEntry(k) * bMatrix.getEntry(k, j);
-                }
-                bsum += sum * trialStepPoint.getEntry(j);
-                final int jp = npt + j;
-                for (int i = 0; i < n; i++) {
-                    sum += bMatrix.getEntry(jp, i) * trialStepPoint.getEntry(i);
-                }
-                lagrangeValuesAtNewPoint.setEntry(jp, sum);
-                bsum += sum * trialStepPoint.getEntry(j);
-                dx += trialStepPoint.getEntry(j) * trustRegionCenterOffset.getEntry(j);
-            }
-
-            beta = dx * dx + dsq * (xoptsq + dx + dx + HALF * dsq) + beta - bsum; // Original
-            // beta += dx * dx + dsq * (xoptsq + dx + dx + HALF * dsq) - bsum; // XXX "testAckley" and "testDiffPow" fail.
-            // beta = dx * dx + dsq * (xoptsq + 2 * dx + HALF * dsq) + beta - bsum; // XXX "testDiffPow" fails.
-
-            lagrangeValuesAtNewPoint.setEntry(trustRegionCenterInterpolationPointIndex,
-                          lagrangeValuesAtNewPoint.getEntry(trustRegionCenterInterpolationPointIndex) + ONE);
-
-            // If NTRITS is zero, the denominator may be increased by replacing
-            // the step D of ALTMOV by a Cauchy step. Then RESCUE may be called if
-            // rounding errors have damaged the chosen denominator.
-
-            if (ntrits == 0) {
-                // Computing 2nd power
-                final double d1 = lagrangeValuesAtNewPoint.getEntry(knew);
-                denom = d1 * d1 + alpha * beta;
-                if (denom < cauchy && cauchy > ZERO) {
-                    for (int i = 0; i < n; i++) {
-                        newPoint.setEntry(i, alternativeNewPoint.getEntry(i));
-                        trialStepPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                    }
-                    cauchy = ZERO; // XXX Useful statement?
-                    state = 230; break;
-                }
-                // Alternatively, if NTRITS is positive, then set KNEW to the index of
-                // the next interpolation point to be deleted to make room for a trust
-                // region step. Again RESCUE may be called if rounding errors have damaged
-                // the chosen denominator, which is the reason for attempting to select
-                // KNEW before calculating the next value of the objective function.
-
-            } else {
-                final double delsq = delta * delta;
-                scaden = ZERO;
-                biglsq = ZERO;
-                knew = 0;
-                for (int k = 0; k < npt; k++) {
-                    if (k == trustRegionCenterInterpolationPointIndex) {
-                        continue;
-                    }
-                    double hdiag = ZERO;
-                    for (int m = 0; m < nptm; m++) {
-                        // Computing 2nd power
-                        final double d1 = zMatrix.getEntry(k, m);
-                        hdiag += d1 * d1;
-                    }
-                    // Computing 2nd power
-                    final double d2 = lagrangeValuesAtNewPoint.getEntry(k);
-                    final double den = beta * hdiag + d2 * d2;
-                    distsq = ZERO;
-                    for (int j = 0; j < n; j++) {
-                        // Computing 2nd power
-                        final double d3 = interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j);
-                        distsq += d3 * d3;
-                    }
-                    // Computing MAX
-                    // Computing 2nd power
-                    final double d4 = distsq / delsq;
-                    final double temp = FastMath.max(ONE, d4 * d4);
-                    if (temp * den > scaden) {
-                        scaden = temp * den;
-                        knew = k;
-                        denom = den;
-                    }
-                    // Computing MAX
-                    // Computing 2nd power
-                    final double d5 = lagrangeValuesAtNewPoint.getEntry(k);
-                    biglsq = FastMath.max(biglsq, temp * (d5 * d5));
-                }
-            }
-
-            // Put the variables for the next calculation of the objective function
-            //   in XNEW, with any adjustments for the bounds.
-
-            // Calculate the value of the objective function at XBASE+XNEW, unless
-            //   the limit on the number of calculations of F has been reached.
-
-        }
-        case 360: {
-            printState(360); // XXX
-            for (int i = 0; i < n; i++) {
-                // Computing MIN
-                // Computing MAX
-                final double d3 = lowerBound[i];
-                final double d4 = originShift.getEntry(i) + newPoint.getEntry(i);
-                final double d1 = FastMath.max(d3, d4);
-                final double d2 = upperBound[i];
-                currentBest.setEntry(i, FastMath.min(d1, d2));
-                if (newPoint.getEntry(i) == lowerDifference.getEntry(i)) {
-                    currentBest.setEntry(i, lowerBound[i]);
-                }
-                if (newPoint.getEntry(i) == upperDifference.getEntry(i)) {
-                    currentBest.setEntry(i, upperBound[i]);
-                }
-            }
-
-            f = computeObjectiveValue(currentBest.toArray());
-
-            if (!isMinimize) {
-                f = -f;
-            }
-            if (ntrits == -1) {
-                fsave = f;
-                state = 720; break;
-            }
-
-            // Use the quadratic model to predict the change in F due to the step D,
-            //   and set DIFF to the error of this prediction.
-
-            final double fopt = fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex);
-            double vquad = ZERO;
-            int ih = 0;
-            for (int j = 0; j < n; j++) {
-                vquad += trialStepPoint.getEntry(j) * gradientAtTrustRegionCenter.getEntry(j);
-                for (int i = 0; i <= j; i++) {
-                    double temp = trialStepPoint.getEntry(i) * trialStepPoint.getEntry(j);
-                    if (i == j) {
-                        temp *= HALF;
-                    }
-                    vquad += modelSecondDerivativesValues.getEntry(ih) * temp;
-                    ih++;
-                }
-            }
-            for (int k = 0; k < npt; k++) {
-                // Computing 2nd power
-                final double d1 = work2.getEntry(k);
-                final double d2 = d1 * d1; // "d1" must be squared first to prevent test failures.
-                vquad += HALF * modelSecondDerivativesParameters.getEntry(k) * d2;
-            }
-            final double diff = f - fopt - vquad;
-            diffc = diffb;
-            diffb = diffa;
-            diffa = FastMath.abs(diff);
-            if (dnorm > rho) {
-                nfsav = getEvaluations();
-            }
-
-            // Pick the next value of DELTA after a trust region step.
-
-            if (ntrits > 0) {
-                if (vquad >= ZERO) {
-                    throw new MathIllegalStateException(LocalizedFormats.TRUST_REGION_STEP_FAILED, vquad);
-                }
-                ratio = (f - fopt) / vquad;
-                final double hDelta = HALF * delta;
-                if (ratio <= ONE_OVER_TEN) {
-                    // Computing MIN
-                    delta = FastMath.min(hDelta, dnorm);
-                } else if (ratio <= .7) {
-                    // Computing MAX
-                    delta = FastMath.max(hDelta, dnorm);
-                } else {
-                    // Computing MAX
-                    delta = FastMath.max(hDelta, 2 * dnorm);
-                }
-                if (delta <= rho * 1.5) {
-                    delta = rho;
-                }
-
-                // Recalculate KNEW and DENOM if the new F is less than FOPT.
-
-                if (f < fopt) {
-                    final int ksav = knew;
-                    final double densav = denom;
-                    final double delsq = delta * delta;
-                    scaden = ZERO;
-                    biglsq = ZERO;
-                    knew = 0;
-                    for (int k = 0; k < npt; k++) {
-                        double hdiag = ZERO;
-                        for (int m = 0; m < nptm; m++) {
-                            // Computing 2nd power
-                            final double d1 = zMatrix.getEntry(k, m);
-                            hdiag += d1 * d1;
-                        }
-                        // Computing 2nd power
-                        final double d1 = lagrangeValuesAtNewPoint.getEntry(k);
-                        final double den = beta * hdiag + d1 * d1;
-                        distsq = ZERO;
-                        for (int j = 0; j < n; j++) {
-                            // Computing 2nd power
-                            final double d2 = interpolationPoints.getEntry(k, j) - newPoint.getEntry(j);
-                            distsq += d2 * d2;
-                        }
-                        // Computing MAX
-                        // Computing 2nd power
-                        final double d3 = distsq / delsq;
-                        final double temp = FastMath.max(ONE, d3 * d3);
-                        if (temp * den > scaden) {
-                            scaden = temp * den;
-                            knew = k;
-                            denom = den;
-                        }
-                        // Computing MAX
-                        // Computing 2nd power
-                        final double d4 = lagrangeValuesAtNewPoint.getEntry(k);
-                        final double d5 = temp * (d4 * d4);
-                        biglsq = FastMath.max(biglsq, d5);
-                    }
-                    if (scaden <= HALF * biglsq) {
-                        knew = ksav;
-                        denom = densav;
-                    }
-                }
-            }
-
-            // Update BMAT and ZMAT, so that the KNEW-th interpolation point can be
-            // moved. Also update the second derivative terms of the model.
-
-            update(beta, denom, knew);
-
-            ih = 0;
-            final double pqold = modelSecondDerivativesParameters.getEntry(knew);
-            modelSecondDerivativesParameters.setEntry(knew, ZERO);
-            for (int i = 0; i < n; i++) {
-                final double temp = pqold * interpolationPoints.getEntry(knew, i);
-                for (int j = 0; j <= i; j++) {
-                    modelSecondDerivativesValues.setEntry(ih, modelSecondDerivativesValues.getEntry(ih) + temp * interpolationPoints.getEntry(knew, j));
-                    ih++;
-                }
-            }
-            for (int m = 0; m < nptm; m++) {
-                final double temp = diff * zMatrix.getEntry(knew, m);
-                for (int k = 0; k < npt; k++) {
-                    modelSecondDerivativesParameters.setEntry(k, modelSecondDerivativesParameters.getEntry(k) + temp * zMatrix.getEntry(k, m));
-                }
-            }
-
-            // Include the new interpolation point, and make the changes to GOPT at
-            // the old XOPT that are caused by the updating of the quadratic model.
-
-            fAtInterpolationPoints.setEntry(knew,  f);
-            for (int i = 0; i < n; i++) {
-                interpolationPoints.setEntry(knew, i, newPoint.getEntry(i));
-                work1.setEntry(i, bMatrix.getEntry(knew, i));
-            }
-            for (int k = 0; k < npt; k++) {
-                double suma = ZERO;
-                for (int m = 0; m < nptm; m++) {
-                    suma += zMatrix.getEntry(knew, m) * zMatrix.getEntry(k, m);
-                }
-                double sumb = ZERO;
-                for (int j = 0; j < n; j++) {
-                    sumb += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
-                }
-                final double temp = suma * sumb;
-                for (int i = 0; i < n; i++) {
-                    work1.setEntry(i, work1.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
-                }
-            }
-            for (int i = 0; i < n; i++) {
-                gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + diff * work1.getEntry(i));
-            }
-
-            // Update XOPT, GOPT and KOPT if the new calculated F is less than FOPT.
-
-            if (f < fopt) {
-                trustRegionCenterInterpolationPointIndex = knew;
-                xoptsq = ZERO;
-                ih = 0;
-                for (int j = 0; j < n; j++) {
-                    trustRegionCenterOffset.setEntry(j, newPoint.getEntry(j));
-                    // Computing 2nd power
-                    final double d1 = trustRegionCenterOffset.getEntry(j);
-                    xoptsq += d1 * d1;
-                    for (int i = 0; i <= j; i++) {
-                        if (i < j) {
-                            gradientAtTrustRegionCenter.setEntry(j, gradientAtTrustRegionCenter.getEntry(j) + modelSecondDerivativesValues.getEntry(ih) * trialStepPoint.getEntry(i));
-                        }
-                        gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + modelSecondDerivativesValues.getEntry(ih) * trialStepPoint.getEntry(j));
-                        ih++;
-                    }
-                }
-                for (int k = 0; k < npt; k++) {
-                    double temp = ZERO;
-                    for (int j = 0; j < n; j++) {
-                        temp += interpolationPoints.getEntry(k, j) * trialStepPoint.getEntry(j);
-                    }
-                    temp *= modelSecondDerivativesParameters.getEntry(k);
-                    for (int i = 0; i < n; i++) {
-                        gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
-                    }
-                }
-            }
-
-            // Calculate the parameters of the least Frobenius norm interpolant to
-            // the current data, the gradient of this interpolant at XOPT being put
-            // into VLAG(NPT+I), I=1,2,...,N.
-
-            if (ntrits > 0) {
-                for (int k = 0; k < npt; k++) {
-                    lagrangeValuesAtNewPoint.setEntry(k, fAtInterpolationPoints.getEntry(k) - fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex));
-                    work3.setEntry(k, ZERO);
-                }
-                for (int j = 0; j < nptm; j++) {
-                    double sum = ZERO;
-                    for (int k = 0; k < npt; k++) {
-                        sum += zMatrix.getEntry(k, j) * lagrangeValuesAtNewPoint.getEntry(k);
-                    }
-                    for (int k = 0; k < npt; k++) {
-                        work3.setEntry(k, work3.getEntry(k) + sum * zMatrix.getEntry(k, j));
-                    }
-                }
-                for (int k = 0; k < npt; k++) {
-                    double sum = ZERO;
-                    for (int j = 0; j < n; j++) {
-                        sum += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
-                    }
-                    work2.setEntry(k, work3.getEntry(k));
-                    work3.setEntry(k, sum * work3.getEntry(k));
-                }
-                double gqsq = ZERO;
-                double gisq = ZERO;
-                for (int i = 0; i < n; i++) {
-                    double sum = ZERO;
-                    for (int k = 0; k < npt; k++) {
-                        sum += bMatrix.getEntry(k, i) *
-                            lagrangeValuesAtNewPoint.getEntry(k) + interpolationPoints.getEntry(k, i) * work3.getEntry(k);
-                    }
-                    if (trustRegionCenterOffset.getEntry(i) == lowerDifference.getEntry(i)) {
-                        // Computing MIN
-                        // Computing 2nd power
-                        final double d1 = FastMath.min(ZERO, gradientAtTrustRegionCenter.getEntry(i));
-                        gqsq += d1 * d1;
-                        // Computing 2nd power
-                        final double d2 = FastMath.min(ZERO, sum);
-                        gisq += d2 * d2;
-                    } else if (trustRegionCenterOffset.getEntry(i) == upperDifference.getEntry(i)) {
-                        // Computing MAX
-                        // Computing 2nd power
-                        final double d1 = FastMath.max(ZERO, gradientAtTrustRegionCenter.getEntry(i));
-                        gqsq += d1 * d1;
-                        // Computing 2nd power
-                        final double d2 = FastMath.max(ZERO, sum);
-                        gisq += d2 * d2;
-                    } else {
-                        // Computing 2nd power
-                        final double d1 = gradientAtTrustRegionCenter.getEntry(i);
-                        gqsq += d1 * d1;
-                        gisq += sum * sum;
-                    }
-                    lagrangeValuesAtNewPoint.setEntry(npt + i, sum);
-                }
-
-                // Test whether to replace the new quadratic model by the least Frobenius
-                // norm interpolant, making the replacement if the test is satisfied.
-
-                ++itest;
-                if (gqsq < TEN * gisq) {
-                    itest = 0;
-                }
-                if (itest >= 3) {
-                    for (int i = 0, max = FastMath.max(npt, nh); i < max; i++) {
-                        if (i < n) {
-                            gradientAtTrustRegionCenter.setEntry(i, lagrangeValuesAtNewPoint.getEntry(npt + i));
-                        }
-                        if (i < npt) {
-                            modelSecondDerivativesParameters.setEntry(i, work2.getEntry(i));
-                        }
-                        if (i < nh) {
-                            modelSecondDerivativesValues.setEntry(i, ZERO);
-                        }
-                        itest = 0;
-                    }
-                }
-            }
-
-            // If a trust region step has provided a sufficient decrease in F, then
-            // branch for another trust region calculation. The case NTRITS=0 occurs
-            // when the new interpolation point was reached by an alternative step.
-
-            if (ntrits == 0) {
-                state = 60; break;
-            }
-            if (f <= fopt + ONE_OVER_TEN * vquad) {
-                state = 60; break;
-            }
-
-            // Alternatively, find out if the interpolation points are close enough
-            //   to the best point so far.
-
-            // Computing MAX
-            // Computing 2nd power
-            final double d1 = TWO * delta;
-            // Computing 2nd power
-            final double d2 = TEN * rho;
-            distsq = FastMath.max(d1 * d1, d2 * d2);
-        }
-        case 650: {
-            printState(650); // XXX
-            knew = -1;
-            for (int k = 0; k < npt; k++) {
-                double sum = ZERO;
-                for (int j = 0; j < n; j++) {
-                    // Computing 2nd power
-                    final double d1 = interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j);
-                    sum += d1 * d1;
-                }
-                if (sum > distsq) {
-                    knew = k;
-                    distsq = sum;
-                }
-            }
-
-            // If KNEW is positive, then ALTMOV finds alternative new positions for
-            // the KNEW-th interpolation point within distance ADELT of XOPT. It is
-            // reached via label 90. Otherwise, there is a branch to label 60 for
-            // another trust region iteration, unless the calculations with the
-            // current RHO are complete.
-
-            if (knew >= 0) {
-                final double dist = FastMath.sqrt(distsq);
-                if (ntrits == -1) {
-                    // Computing MIN
-                    delta = FastMath.min(ONE_OVER_TEN * delta, HALF * dist);
-                    if (delta <= rho * 1.5) {
-                        delta = rho;
-                    }
-                }
-                ntrits = 0;
-                // Computing MAX
-                // Computing MIN
-                final double d1 = FastMath.min(ONE_OVER_TEN * dist, delta);
-                adelt = FastMath.max(d1, rho);
-                dsq = adelt * adelt;
-                state = 90; break;
-            }
-            if (ntrits == -1) {
-                state = 680; break;
-            }
-            if (ratio > ZERO) {
-                state = 60; break;
-            }
-            if (FastMath.max(delta, dnorm) > rho) {
-                state = 60; break;
-            }
-
-            // The calculations with the current value of RHO are complete. Pick the
-            //   next values of RHO and DELTA.
-        }
-        case 680: {
-            printState(680); // XXX
-            if (rho > stoppingTrustRegionRadius) {
-                delta = HALF * rho;
-                ratio = rho / stoppingTrustRegionRadius;
-                if (ratio <= SIXTEEN) {
-                    rho = stoppingTrustRegionRadius;
-                } else if (ratio <= TWO_HUNDRED_FIFTY) {
-                    rho = FastMath.sqrt(ratio) * stoppingTrustRegionRadius;
-                } else {
-                    rho *= ONE_OVER_TEN;
-                }
-                delta = FastMath.max(delta, rho);
-                ntrits = 0;
-                nfsav = getEvaluations();
-                state = 60; break;
-            }
-
-            // Return from the calculation, after another Newton-Raphson step, if
-            //   it is too short to have been tried before.
-
-            if (ntrits == -1) {
-                state = 360; break;
-            }
-        }
-        case 720: {
-            printState(720); // XXX
-            if (fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex) <= fsave) {
-                for (int i = 0; i < n; i++) {
-                    // Computing MIN
-                    // Computing MAX
-                    final double d3 = lowerBound[i];
-                    final double d4 = originShift.getEntry(i) + trustRegionCenterOffset.getEntry(i);
-                    final double d1 = FastMath.max(d3, d4);
-                    final double d2 = upperBound[i];
-                    currentBest.setEntry(i, FastMath.min(d1, d2));
-                    if (trustRegionCenterOffset.getEntry(i) == lowerDifference.getEntry(i)) {
-                        currentBest.setEntry(i, lowerBound[i]);
-                    }
-                    if (trustRegionCenterOffset.getEntry(i) == upperDifference.getEntry(i)) {
-                        currentBest.setEntry(i, upperBound[i]);
-                    }
-                }
-                f = fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex);
-            }
-            return f;
-        }
-        default: {
-            throw new MathIllegalStateException(LocalizedFormats.SIMPLE_MESSAGE, "bobyqb");
-        }}
-    } // bobyqb
-
-    // ----------------------------------------------------------------------------------------
-
-    /**
-     *     The arguments N, NPT, XPT, XOPT, BMAT, ZMAT, NDIM, SL and SU all have
-     *       the same meanings as the corresponding arguments of BOBYQB.
-     *     KOPT is the index of the optimal interpolation point.
-     *     KNEW is the index of the interpolation point that is going to be moved.
-     *     ADELT is the current trust region bound.
-     *     XNEW will be set to a suitable new position for the interpolation point
-     *       XPT(KNEW,.). Specifically, it satisfies the SL, SU and trust region
-     *       bounds and it should provide a large denominator in the next call of
-     *       UPDATE. The step XNEW-XOPT from XOPT is restricted to moves along the
-     *       straight lines through XOPT and another interpolation point.
-     *     XALT also provides a large value of the modulus of the KNEW-th Lagrange
-     *       function subject to the constraints that have been mentioned, its main
-     *       difference from XNEW being that XALT-XOPT is a constrained version of
-     *       the Cauchy step within the trust region. An exception is that XALT is
-     *       not calculated if all components of GLAG (see below) are zero.
-     *     ALPHA will be set to the KNEW-th diagonal element of the H matrix.
-     *     CAUCHY will be set to the square of the KNEW-th Lagrange function at
-     *       the step XALT-XOPT from XOPT for the vector XALT that is returned,
-     *       except that CAUCHY is set to zero if XALT is not calculated.
-     *     GLAG is a working space vector of length N for the gradient of the
-     *       KNEW-th Lagrange function at XOPT.
-     *     HCOL is a working space vector of length NPT for the second derivative
-     *       coefficients of the KNEW-th Lagrange function.
-     *     W is a working space vector of length 2N that is going to hold the
-     *       constrained Cauchy step from XOPT of the Lagrange function, followed
-     *       by the downhill version of XALT when the uphill step is calculated.
-     *
-     *     Set the first NPT components of W to the leading elements of the
-     *     KNEW-th column of the H matrix.
-     * @param knew Index of the interpolation point that is going to be moved.
-     * @param adelt Current trust region bound.
-     * @return the values of {@code alpha} and {@code cauchy} described above.
-     */
-    private double[] altmov(int knew, double adelt) {
-        printMethod(); // XXX
-
-        final int n = currentBest.getDimension();
-        final int npt = numberOfInterpolationPoints;
-
-        final ArrayRealVector glag = new ArrayRealVector(n);
-        final ArrayRealVector hcol = new ArrayRealVector(npt);
-
-        final ArrayRealVector work1 = new ArrayRealVector(n);
-        final ArrayRealVector work2 = new ArrayRealVector(n);
-
-        for (int k = 0; k < npt; k++) {
-            hcol.setEntry(k, ZERO);
-        }
-        for (int j = 0, max = npt - n - 1; j < max; j++) {
-            final double tmp = zMatrix.getEntry(knew, j);
-            for (int k = 0; k < npt; k++) {
-                hcol.setEntry(k, hcol.getEntry(k) + tmp * zMatrix.getEntry(k, j));
-            }
-        }
-        final double alpha = hcol.getEntry(knew);
-        final double ha = HALF * alpha;
-
-        // Calculate the gradient of the KNEW-th Lagrange function at XOPT.
-
-        for (int i = 0; i < n; i++) {
-            glag.setEntry(i, bMatrix.getEntry(knew, i));
-        }
-        for (int k = 0; k < npt; k++) {
-            double tmp = ZERO;
-            for (int j = 0; j < n; j++) {
-                tmp += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
-            }
-            tmp *= hcol.getEntry(k);
-            for (int i = 0; i < n; i++) {
-                glag.setEntry(i, glag.getEntry(i) + tmp * interpolationPoints.getEntry(k, i));
-            }
-        }
-
-        // Search for a large denominator along the straight lines through XOPT
-        // and another interpolation point. SLBD and SUBD will be lower and upper
-        // bounds on the step along each of these lines in turn. PREDSQ will be
-        // set to the square of the predicted denominator for each line. PRESAV
-        // will be set to the largest admissible value of PREDSQ that occurs.
-
-        double presav = ZERO;
-        double step = Double.NaN;
-        int ksav = 0;
-        int ibdsav = 0;
-        double stpsav = 0;
-        for (int k = 0; k < npt; k++) {
-            if (k == trustRegionCenterInterpolationPointIndex) {
-                continue;
-            }
-            double dderiv = ZERO;
-            double distsq = ZERO;
-            for (int i = 0; i < n; i++) {
-                final double tmp = interpolationPoints.getEntry(k, i) - trustRegionCenterOffset.getEntry(i);
-                dderiv += glag.getEntry(i) * tmp;
-                distsq += tmp * tmp;
-            }
-            double subd = adelt / FastMath.sqrt(distsq);
-            double slbd = -subd;
-            int ilbd = 0;
-            int iubd = 0;
-            final double sumin = FastMath.min(ONE, subd);
-
-            // Revise SLBD and SUBD if necessary because of the bounds in SL and SU.
-
-            for (int i = 0; i < n; i++) {
-                final double tmp = interpolationPoints.getEntry(k, i) - trustRegionCenterOffset.getEntry(i);
-                if (tmp > ZERO) {
-                    if (slbd * tmp < lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
-                        slbd = (lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp;
-                        ilbd = -i - 1;
-                    }
-                    if (subd * tmp > upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
-                        // Computing MAX
-                        subd = FastMath.max(sumin,
-                                            (upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp);
-                        iubd = i + 1;
-                    }
-                } else if (tmp < ZERO) {
-                    if (slbd * tmp > upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
-                        slbd = (upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp;
-                        ilbd = i + 1;
-                    }
-                    if (subd * tmp < lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
-                        // Computing MAX
-                        subd = FastMath.max(sumin,
-                                            (lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp);
-                        iubd = -i - 1;
-                    }
-                }
-            }
-
-            // Seek a large modulus of the KNEW-th Lagrange function when the index
-            // of the other interpolation point on the line through XOPT is KNEW.
-
-            step = slbd;
-            int isbd = ilbd;
-            double vlag = Double.NaN;
-            if (k == knew) {
-                final double diff = dderiv - ONE;
-                vlag = slbd * (dderiv - slbd * diff);
-                final double d1 = subd * (dderiv - subd * diff);
-                if (FastMath.abs(d1) > FastMath.abs(vlag)) {
-                    step = subd;
-                    vlag = d1;
-                    isbd = iubd;
-                }
-                final double d2 = HALF * dderiv;
-                final double d3 = d2 - diff * slbd;
-                final double d4 = d2 - diff * subd;
-                if (d3 * d4 < ZERO) {
-                    final double d5 = d2 * d2 / diff;
-                    if (FastMath.abs(d5) > FastMath.abs(vlag)) {
-                        step = d2 / diff;
-                        vlag = d5;
-                        isbd = 0;
-                    }
-                }
-
-                // Search along each of the other lines through XOPT and another point.
-
-            } else {
-                vlag = slbd * (ONE - slbd);
-                final double tmp = subd * (ONE - subd);
-                if (FastMath.abs(tmp) > FastMath.abs(vlag)) {
-                    step = subd;
-                    vlag = tmp;
-                    isbd = iubd;
-                }
-                if (subd > HALF && FastMath.abs(vlag) < ONE_OVER_FOUR) {
-                    step = HALF;
-                    vlag = ONE_OVER_FOUR;
-                    isbd = 0;
-                }
-                vlag *= dderiv;
-            }
-
-            // Calculate PREDSQ for the current line search and maintain PRESAV.
-
-            final double tmp = step * (ONE - step) * distsq;
-            final double predsq = vlag * vlag * (vlag * vlag + ha * tmp * tmp);
-            if (predsq > presav) {
-                presav = predsq;
-                ksav = k;
-                stpsav = step;
-                ibdsav = isbd;
-            }
-        }
-
-        // Construct XNEW in a way that satisfies the bound constraints exactly.
-
-        for (int i = 0; i < n; i++) {
-            final double tmp = trustRegionCenterOffset.getEntry(i) + stpsav * (interpolationPoints.getEntry(ksav, i) - trustRegionCenterOffset.getEntry(i));
-            newPoint.setEntry(i, FastMath.max(lowerDifference.getEntry(i),
-                                              FastMath.min(upperDifference.getEntry(i), tmp)));
-        }
-        if (ibdsav < 0) {
-            newPoint.setEntry(-ibdsav - 1, lowerDifference.getEntry(-ibdsav - 1));
-        }
-        if (ibdsav > 0) {
-            newPoint.setEntry(ibdsav - 1, upperDifference.getEntry(ibdsav - 1));
-        }
-
-        // Prepare for the iterative method that assembles the constrained Cauchy
-        // step in W. The sum of squares of the fixed components of W is formed in
-        // WFIXSQ, and the free components of W are set to BIGSTP.
-
-        final double bigstp = adelt + adelt;
-        int iflag = 0;
-        double cauchy = Double.NaN;
-        double csave = ZERO;
-        while (true) {
-            double wfixsq = ZERO;
-            double ggfree = ZERO;
-            for (int i = 0; i < n; i++) {
-                final double glagValue = glag.getEntry(i);
-                work1.setEntry(i, ZERO);
-                if (FastMath.min(trustRegionCenterOffset.getEntry(i) - lowerDifference.getEntry(i), glagValue) > ZERO ||
-                    FastMath.max(trustRegionCenterOffset.getEntry(i) - upperDifference.getEntry(i), glagValue) < ZERO) {
-                    work1.setEntry(i, bigstp);
-                    // Computing 2nd power
-                    ggfree += glagValue * glagValue;
-                }
-            }
-            if (ggfree == ZERO) {
-                return new double[] { alpha, ZERO };
-            }
-
-            // Investigate whether more components of W can be fixed.
-            final double tmp1 = adelt * adelt - wfixsq;
-            if (tmp1 > ZERO) {
-                step = FastMath.sqrt(tmp1 / ggfree);
-                ggfree = ZERO;
-                for (int i = 0; i < n; i++) {
-                    if (work1.getEntry(i) == bigstp) {
-                        final double tmp2 = trustRegionCenterOffset.getEntry(i) - step * glag.getEntry(i);
-                        if (tmp2 <= lowerDifference.getEntry(i)) {
-                            work1.setEntry(i, lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                            // Computing 2nd power
-                            final double d1 = work1.getEntry(i);
-                            wfixsq += d1 * d1;
-                        } else if (tmp2 >= upperDifference.getEntry(i)) {
-                            work1.setEntry(i, upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
-                            // Computing 2nd power
-                            final double d1 = work1.getEntry(i);
-                            wfixsq += d1 * d1;
-                        } else {
-                            // Computing 2nd power
-                            final double d1 = glag.getEntry(i);
-                            ggfree += d1 * d1;
-                        }
-                    }
-                }
-            }
-
-            // Set the remaining free components of W and all components of XALT,
-            // except that W may be scaled later.
-
-            double gw = ZERO;
-            for (int i = 0; i < n; i++) {
-                final double glagValue = glag.getEntry(i);
-                if (work1.getEntry(i) == bigstp) {
-                    work1.setEntry(i, -step * glagValue);
-                    final double min = FastMath.min(upperDifference.getEntry(i),
-                                                    trustRegionCenterOffset.getEntry(i) + work1.getEntry(i));
-                    alternativeNewPoint.setEntry(i, FastMath.max(lowerDifference.getEntry(i), min));
-                } else if (work1.getEntry(i) == ZERO) {
-                    alternativeNewPoint.setEntry(i, trustRegionCenterOffset.getEntry(i));
-                } else if (glagValue > ZERO) {
-                    alternativeNewPoint.setEntry(i, lowerDifference.getEntry(i));
-                } else {
-                    alternativeNewPoint.setEntry(i, upperDifference.getEntry(i));
-                }
-                gw += glagValue * work1.getEntry(i);
-            }
-
-            // Set CURV to the curvature of the KNEW-th Lagrange function along W.
-            // Scale W by a factor less than one if that can reduce the modulus of
-            // the Lagrange function at XOPT+W. Set CAUCHY to the final value of
-            // the square of this function.
-
-            double curv = ZERO;
-            for (int k = 0; k < npt; k++) {
-                double tmp = ZERO;
-                for (int j = 0; j < n; j++) {
-                    tmp += interpolationPoints.getEntry(k, j) * work1.getEntry(j);
-                }
-                curv += hcol.getEntry(k) * tmp * tmp;
-            }
-            if (iflag == 1) {
-                curv = -curv;
-            }
-            if (curv > -gw &&
-                curv < -gw * (ONE + FastMath.sqrt(TWO))) {
-                final double scale = -gw / curv;
-                for (int i = 0; i < n; i++) {
-                    final double tmp = trustRegionCenterOffset.getEntry(i) + scale * work1.getEntry(i);
-                    alternativeNewPoint.setEntry(i, FastMath.max(lowerDifference.getEntry(i),
-                                                    FastMath.min(upperDifference.getEntry(i), tmp)));
-                }
-                // Computing 2nd power
-                final double d1 = HALF * gw * scale;
-                cauchy = d1 * d1;
-            } else {
-                // Computing 2nd power
-                final double d1 = gw + HALF * curv;
-                cauchy = d1 * d1;
-            }
-
-            // If IFLAG is zero, then XALT is calculated as before after reversing
-            // the sign of GLAG. Thus two XALT vectors become available. The one that
-            // is chosen is the one that gives the larger value of CAUCHY.
-
-            if (iflag == 0) {
-                for (int i = 0; i < n; i++) {
-                    glag.setEntry(i, -glag.getEntry(i));
-                    work2.setEntry(i, alternativeNewPoint.getEntry(i));
-                }
-                csave = cauchy;
-                iflag = 1;
-            } else {
-                break;
-            }
-        }
-        if (csave > cauchy) {
-            for (int i = 0; i < n; i++) {
-                alternativeNewPoint.setEntry(i, work2.getEntry(i));
-            }
-            cauchy = csave;
-        }
-
-        return new double[] { alpha, cauchy };
-    } // altmov
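
A minimal standalone sketch of the componentwise clamping used above to build
XNEW and XALT (every trial coordinate is forced back into [SL(i), SU(i)]); the
class and method names are hypothetical and not part of the removed code:

    // Clamp each coordinate of a trial point into [sl[i], su[i]], as done when
    // constructing XNEW/XALT above.
    public final class BoundClampSketch {
        private BoundClampSketch() {}

        public static double[] clampToBounds(double[] trial, double[] sl, double[] su) {
            final double[] clamped = new double[trial.length];
            for (int i = 0; i < trial.length; i++) {
                clamped[i] = Math.max(sl[i], Math.min(su[i], trial[i]));
            }
            return clamped;
        }
    }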
-
-    // ----------------------------------------------------------------------------------------
-
-    /**
-     *     SUBROUTINE PRELIM sets the elements of XBASE, XPT, FVAL, GOPT, HQ, PQ,
-     *     BMAT and ZMAT for the first iteration, and it maintains the values of
-     *     NF and KOPT. The vector X is also changed by PRELIM.
-     *
-     *     The arguments N, NPT, X, XL, XU, RHOBEG, IPRINT and MAXFUN are the
-     *       same as the corresponding arguments in SUBROUTINE BOBYQA.
-     *     The arguments XBASE, XPT, FVAL, HQ, PQ, BMAT, ZMAT, NDIM, SL and SU
-     *       are the same as the corresponding arguments in BOBYQB, the elements
-     *       of SL and SU being set in BOBYQA.
-     *     GOPT is usually the gradient of the quadratic model at XOPT+XBASE, but
-     *       it is set by PRELIM to the gradient of the quadratic model at XBASE.
-     *       If XOPT is nonzero, BOBYQB will change it to its usual value later.
-     *     NF is maintained as the number of calls of CALFUN so far.
-     *     KOPT will be such that the least calculated value of F so far is at
-     *       the point XPT(KOPT,.)+XBASE in the space of the variables.
-     *
-     * @param lowerBound Lower bounds.
-     * @param upperBound Upper bounds.
-     */
-    private void prelim(double[] lowerBound,
-                        double[] upperBound) {
-        printMethod(); // XXX
-
-        final int n = currentBest.getDimension();
-        final int npt = numberOfInterpolationPoints;
-        final int ndim = bMatrix.getRowDimension();
-
-        final double rhosq = initialTrustRegionRadius * initialTrustRegionRadius;
-        final double recip = 1d / rhosq;
-        final int np = n + 1;
-
-        // Set XBASE to the initial vector of variables, and set the initial
-        // elements of XPT, BMAT, HQ, PQ and ZMAT to zero.
-
-        for (int j = 0; j < n; j++) {
-            originShift.setEntry(j, currentBest.getEntry(j));
-            for (int k = 0; k < npt; k++) {
-                interpolationPoints.setEntry(k, j, ZERO);
-            }
-            for (int i = 0; i < ndim; i++) {
-                bMatrix.setEntry(i, j, ZERO);
-            }
-        }
-        for (int i = 0, max = n * np / 2; i < max; i++) {
-            modelSecondDerivativesValues.setEntry(i, ZERO);
-        }
-        for (int k = 0; k < npt; k++) {
-            modelSecondDerivativesParameters.setEntry(k, ZERO);
-            for (int j = 0, max = npt - np; j < max; j++) {
-                zMatrix.setEntry(k, j, ZERO);
-            }
-        }
-
-        // Begin the initialization procedure. NF becomes one more than the number
-        // of function values so far. The coordinates of the displacement of the
-        // next initial interpolation point from XBASE are set in XPT(NF+1,.).
-
-        int ipt = 0;
-        int jpt = 0;
-        double fbeg = Double.NaN;
-        do {
-            final int nfm = getEvaluations();
-            final int nfx = nfm - n;
-            final int nfmm = nfm - 1;
-            final int nfxm = nfx - 1;
-            double stepa = 0;
-            double stepb = 0;
-            if (nfm <= 2 * n) {
-                if (nfm >= 1 &&
-                    nfm <= n) {
-                    stepa = initialTrustRegionRadius;
-                    if (upperDifference.getEntry(nfmm) == ZERO) {
-                        stepa = -stepa;
-                        // throw new PathIsExploredException(); // XXX
-                    }
-                    interpolationPoints.setEntry(nfm, nfmm, stepa);
-                } else if (nfm > n) {
-                    stepa = interpolationPoints.getEntry(nfx, nfxm);
-                    stepb = -initialTrustRegionRadius;
-                    if (lowerDifference.getEntry(nfxm) == ZERO) {
-                        stepb = FastMath.min(TWO * initialTrustRegionRadius, upperDifference.getEntry(nfxm));
-                        // throw new PathIsExploredException(); // XXX
-                    }
-                    if (upperDifference.getEntry(nfxm) == ZERO) {
-                        stepb = FastMath.max(-TWO * initialTrustRegionRadius, lowerDifference.getEntry(nfxm));
-                        // throw new PathIsExploredException(); // XXX
-                    }
-                    interpolationPoints.setEntry(nfm, nfxm, stepb);
-                }
-            } else {
-                final int tmp1 = (nfm - np) / n;
-                jpt = nfm - tmp1 * n - n;
-                ipt = jpt + tmp1;
-                if (ipt > n) {
-                    final int tmp2 = jpt;
-                    jpt = ipt - n;
-                    ipt = tmp2;
-//                     throw new PathIsExploredException(); // XXX
-                }
-                final int iptMinus1 = ipt - 1;
-                final int jptMinus1 = jpt - 1;
-                interpolationPoints.setEntry(nfm, iptMinus1, interpolationPoints.getEntry(ipt, iptMinus1));
-                interpolationPoints.setEntry(nfm, jptMinus1, interpolationPoints.getEntry(jpt, jptMinus1));
-            }
-
-            // Calculate the next value of F. The least function value so far and
-            // its index are required.
-
-            for (int j = 0; j < n; j++) {
-                currentBest.setEntry(j, FastMath.min(FastMath.max(lowerBound[j],
-                                                                  originShift.getEntry(j) + interpolationPoints.getEntry(nfm, j)),
-                                                     upperBound[j]));
-                if (interpolationPoints.getEntry(nfm, j) == lowerDifference.getEntry(j)) {
-                    currentBest.setEntry(j, lowerBound[j]);
-                }
-                if (interpolationPoints.getEntry(nfm, j) == upperDifference.getEntry(j)) {
-                    currentBest.setEntry(j, upperBound[j]);
-                }
-            }
-
-            final double objectiveValue = computeObjectiveValue(currentBest.toArray());
-            final double f = isMinimize ? objectiveValue : -objectiveValue;
-            final int numEval = getEvaluations(); // nfm + 1
-            fAtInterpolationPoints.setEntry(nfm, f);
-
-            if (numEval == 1) {
-                fbeg = f;
-                trustRegionCenterInterpolationPointIndex = 0;
-            } else if (f < fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex)) {
-                trustRegionCenterInterpolationPointIndex = nfm;
-            }
-
-            // Set the nonzero initial elements of BMAT and the quadratic model in the
-            // cases when NF is at most 2*N+1. If NF exceeds N+1, then the positions
-            // of the NF-th and (NF-N)-th interpolation points may be switched, in
-            // order that the function value at the first of them contributes to the
-            // off-diagonal second derivative terms of the initial quadratic model.
-
-            if (numEval <= 2 * n + 1) {
-                if (numEval >= 2 &&
-                    numEval <= n + 1) {
-                    gradientAtTrustRegionCenter.setEntry(nfmm, (f - fbeg) / stepa);
-                    if (npt < numEval + n) {
-                        final double oneOverStepA = ONE / stepa;
-                        bMatrix.setEntry(0, nfmm, -oneOverStepA);
-                        bMatrix.setEntry(nfm, nfmm, oneOverStepA);
-                        bMatrix.setEntry(npt + nfmm, nfmm, -HALF * rhosq);
-                        // throw new PathIsExploredException(); // XXX
-                    }
-                } else if (numEval >= n + 2) {
-                    final int ih = nfx * (nfx + 1) / 2 - 1;
-                    final double tmp = (f - fbeg) / stepb;
-                    final double diff = stepb - stepa;
-                    modelSecondDerivativesValues.setEntry(ih, TWO * (tmp - gradientAtTrustRegionCenter.getEntry(nfxm)) / diff);
-                    gradientAtTrustRegionCenter.setEntry(nfxm, (gradientAtTrustRegionCenter.getEntry(nfxm) * stepb - tmp * stepa) / diff);
-                    if (stepa * stepb < ZERO && f < fAtInterpolationPoints.getEntry(nfm - n)) {
-                        fAtInterpolationPoints.setEntry(nfm, fAtInterpolationPoints.getEntry(nfm - n));
-                        fAtInterpolationPoints.setEntry(nfm - n, f);
-                        if (trustRegionCenterInterpolationPointIndex == nfm) {
-                            trustRegionCenterInterpolationPointIndex = nfm - n;
-                        }
-                        interpolationPoints.setEntry(nfm - n, nfxm, stepb);
-                        interpolationPoints.setEntry(nfm, nfxm, stepa);
-                    }
-                    bMatrix.setEntry(0, nfxm, -(stepa + stepb) / (stepa * stepb));
-                    bMatrix.setEntry(nfm, nfxm, -HALF / interpolationPoints.getEntry(nfm - n, nfxm));
-                    bMatrix.setEntry(nfm - n, nfxm,
-                                  -bMatrix.getEntry(0, nfxm) - bMatrix.getEntry(nfm, nfxm));
-                    zMatrix.setEntry(0, nfxm, FastMath.sqrt(TWO) / (stepa * stepb));
-                    zMatrix.setEntry(nfm, nfxm, FastMath.sqrt(HALF) / rhosq);
-                    // zMatrix.setEntry(nfm, nfxm, FastMath.sqrt(HALF) * recip); // XXX "testAckley" and "testDiffPow" fail.
-                    zMatrix.setEntry(nfm - n, nfxm,
-                                  -zMatrix.getEntry(0, nfxm) - zMatrix.getEntry(nfm, nfxm));
-                }
-
-                // Set the off-diagonal second derivatives of the Lagrange functions and
-                // the initial quadratic model.
-
-            } else {
-                zMatrix.setEntry(0, nfxm, recip);
-                zMatrix.setEntry(nfm, nfxm, recip);
-                zMatrix.setEntry(ipt, nfxm, -recip);
-                zMatrix.setEntry(jpt, nfxm, -recip);
-
-                final int ih = ipt * (ipt - 1) / 2 + jpt - 1;
-                final double tmp = interpolationPoints.getEntry(nfm, ipt - 1) * interpolationPoints.getEntry(nfm, jpt - 1);
-                modelSecondDerivativesValues.setEntry(ih, (fbeg - fAtInterpolationPoints.getEntry(ipt) - fAtInterpolationPoints.getEntry(jpt) + f) / tmp);
-//                 throw new PathIsExploredException(); // XXX
-            }
-        } while (getEvaluations() < npt);
-    } // prelim
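
For reference, PRELIM starts by evaluating the objective at XBASE and then at
+/- RHOBEG steps along each coordinate direction (with sign adjustments at
active bounds, as handled above). A simplified sketch of that initial point
layout, ignoring the bound special cases; the class name is hypothetical:

    // First 2*n+1 interpolation offsets from XBASE: the origin, then +rho*e_i
    // and -rho*e_i along each coordinate direction i.
    public final class InitialPointsSketch {
        private InitialPointsSketch() {}

        public static double[][] initialOffsets(int n, double rho) {
            final double[][] offsets = new double[2 * n + 1][n];
            for (int i = 0; i < n; i++) {
                offsets[1 + i][i]     =  rho; // forward step along coordinate i
                offsets[1 + n + i][i] = -rho; // backward step along coordinate i
            }
            return offsets;
        }
    }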
-
-
-    // ----------------------------------------------------------------------------------------
-
-    /**
-     *     A version of the truncated conjugate gradient is applied. If a line
-     *     search is restricted by a constraint, then the procedure is restarted,
-     *     the values of the variables that are at their bounds being fixed. If
-     *     the trust region boundary is reached, then further changes may be made
-     *     to D, each one being in the two dimensional space that is spanned
-     *     by the current D and the gradient of Q at XOPT+D, staying on the trust
-     *     region boundary. Termination occurs when the reduction in Q seems to
-     *     be close to the greatest reduction that can be achieved.
-     *     The arguments N, NPT, XPT, XOPT, GOPT, HQ, PQ, SL and SU have the same
-     *       meanings as the corresponding arguments of BOBYQB.
-     *     DELTA is the trust region radius for the present calculation, which
-     *       seeks a small value of the quadratic model within distance DELTA of
-     *       XOPT subject to the bounds on the variables.
-     *     XNEW will be set to a new vector of variables that is approximately
-     *       the one that minimizes the quadratic model within the trust region
-     *       subject to the SL and SU constraints on the variables. It satisfies
-     *       as equations the bounds that become active during the calculation.
-     *     D is the calculated trial step from XOPT, generated iteratively from an
-     *       initial value of zero. Thus XNEW is XOPT+D after the final iteration.
-     *     GNEW holds the gradient of the quadratic model at XOPT+D. It is updated
-     *       when D is updated.
-     *     xbdi is a working space vector. For I=1,2,...,N, the element xbdi.get(I) is
-     *       set to -1.0, 0.0, or 1.0, the value being nonzero if and only if the
-     *       I-th variable has become fixed at a bound, the bound being SL(I) or
-     *       SU(I) in the case xbdi.get(I)=-1.0 or xbdi.get(I)=1.0, respectively. This
-     *       information is accumulated during the construction of XNEW.
-     *     The arrays S, HS and HRED are also used for working space. They hold the
-     *       current search direction, and the changes in the gradient of Q along S
-     *       and the reduced D, respectively, where the reduced D is the same as D,
-     *       except that the components of the fixed variables are zero.
-     *     DSQ will be set to the square of the length of XNEW-XOPT.
-     *     CRVMIN is set to zero if D reaches the trust region boundary. Otherwise
-     *       it is set to the least curvature of H that occurs in the conjugate
-     *       gradient searches that are not restricted by any constraints. The
-     *       value CRVMIN=-1.0D0 is set, however, if all of these searches are
-     *       constrained.
-     * @param delta
-     * @param gnew
-     * @param xbdi
-     * @param s
-     * @param hs
-     * @param hred
-     */
-    private double[] trsbox(
-            double delta,
-            ArrayRealVector gnew,
-            ArrayRealVector xbdi,
-            ArrayRealVector s,
-            ArrayRealVector hs,
-            ArrayRealVector hred
-    ) {
-        printMethod(); // XXX
-
-        final int n = currentBest.getDimension();
-        final int npt = numberOfInterpolationPoints;
-
-        double dsq = Double.NaN;
-        double crvmin = Double.NaN;
-
-        // Local variables
-        double ds;
-        int iu;
-        double dhd, dhs, cth, shs, sth, ssq, beta=0, sdec, blen;
-        int iact = -1;
-        int nact = 0;
-        double angt = 0, qred;
-        int 

<TRUNCATED>
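
The xbdi indicator described in the TRSBOX comment above (-1.0, 0.0 or 1.0 per
variable, marking a variable fixed at its lower bound, free, or fixed at its
upper bound) can be sketched in isolation. This is a hypothetical, simplified
illustration that only marks variables already sitting on a bound with the
steepest-descent direction pointing outside the box:

    // xbdi sketch: -1.0 = held at lower bound, +1.0 = held at upper bound, 0.0 = free.
    static double[] markFixedVariables(double[] x, double[] g, double[] sl, double[] su) {
        final double[] xbdi = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            if (x[i] <= sl[i] && g[i] >= 0) {
                xbdi[i] = -1.0; // -g would push the variable below sl[i]
            } else if (x[i] >= su[i] && g[i] <= 0) {
                xbdi[i] = 1.0;  // -g would push the variable above su[i]
            }
        }
        return xbdi;
    }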

[10/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizer.java
deleted file mode 100644
index 407f721..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizer.java
+++ /dev/null
@@ -1,943 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.util.Arrays;
-
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Precision;
-
-
-/**
- * This class solves a least squares problem using the Levenberg-Marquardt algorithm.
- *
- * <p>This implementation <em>should</em> work even for over-determined systems
- * (i.e. systems having more points than equations). Over-determined systems
- * are solved by ignoring the points which have the smallest impact according
- * to their jacobian column norm. Only the rank of the matrix and some loop bounds
- * are changed to implement this.</p>
- *
- * <p>The resolution engine is a simple translation of the MINPACK <a
- * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
- * changes. The changes include the over-determined resolution, the use of
- * inherited convergence checker and the Q.R. decomposition which has been
- * rewritten following the algorithm described in the
- * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
- * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
- * <p>The authors of the original fortran version are:
- * <ul>
- * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
- * <li>Burton S. Garbow</li>
- * <li>Kenneth E. Hillstrom</li>
- * <li>Jorge J. More</li>
- * </ul>
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for convenience, it
- * is reproduced below.</p>
- *
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- *
- */
-@Deprecated
-public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer {
-    /** Number of solved points. */
-    private int solvedCols;
-    /** Diagonal elements of the R matrix in the Q.R. decomposition. */
-    private double[] diagR;
-    /** Norms of the columns of the jacobian matrix. */
-    private double[] jacNorm;
-    /** Coefficients of the Householder transforms vectors. */
-    private double[] beta;
-    /** Columns permutation array. */
-    private int[] permutation;
-    /** Rank of the jacobian matrix. */
-    private int rank;
-    /** Levenberg-Marquardt parameter. */
-    private double lmPar;
-    /** Parameters evolution direction associated with lmPar. */
-    private double[] lmDir;
-    /** Positive input variable used in determining the initial step bound. */
-    private final double initialStepBoundFactor;
-    /** Desired relative error in the sum of squares. */
-    private final double costRelativeTolerance;
-    /**  Desired relative error in the approximate solution parameters. */
-    private final double parRelativeTolerance;
-    /** Desired max cosine on the orthogonality between the function vector
-     * and the columns of the jacobian. */
-    private final double orthoTolerance;
-    /** Threshold for QR ranking. */
-    private final double qrRankingThreshold;
-    /** Weighted residuals. */
-    private double[] weightedResidual;
-    /** Weighted Jacobian. */
-    private double[][] weightedJacobian;
-
-    /**
-     * Build an optimizer for least squares problems with default values
-     * for all the tuning parameters (see the {@link
-     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
-     * other constructor}).
-     * The default values for the algorithm settings are:
-     * <ul>
-     *  <li>Initial step bound factor: 100</li>
-     *  <li>Cost relative tolerance: 1e-10</li>
-     *  <li>Parameters relative tolerance: 1e-10</li>
-     *  <li>Orthogonality tolerance: 1e-10</li>
-     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
-     * </ul>
-     */
-    public LevenbergMarquardtOptimizer() {
-        this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
-    }
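
As documented above, the no-argument constructor is only a shorthand; a short
usage sketch (assuming the imports of this file) showing the equivalent
explicit configuration:

    // Both optimizers are configured identically: the no-arg constructor
    // delegates to the five-argument one with the documented defaults.
    LevenbergMarquardtOptimizer byDefault = new LevenbergMarquardtOptimizer();
    LevenbergMarquardtOptimizer explicit =
        new LevenbergMarquardtOptimizer(100,       // initial step bound factor
                                        1e-10,     // cost relative tolerance
                                        1e-10,     // parameters relative tolerance
                                        1e-10,     // orthogonality tolerance
                                        Precision.SAFE_MIN); // QR ranking threshold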
-
-    /**
-     * Constructor that allows the specification of a custom convergence
-     * checker.
-     * Note that all the usual convergence checks will be <em>disabled</em>.
-     * The default values for the algorithm settings are:
-     * <ul>
-     *  <li>Initial step bound factor: 100</li>
-     *  <li>Cost relative tolerance: 1e-10</li>
-     *  <li>Parameters relative tolerance: 1e-10</li>
-     *  <li>Orthogonality tolerance: 1e-10</li>
-     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
-     * </ul>
-     *
-     * @param checker Convergence checker.
-     */
-    public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
-        this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
-    }
-
-    /**
-     * Constructor that allows the specification of a custom convergence
-     * checker, in addition to the standard ones.
-     *
-     * @param initialStepBoundFactor Positive input variable used in
-     * determining the initial step bound. This bound is set to the
-     * product of initialStepBoundFactor and the euclidean norm of
-     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
-     * itself. In most cases factor should lie in the interval
-     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
-     * @param checker Convergence checker.
-     * @param costRelativeTolerance Desired relative error in the sum of
-     * squares.
-     * @param parRelativeTolerance Desired relative error in the approximate
-     * solution parameters.
-     * @param orthoTolerance Desired max cosine on the orthogonality between
-     * the function vector and the columns of the Jacobian.
-     * @param threshold Desired threshold for QR ranking. If the squared norm
-     * of a column vector is smaller or equal to this threshold during QR
-     * decomposition, it is considered to be a zero vector and hence the rank
-     * of the matrix is reduced.
-     */
-    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
-                                       ConvergenceChecker<PointVectorValuePair> checker,
-                                       double costRelativeTolerance,
-                                       double parRelativeTolerance,
-                                       double orthoTolerance,
-                                       double threshold) {
-        super(checker);
-        this.initialStepBoundFactor = initialStepBoundFactor;
-        this.costRelativeTolerance = costRelativeTolerance;
-        this.parRelativeTolerance = parRelativeTolerance;
-        this.orthoTolerance = orthoTolerance;
-        this.qrRankingThreshold = threshold;
-    }
-
-    /**
-     * Build an optimizer for least squares problems with default values
-     * for some of the tuning parameters (see the {@link
-     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
-     * other constructor}).
-     * The default values for the algorithm settings are:
-     * <ul>
-     *  <li>Initial step bound factor: 100</li>
-     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
-     * </ul>
-     *
-     * @param costRelativeTolerance Desired relative error in the sum of
-     * squares.
-     * @param parRelativeTolerance Desired relative error in the approximate
-     * solution parameters.
-     * @param orthoTolerance Desired max cosine on the orthogonality between
-     * the function vector and the columns of the Jacobian.
-     */
-    public LevenbergMarquardtOptimizer(double costRelativeTolerance,
-                                       double parRelativeTolerance,
-                                       double orthoTolerance) {
-        this(100,
-             costRelativeTolerance, parRelativeTolerance, orthoTolerance,
-             Precision.SAFE_MIN);
-    }
-
-    /**
-     * The arguments control the behaviour of the default convergence checking
-     * procedure.
-     * Additional criteria can be defined through the setting of a {@link
-     * ConvergenceChecker}.
-     *
-     * @param initialStepBoundFactor Positive input variable used in
-     * determining the initial step bound. This bound is set to the
-     * product of initialStepBoundFactor and the euclidean norm of
-     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
-     * itself. In most cases factor should lie in the interval
-     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
-     * @param costRelativeTolerance Desired relative error in the sum of
-     * squares.
-     * @param parRelativeTolerance Desired relative error in the approximate
-     * solution parameters.
-     * @param orthoTolerance Desired max cosine on the orthogonality between
-     * the function vector and the columns of the Jacobian.
-     * @param threshold Desired threshold for QR ranking. If the squared norm
-     * of a column vector is smaller or equal to this threshold during QR
-     * decomposition, it is considered to be a zero vector and hence the rank
-     * of the matrix is reduced.
-     */
-    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
-                                       double costRelativeTolerance,
-                                       double parRelativeTolerance,
-                                       double orthoTolerance,
-                                       double threshold) {
-        super(null); // No custom convergence criterion.
-        this.initialStepBoundFactor = initialStepBoundFactor;
-        this.costRelativeTolerance = costRelativeTolerance;
-        this.parRelativeTolerance = parRelativeTolerance;
-        this.orthoTolerance = orthoTolerance;
-        this.qrRankingThreshold = threshold;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointVectorValuePair doOptimize() {
-        final int nR = getTarget().length; // Number of observed data.
-        final double[] currentPoint = getStartPoint();
-        final int nC = currentPoint.length; // Number of parameters.
-
-        // arrays shared with the other private methods
-        solvedCols  = FastMath.min(nR, nC);
-        diagR       = new double[nC];
-        jacNorm     = new double[nC];
-        beta        = new double[nC];
-        permutation = new int[nC];
-        lmDir       = new double[nC];
-
-        // local point
-        double   delta   = 0;
-        double   xNorm   = 0;
-        double[] diag    = new double[nC];
-        double[] oldX    = new double[nC];
-        double[] oldRes  = new double[nR];
-        double[] oldObj  = new double[nR];
-        double[] qtf     = new double[nR];
-        double[] work1   = new double[nC];
-        double[] work2   = new double[nC];
-        double[] work3   = new double[nC];
-
-        final RealMatrix weightMatrixSqrt = getWeightSquareRoot();
-
-        // Evaluate the function at the starting point and calculate its norm.
-        double[] currentObjective = computeObjectiveValue(currentPoint);
-        double[] currentResiduals = computeResiduals(currentObjective);
-        PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
-        double currentCost = computeCost(currentResiduals);
-
-        // Outer loop.
-        lmPar = 0;
-        boolean firstIteration = true;
-        int iter = 0;
-        final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker();
-        while (true) {
-            ++iter;
-            final PointVectorValuePair previous = current;
-
-            // QR decomposition of the jacobian matrix
-            qrDecomposition(computeWeightedJacobian(currentPoint));
-
-            weightedResidual = weightMatrixSqrt.operate(currentResiduals);
-            for (int i = 0; i < nR; i++) {
-                qtf[i] = weightedResidual[i];
-            }
-
-            // compute Qt.res
-            qTy(qtf);
-
-            // now we don't need Q anymore,
-            // so let jacobian contain the R matrix with its diagonal elements
-            for (int k = 0; k < solvedCols; ++k) {
-                int pk = permutation[k];
-                weightedJacobian[k][pk] = diagR[pk];
-            }
-
-            if (firstIteration) {
-                // scale the point according to the norms of the columns
-                // of the initial jacobian
-                xNorm = 0;
-                for (int k = 0; k < nC; ++k) {
-                    double dk = jacNorm[k];
-                    if (dk == 0) {
-                        dk = 1.0;
-                    }
-                    double xk = dk * currentPoint[k];
-                    xNorm  += xk * xk;
-                    diag[k] = dk;
-                }
-                xNorm = FastMath.sqrt(xNorm);
-
-                // initialize the step bound delta
-                delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
-            }
-
-            // check orthogonality between function vector and jacobian columns
-            double maxCosine = 0;
-            if (currentCost != 0) {
-                for (int j = 0; j < solvedCols; ++j) {
-                    int    pj = permutation[j];
-                    double s  = jacNorm[pj];
-                    if (s != 0) {
-                        double sum = 0;
-                        for (int i = 0; i <= j; ++i) {
-                            sum += weightedJacobian[i][pj] * qtf[i];
-                        }
-                        maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
-                    }
-                }
-            }
-            if (maxCosine <= orthoTolerance) {
-                // Convergence has been reached.
-                setCost(currentCost);
-                // Update (deprecated) "point" field.
-                point = current.getPoint();
-                return current;
-            }
-
-            // rescale if necessary
-            for (int j = 0; j < nC; ++j) {
-                diag[j] = FastMath.max(diag[j], jacNorm[j]);
-            }
-
-            // Inner loop.
-            for (double ratio = 0; ratio < 1.0e-4;) {
-
-                // save the state
-                for (int j = 0; j < solvedCols; ++j) {
-                    int pj = permutation[j];
-                    oldX[pj] = currentPoint[pj];
-                }
-                final double previousCost = currentCost;
-                double[] tmpVec = weightedResidual;
-                weightedResidual = oldRes;
-                oldRes    = tmpVec;
-                tmpVec    = currentObjective;
-                currentObjective = oldObj;
-                oldObj    = tmpVec;
-
-                // determine the Levenberg-Marquardt parameter
-                determineLMParameter(qtf, delta, diag, work1, work2, work3);
-
-                // compute the new point and the norm of the evolution direction
-                double lmNorm = 0;
-                for (int j = 0; j < solvedCols; ++j) {
-                    int pj = permutation[j];
-                    lmDir[pj] = -lmDir[pj];
-                    currentPoint[pj] = oldX[pj] + lmDir[pj];
-                    double s = diag[pj] * lmDir[pj];
-                    lmNorm  += s * s;
-                }
-                lmNorm = FastMath.sqrt(lmNorm);
-                // on the first iteration, adjust the initial step bound.
-                if (firstIteration) {
-                    delta = FastMath.min(delta, lmNorm);
-                }
-
-                // Evaluate the function at x + p and calculate its norm.
-                currentObjective = computeObjectiveValue(currentPoint);
-                currentResiduals = computeResiduals(currentObjective);
-                current = new PointVectorValuePair(currentPoint, currentObjective);
-                currentCost = computeCost(currentResiduals);
-
-                // compute the scaled actual reduction
-                double actRed = -1.0;
-                if (0.1 * currentCost < previousCost) {
-                    double r = currentCost / previousCost;
-                    actRed = 1.0 - r * r;
-                }
-
-                // compute the scaled predicted reduction
-                // and the scaled directional derivative
-                for (int j = 0; j < solvedCols; ++j) {
-                    int pj = permutation[j];
-                    double dirJ = lmDir[pj];
-                    work1[j] = 0;
-                    for (int i = 0; i <= j; ++i) {
-                        work1[i] += weightedJacobian[i][pj] * dirJ;
-                    }
-                }
-                double coeff1 = 0;
-                for (int j = 0; j < solvedCols; ++j) {
-                    coeff1 += work1[j] * work1[j];
-                }
-                double pc2 = previousCost * previousCost;
-                coeff1 /= pc2;
-                double coeff2 = lmPar * lmNorm * lmNorm / pc2;
-                double preRed = coeff1 + 2 * coeff2;
-                double dirDer = -(coeff1 + coeff2);
-
-                // ratio of the actual to the predicted reduction
-                ratio = (preRed == 0) ? 0 : (actRed / preRed);
-
-                // update the step bound
-                if (ratio <= 0.25) {
-                    double tmp =
-                        (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
-                    if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
-                        tmp = 0.1;
-                    }
-                    delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
-                    lmPar /= tmp;
-                } else if ((lmPar == 0) || (ratio >= 0.75)) {
-                    delta = 2 * lmNorm;
-                    lmPar *= 0.5;
-                }
-
-                // test for successful iteration.
-                if (ratio >= 1.0e-4) {
-                    // successful iteration, update the norm
-                    firstIteration = false;
-                    xNorm = 0;
-                    for (int k = 0; k < nC; ++k) {
-                        double xK = diag[k] * currentPoint[k];
-                        xNorm += xK * xK;
-                    }
-                    xNorm = FastMath.sqrt(xNorm);
-
-                    // tests for convergence.
-                    if (checker != null && checker.converged(iter, previous, current)) {
-                        setCost(currentCost);
-                        // Update (deprecated) "point" field.
-                        point = current.getPoint();
-                        return current;
-                    }
-                } else {
-                    // failed iteration, reset the previous values
-                    currentCost = previousCost;
-                    for (int j = 0; j < solvedCols; ++j) {
-                        int pj = permutation[j];
-                        currentPoint[pj] = oldX[pj];
-                    }
-                    tmpVec    = weightedResidual;
-                    weightedResidual = oldRes;
-                    oldRes    = tmpVec;
-                    tmpVec    = currentObjective;
-                    currentObjective = oldObj;
-                    oldObj    = tmpVec;
-                    // Reset "current" to previous values.
-                    current = new PointVectorValuePair(currentPoint, currentObjective);
-                }
-
-                // Default convergence criteria.
-                if ((FastMath.abs(actRed) <= costRelativeTolerance &&
-                     preRed <= costRelativeTolerance &&
-                     ratio <= 2.0) ||
-                    delta <= parRelativeTolerance * xNorm) {
-                    setCost(currentCost);
-                    // Update (deprecated) "point" field.
-                    point = current.getPoint();
-                    return current;
-                }
-
-                // tests for termination and stringent tolerances
-                // (2.2204e-16 is the machine epsilon for IEEE754)
-                if ((FastMath.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
-                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
-                                                   costRelativeTolerance);
-                } else if (delta <= 2.2204e-16 * xNorm) {
-                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
-                                                   parRelativeTolerance);
-                } else if (maxCosine <= 2.2204e-16)  {
-                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
-                                                   orthoTolerance);
-                }
-            }
-        }
-    }
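
The step-bound update inside the loop above follows the usual trust-region
heuristic: compare the actual cost reduction to the predicted one and shrink or
enlarge delta accordingly. A simplified standalone sketch of just that rule,
using the same 0.25/0.75 thresholds but omitting the lmPar bookkeeping and the
currentCost/previousCost guard; the helper name is hypothetical:

    // delta update driven by the ratio of actual to predicted reduction.
    static double updateStepBound(double ratio, double delta, double lmNorm,
                                  double actRed, double dirDer) {
        if (ratio <= 0.25) {
            double tmp = (actRed < 0) ? 0.5 * dirDer / (dirDer + 0.5 * actRed) : 0.5;
            if (tmp < 0.1) {
                tmp = 0.1;
            }
            return tmp * Math.min(delta, 10.0 * lmNorm); // shrink the trust region
        } else if (ratio >= 0.75) {
            return 2 * lmNorm;                           // enlarge the trust region
        }
        return delta;                                    // keep it unchanged
    }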
-
-    /**
-     * Determine the Levenberg-Marquardt parameter.
-     * <p>This implementation is a translation in Java of the MINPACK
-     * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
-     * routine.</p>
-     * <p>This method sets the lmPar and lmDir attributes.</p>
-     * <p>The authors of the original fortran function are:</p>
-     * <ul>
-     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
-     *   <li>Burton  S. Garbow</li>
-     *   <li>Kenneth E. Hillstrom</li>
-     *   <li>Jorge   J. More</li>
-     * </ul>
-     * <p>Luc Maisonobe did the Java translation.</p>
-     *
-     * @param qy array containing qTy
-     * @param delta upper bound on the euclidean norm of diagR * lmDir
-     * @param diag diagonal matrix
-     * @param work1 work array
-     * @param work2 work array
-     * @param work3 work array
-     */
-    private void determineLMParameter(double[] qy, double delta, double[] diag,
-                                      double[] work1, double[] work2, double[] work3) {
-        final int nC = weightedJacobian[0].length;
-
-        // compute and store in x the gauss-newton direction, if the
-        // jacobian is rank-deficient, obtain a least squares solution
-        for (int j = 0; j < rank; ++j) {
-            lmDir[permutation[j]] = qy[j];
-        }
-        for (int j = rank; j < nC; ++j) {
-            lmDir[permutation[j]] = 0;
-        }
-        for (int k = rank - 1; k >= 0; --k) {
-            int pk = permutation[k];
-            double ypk = lmDir[pk] / diagR[pk];
-            for (int i = 0; i < k; ++i) {
-                lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
-            }
-            lmDir[pk] = ypk;
-        }
-
-        // evaluate the function at the origin, and test
-        // for acceptance of the Gauss-Newton direction
-        double dxNorm = 0;
-        for (int j = 0; j < solvedCols; ++j) {
-            int pj = permutation[j];
-            double s = diag[pj] * lmDir[pj];
-            work1[pj] = s;
-            dxNorm += s * s;
-        }
-        dxNorm = FastMath.sqrt(dxNorm);
-        double fp = dxNorm - delta;
-        if (fp <= 0.1 * delta) {
-            lmPar = 0;
-            return;
-        }
-
-        // if the jacobian is not rank deficient, the Newton step provides
-        // a lower bound, parl, for the zero of the function,
-        // otherwise set this bound to zero
-        double sum2;
-        double parl = 0;
-        if (rank == solvedCols) {
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                work1[pj] *= diag[pj] / dxNorm;
-            }
-            sum2 = 0;
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                double sum = 0;
-                for (int i = 0; i < j; ++i) {
-                    sum += weightedJacobian[i][pj] * work1[permutation[i]];
-                }
-                double s = (work1[pj] - sum) / diagR[pj];
-                work1[pj] = s;
-                sum2 += s * s;
-            }
-            parl = fp / (delta * sum2);
-        }
-
-        // calculate an upper bound, paru, for the zero of the function
-        sum2 = 0;
-        for (int j = 0; j < solvedCols; ++j) {
-            int pj = permutation[j];
-            double sum = 0;
-            for (int i = 0; i <= j; ++i) {
-                sum += weightedJacobian[i][pj] * qy[i];
-            }
-            sum /= diag[pj];
-            sum2 += sum * sum;
-        }
-        double gNorm = FastMath.sqrt(sum2);
-        double paru = gNorm / delta;
-        if (paru == 0) {
-            // 2.2251e-308 is the smallest positive real for IEEE754
-            paru = 2.2251e-308 / FastMath.min(delta, 0.1);
-        }
-
-        // if the input par lies outside of the interval (parl,paru),
-        // set par to the closer endpoint
-        lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
-        if (lmPar == 0) {
-            lmPar = gNorm / dxNorm;
-        }
-
-        for (int countdown = 10; countdown >= 0; --countdown) {
-
-            // evaluate the function at the current value of lmPar
-            if (lmPar == 0) {
-                lmPar = FastMath.max(2.2251e-308, 0.001 * paru);
-            }
-            double sPar = FastMath.sqrt(lmPar);
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                work1[pj] = sPar * diag[pj];
-            }
-            determineLMDirection(qy, work1, work2, work3);
-
-            dxNorm = 0;
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                double s = diag[pj] * lmDir[pj];
-                work3[pj] = s;
-                dxNorm += s * s;
-            }
-            dxNorm = FastMath.sqrt(dxNorm);
-            double previousFP = fp;
-            fp = dxNorm - delta;
-
-            // if the function is small enough, accept the current value
-            // of lmPar, also test for the exceptional cases where parl is zero
-            if ((FastMath.abs(fp) <= 0.1 * delta) ||
-                    ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
-                return;
-            }
-
-            // compute the Newton correction
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                work1[pj] = work3[pj] * diag[pj] / dxNorm;
-            }
-            for (int j = 0; j < solvedCols; ++j) {
-                int pj = permutation[j];
-                work1[pj] /= work2[j];
-                double tmp = work1[pj];
-                for (int i = j + 1; i < solvedCols; ++i) {
-                    work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
-                }
-            }
-            sum2 = 0;
-            for (int j = 0; j < solvedCols; ++j) {
-                double s = work1[permutation[j]];
-                sum2 += s * s;
-            }
-            double correction = fp / (delta * sum2);
-
-            // depending on the sign of the function, update parl or paru.
-            if (fp > 0) {
-                parl = FastMath.max(parl, lmPar);
-            } else if (fp < 0) {
-                paru = FastMath.min(paru, lmPar);
-            }
-
-            // compute an improved estimate for lmPar
-            lmPar = FastMath.max(parl, lmPar + correction);
-
-        }
-    }
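
The loop above keeps lmPar bracketed between parl and paru, tightening
whichever bound the sign of fp = |D*x| - delta indicates before applying the
Newton-like correction. A minimal sketch of one bracketing step, detached from
the surrounding state; the helper and its {parl, paru} array argument are
hypothetical:

    // fp > 0: the step is still too long, raise parl; fp < 0: too short, lower paru.
    static double bracketLmPar(double lmPar, double fp, double correction,
                               double[] bracket /* {parl, paru}, updated in place */) {
        if (fp > 0) {
            bracket[0] = Math.max(bracket[0], lmPar); // tighten the lower bound
        } else if (fp < 0) {
            bracket[1] = Math.min(bracket[1], lmPar); // tighten the upper bound
        }
        return Math.max(bracket[0], lmPar + correction); // improved estimate
    }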
-
-    /**
-     * Solve a*x = b and d*x = 0 in the least squares sense.
-     * <p>This implementation is a translation in Java of the MINPACK
-     * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
-     * routine.</p>
-     * <p>This method sets the lmDir and lmDiag attributes.</p>
-     * <p>The authors of the original fortran function are:</p>
-     * <ul>
-     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
-     *   <li>Burton  S. Garbow</li>
-     *   <li>Kenneth E. Hillstrom</li>
-     *   <li>Jorge   J. More</li>
-     * </ul>
-     * <p>Luc Maisonobe did the Java translation.</p>
-     *
-     * @param qy array containing qTy
-     * @param diag diagonal matrix
-     * @param lmDiag diagonal elements associated with lmDir
-     * @param work work array
-     */
-    private void determineLMDirection(double[] qy, double[] diag,
-                                      double[] lmDiag, double[] work) {
-
-        // copy R and Qty to preserve input and initialize s
-        //  in particular, save the diagonal elements of R in lmDir
-        for (int j = 0; j < solvedCols; ++j) {
-            int pj = permutation[j];
-            for (int i = j + 1; i < solvedCols; ++i) {
-                weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
-            }
-            lmDir[j] = diagR[pj];
-            work[j]  = qy[j];
-        }
-
-        // eliminate the diagonal matrix d using a Givens rotation
-        for (int j = 0; j < solvedCols; ++j) {
-
-            // prepare the row of d to be eliminated, locating the
-            // diagonal element using p from the Q.R. factorization
-            int pj = permutation[j];
-            double dpj = diag[pj];
-            if (dpj != 0) {
-                Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
-            }
-            lmDiag[j] = dpj;
-
-            //  the transformations to eliminate the row of d
-            // modify only a single element of Qty
-            // beyond the first n, which is initially zero.
-            double qtbpj = 0;
-            for (int k = j; k < solvedCols; ++k) {
-                int pk = permutation[k];
-
-                // determine a Givens rotation which eliminates the
-                // appropriate element in the current row of d
-                if (lmDiag[k] != 0) {
-
-                    final double sin;
-                    final double cos;
-                    double rkk = weightedJacobian[k][pk];
-                    if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
-                        final double cotan = rkk / lmDiag[k];
-                        sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
-                        cos   = sin * cotan;
-                    } else {
-                        final double tan = lmDiag[k] / rkk;
-                        cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
-                        sin = cos * tan;
-                    }
-
-                    // compute the modified diagonal element of R and
-                    // the modified element of (Qty,0)
-                    weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
-                    final double temp = cos * work[k] + sin * qtbpj;
-                    qtbpj = -sin * work[k] + cos * qtbpj;
-                    work[k] = temp;
-
-                    // accumulate the transformation in the row of s
-                    for (int i = k + 1; i < solvedCols; ++i) {
-                        double rik = weightedJacobian[i][pk];
-                        final double temp2 = cos * rik + sin * lmDiag[i];
-                        lmDiag[i] = -sin * rik + cos * lmDiag[i];
-                        weightedJacobian[i][pk] = temp2;
-                    }
-                }
-            }
-
-            // store the diagonal element of s and restore
-            // the corresponding diagonal element of R
-            lmDiag[j] = weightedJacobian[j][permutation[j]];
-            weightedJacobian[j][permutation[j]] = lmDir[j];
-        }
-
-        // solve the triangular system for z, if the system is
-        // singular, then obtain a least squares solution
-        int nSing = solvedCols;
-        for (int j = 0; j < solvedCols; ++j) {
-            if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
-                nSing = j;
-            }
-            if (nSing < solvedCols) {
-                work[j] = 0;
-            }
-        }
-        if (nSing > 0) {
-            for (int j = nSing - 1; j >= 0; --j) {
-                int pj = permutation[j];
-                double sum = 0;
-                for (int i = j + 1; i < nSing; ++i) {
-                    sum += weightedJacobian[i][pj] * work[i];
-                }
-                work[j] = (work[j] - sum) / lmDiag[j];
-            }
-        }
-
-        // permute the components of z back to components of lmDir
-        for (int j = 0; j < lmDir.length; ++j) {
-            lmDir[permutation[j]] = work[j];
-        }
-    }
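
The Givens rotation computed above avoids overflow by branching on which of the
two entries to be combined is larger in magnitude. A minimal standalone sketch
of that computation, returning {cos, sin}; it assumes the entry to eliminate
(b) is non-zero, as guaranteed by the check in the loop above:

    // Numerically stable Givens rotation (c, s) eliminating b against a.
    static double[] givens(double a, double b) {
        final double cos;
        final double sin;
        if (Math.abs(a) < Math.abs(b)) {
            final double cotan = a / b;
            sin = 1.0 / Math.sqrt(1.0 + cotan * cotan);
            cos = sin * cotan;
        } else {
            final double tan = b / a;
            cos = 1.0 / Math.sqrt(1.0 + tan * tan);
            sin = cos * tan;
        }
        return new double[] { cos, sin };
    }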
-
-    /**
-     * Decompose a matrix A as A.P = Q.R using Householder transforms.
-     * <p>As suggested in the P. Lascaux and R. Theodor book
-     * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
-     * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
-     * the Householder transforms with u<sub>k</sub> unit vectors such that:
-     * <pre>
-     * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
-     * </pre>
- * we use v<sub>k</sub> non-unit vectors such that:
-     * <pre>
-     * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
-     * </pre>
-     * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
-     * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
-     * them from the v<sub>k</sub> vectors would be costly.</p>
-     * <p>This decomposition handles rank-deficient cases since the transformations
-     * are performed in non-increasing column norm order thanks to column
-     * pivoting. The diagonal elements of the R matrix are therefore also in
-     * non-increasing absolute values order.</p>
-     *
-     * @param jacobian Weighted Jacobian matrix at the current point.
-     * @exception ConvergenceException if the decomposition cannot be performed
-     */
-    private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
-        // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
-        // hence the multiplication by -1.
-        weightedJacobian = jacobian.scalarMultiply(-1).getData();
-
-        final int nR = weightedJacobian.length;
-        final int nC = weightedJacobian[0].length;
-
-        // initializations
-        for (int k = 0; k < nC; ++k) {
-            permutation[k] = k;
-            double norm2 = 0;
-            for (int i = 0; i < nR; ++i) {
-                double akk = weightedJacobian[i][k];
-                norm2 += akk * akk;
-            }
-            jacNorm[k] = FastMath.sqrt(norm2);
-        }
-
-        // transform the matrix column after column
-        for (int k = 0; k < nC; ++k) {
-
-            // select the column with the greatest norm on active components
-            int nextColumn = -1;
-            double ak2 = Double.NEGATIVE_INFINITY;
-            for (int i = k; i < nC; ++i) {
-                double norm2 = 0;
-                for (int j = k; j < nR; ++j) {
-                    double aki = weightedJacobian[j][permutation[i]];
-                    norm2 += aki * aki;
-                }
-                if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
-                    throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
-                                                   nR, nC);
-                }
-                if (norm2 > ak2) {
-                    nextColumn = i;
-                    ak2        = norm2;
-                }
-            }
-            if (ak2 <= qrRankingThreshold) {
-                rank = k;
-                return;
-            }
-            int pk                  = permutation[nextColumn];
-            permutation[nextColumn] = permutation[k];
-            permutation[k]          = pk;
-
-            // choose alpha such that Hk.u = alpha ek
-            double akk   = weightedJacobian[k][pk];
-            double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
-            double betak = 1.0 / (ak2 - akk * alpha);
-            beta[pk]     = betak;
-
-            // transform the current column
-            diagR[pk]        = alpha;
-            weightedJacobian[k][pk] -= alpha;
-
-            // transform the remaining columns
-            for (int dk = nC - 1 - k; dk > 0; --dk) {
-                double gamma = 0;
-                for (int j = k; j < nR; ++j) {
-                    gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
-                }
-                gamma *= betak;
-                for (int j = k; j < nR; ++j) {
-                    weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
-                }
-            }
-        }
-        rank = solvedCols;
-    }
-
-    /**
-     * Compute the product Qt.y for some Q.R. decomposition.
-     *
-     * @param y vector to multiply (will be overwritten with the result)
-     */
-    private void qTy(double[] y) {
-        final int nR = weightedJacobian.length;
-        final int nC = weightedJacobian[0].length;
-
-        for (int k = 0; k < nC; ++k) {
-            int pk = permutation[k];
-            double gamma = 0;
-            for (int i = k; i < nR; ++i) {
-                gamma += weightedJacobian[i][pk] * y[i];
-            }
-            gamma *= beta[pk];
-            for (int i = k; i < nR; ++i) {
-                y[i] -= gamma * weightedJacobian[i][pk];
-            }
-        }
-    }
-}
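
The javadoc of the removed qrDecomposition method above stores each Householder
reflection as a non-unit vector v_k = a_k - alpha_k e_k together with a coefficient
beta_k = 1 / (ak2 - akk * alpha), and pivots on the column with the largest remaining
norm. The standalone Java sketch below is not part of this commit: class and method
names are invented and column pivoting is omitted for brevity; it only illustrates
that convention on a small dense matrix.

    import java.util.Arrays;

    /** Minimal sketch of Householder QR using the v_k / beta_k convention described above. */
    public class HouseholderQrSketch {

        /**
         * Decomposes {@code a} (m x n, m >= n) in place: entries above the diagonal hold R,
         * the diagonal of R is returned in {@code diagR}, and the columns at and below the
         * diagonal hold the non-unit Householder vectors v_k with coefficients {@code beta}.
         */
        static void decompose(double[][] a, double[] diagR, double[] beta) {
            final int m = a.length;
            final int n = a[0].length;
            for (int k = 0; k < n; k++) {
                // Squared norm of the active part of column k.
                double ak2 = 0;
                for (int i = k; i < m; i++) {
                    ak2 += a[i][k] * a[i][k];
                }
                final double akk = a[k][k];
                // Choose alpha so that H_k maps the column onto alpha * e_k (sign chosen for stability).
                final double alpha = (akk > 0) ? -Math.sqrt(ak2) : Math.sqrt(ak2);
                final double betak = 1.0 / (ak2 - akk * alpha);
                diagR[k] = alpha;
                beta[k]  = betak;
                // v_k = a_k - alpha * e_k: only the diagonal entry changes.
                a[k][k] -= alpha;
                // Apply H_k = I - beta_k v_k v_k^T to the remaining columns.
                for (int j = k + 1; j < n; j++) {
                    double gamma = 0;
                    for (int i = k; i < m; i++) {
                        gamma += a[i][k] * a[i][j];
                    }
                    gamma *= betak;
                    for (int i = k; i < m; i++) {
                        a[i][j] -= gamma * a[i][k];
                    }
                }
            }
        }

        public static void main(String[] args) {
            double[][] a = { { 12, -51 }, { 6, 167 }, { -4, 24 } };
            double[] diagR = new double[2];
            double[] beta  = new double[2];
            decompose(a, diagR, beta);
            System.out.println("diag(R) = " + Arrays.toString(diagR)); // approximately [-14.0, -175.0]
        }
    }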

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizer.java
deleted file mode 100644
index 499fd07..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/NonLinearConjugateGradientOptimizer.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.analysis.solvers.BrentSolver;
-import org.apache.commons.math4.analysis.solvers.UnivariateSolver;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Non-linear conjugate gradient optimizer.
- * <p>
- * This class supports both the Fletcher-Reeves and the Polak-Ribi&egrave;re
- * update formulas for the conjugate search directions. It also supports
- * optional preconditioning.
- * </p>
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- *
- */
-@Deprecated
-public class NonLinearConjugateGradientOptimizer
-    extends AbstractScalarDifferentiableOptimizer {
-    /** Update formula for the beta parameter. */
-    private final ConjugateGradientFormula updateFormula;
-    /** Preconditioner (may be null). */
-    private final Preconditioner preconditioner;
-    /** solver to use in the line search (may be null). */
-    private final UnivariateSolver solver;
-    /** Initial step used to bracket the optimum in line search. */
-    private double initialStep;
-    /** Current point. */
-    private double[] point;
-
-    /**
-     * Constructor with default {@link SimpleValueChecker checker},
-     * {@link BrentSolver line search solver} and
-     * {@link IdentityPreconditioner preconditioner}.
-     *
-     * @param updateFormula formula to use for updating the &beta; parameter,
-     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
-     * ConjugateGradientFormula#POLAK_RIBIERE}.
-     * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula) {
-        this(updateFormula,
-             new SimpleValueChecker());
-    }
-
-    /**
-     * Constructor with default {@link BrentSolver line search solver} and
-     * {@link IdentityPreconditioner preconditioner}.
-     *
-     * @param updateFormula formula to use for updating the &beta; parameter,
-     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
-     * ConjugateGradientFormula#POLAK_RIBIERE}.
-     * @param checker Convergence checker.
-     */
-    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
-                                               ConvergenceChecker<PointValuePair> checker) {
-        this(updateFormula,
-             checker,
-             new BrentSolver(),
-             new IdentityPreconditioner());
-    }
-
-
-    /**
-     * Constructor with default {@link IdentityPreconditioner preconditioner}.
-     *
-     * @param updateFormula formula to use for updating the &beta; parameter,
-     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
-     * ConjugateGradientFormula#POLAK_RIBIERE}.
-     * @param checker Convergence checker.
-     * @param lineSearchSolver Solver to use during line search.
-     */
-    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
-                                               ConvergenceChecker<PointValuePair> checker,
-                                               final UnivariateSolver lineSearchSolver) {
-        this(updateFormula,
-             checker,
-             lineSearchSolver,
-             new IdentityPreconditioner());
-    }
-
-    /**
-     * @param updateFormula formula to use for updating the &beta; parameter,
-     * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
-     * ConjugateGradientFormula#POLAK_RIBIERE}.
-     * @param checker Convergence checker.
-     * @param lineSearchSolver Solver to use during line search.
-     * @param preconditioner Preconditioner.
-     */
-    public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
-                                               ConvergenceChecker<PointValuePair> checker,
-                                               final UnivariateSolver lineSearchSolver,
-                                               final Preconditioner preconditioner) {
-        super(checker);
-
-        this.updateFormula = updateFormula;
-        solver = lineSearchSolver;
-        this.preconditioner = preconditioner;
-        initialStep = 1.0;
-    }
-
-    /**
-     * Set the initial step used to bracket the optimum in line search.
-     * <p>
-     * The initial step is a factor with respect to the search direction,
-     * which itself is roughly related to the gradient of the function
-     * </p>
-     * @param initialStep initial step used to bracket the optimum in line search;
-     * if a non-positive value is used, the initial step is reset to its
-     * default value of 1.0
-     */
-    public void setInitialStep(final double initialStep) {
-        if (initialStep <= 0) {
-            this.initialStep = 1.0;
-        } else {
-            this.initialStep = initialStep;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair doOptimize() {
-        final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
-        point = getStartPoint();
-        final GoalType goal = getGoalType();
-        final int n = point.length;
-        double[] r = computeObjectiveGradient(point);
-        if (goal == GoalType.MINIMIZE) {
-            for (int i = 0; i < n; ++i) {
-                r[i] = -r[i];
-            }
-        }
-
-        // Initial search direction.
-        double[] steepestDescent = preconditioner.precondition(point, r);
-        double[] searchDirection = steepestDescent.clone();
-
-        double delta = 0;
-        for (int i = 0; i < n; ++i) {
-            delta += r[i] * searchDirection[i];
-        }
-
-        PointValuePair current = null;
-        int iter = 0;
-        int maxEval = getMaxEvaluations();
-        while (true) {
-            ++iter;
-
-            final double objective = computeObjectiveValue(point);
-            PointValuePair previous = current;
-            current = new PointValuePair(point, objective);
-            if (previous != null && checker.converged(iter, previous, current)) {
-                // We have found an optimum.
-                return current;
-            }
-
-            // Find the optimal step in the search direction.
-            final UnivariateFunction lsf = new LineSearchFunction(searchDirection);
-            final double uB = findUpperBound(lsf, 0, initialStep);
-            // XXX Last parameter is set to a value close to zero in order to
-            // work around the divergence problem in the "testCircleFitting"
-            // unit test (see MATH-439).
-            final double step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
-            maxEval -= solver.getEvaluations(); // Subtract used up evaluations.
-
-            // Validate new point.
-            for (int i = 0; i < point.length; ++i) {
-                point[i] += step * searchDirection[i];
-            }
-
-            r = computeObjectiveGradient(point);
-            if (goal == GoalType.MINIMIZE) {
-                for (int i = 0; i < n; ++i) {
-                    r[i] = -r[i];
-                }
-            }
-
-            // Compute beta.
-            final double deltaOld = delta;
-            final double[] newSteepestDescent = preconditioner.precondition(point, r);
-            delta = 0;
-            for (int i = 0; i < n; ++i) {
-                delta += r[i] * newSteepestDescent[i];
-            }
-
-            final double beta;
-            if (updateFormula == ConjugateGradientFormula.FLETCHER_REEVES) {
-                beta = delta / deltaOld;
-            } else {
-                double deltaMid = 0;
-                for (int i = 0; i < r.length; ++i) {
-                    deltaMid += r[i] * steepestDescent[i];
-                }
-                beta = (delta - deltaMid) / deltaOld;
-            }
-            steepestDescent = newSteepestDescent;
-
-            // Compute conjugate search direction.
-            if (iter % n == 0 ||
-                beta < 0) {
-                // Break conjugation: reset search direction.
-                searchDirection = steepestDescent.clone();
-            } else {
-                // Compute new conjugate search direction.
-                for (int i = 0; i < n; ++i) {
-                    searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
-                }
-            }
-        }
-    }
-
-    /**
-     * Find the upper bound b ensuring bracketing of a root between a and b.
-     *
-     * @param f function whose root must be bracketed.
-     * @param a lower bound of the interval.
-     * @param h initial step to try.
-     * @return b such that f(a) and f(b) have opposite signs.
-     * @throws MathIllegalStateException if no bracket can be found.
-     */
-    private double findUpperBound(final UnivariateFunction f,
-                                  final double a, final double h) {
-        final double yA = f.value(a);
-        double yB = yA;
-        for (double step = h; step < Double.MAX_VALUE; step *= FastMath.max(2, yA / yB)) {
-            final double b = a + step;
-            yB = f.value(b);
-            if (yA * yB <= 0) {
-                return b;
-            }
-        }
-        throw new MathIllegalStateException(LocalizedFormats.UNABLE_TO_BRACKET_OPTIMUM_IN_LINE_SEARCH);
-    }
-
-    /** Default identity preconditioner. */
-    public static class IdentityPreconditioner implements Preconditioner {
-
-        /** {@inheritDoc} */
-        public double[] precondition(double[] variables, double[] r) {
-            return r.clone();
-        }
-    }
-
-    /** Internal class for line search.
-     * <p>
-     * The function represented by this class is the dot product of
-     * the objective function gradient and the search direction. Its
-     * value is zero when the gradient is orthogonal to the search
-     * direction, i.e. when the objective function value is a local
-     * extremum along the search direction.
-     * </p>
-     */
-    private class LineSearchFunction implements UnivariateFunction {
-        /** Search direction. */
-        private final double[] searchDirection;
-
-        /** Simple constructor.
-         * @param searchDirection search direction
-         */
-        public LineSearchFunction(final double[] searchDirection) {
-            this.searchDirection = searchDirection;
-        }
-
-        /** {@inheritDoc} */
-        public double value(double x) {
-            // current point in the search direction
-            final double[] shiftedPoint = point.clone();
-            for (int i = 0; i < shiftedPoint.length; ++i) {
-                shiftedPoint[i] += x * searchDirection[i];
-            }
-
-            // gradient of the objective function
-            final double[] gradient = computeObjectiveGradient(shiftedPoint);
-
-            // dot product with the search direction
-            double dotProduct = 0;
-            for (int i = 0; i < gradient.length; ++i) {
-                dotProduct += gradient[i] * searchDirection[i];
-            }
-
-            return dotProduct;
-        }
-    }
-}
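
The optimizer removed above supports both the Fletcher-Reeves and Polak-Ribière
formulas for the beta coefficient that keeps successive search directions conjugate,
and it resets to steepest descent whenever beta becomes negative or every n iterations.
The self-contained sketch below is not the library API (names are invented); it applies
the same two updates, with an exact line search, to the convex quadratic
f(x) = 0.5 x^T A x - b^T x.

    /** Minimal sketch of nonlinear CG with Fletcher-Reeves / Polak-Ribiere beta updates. */
    public class ConjugateGradientSketch {

        enum Formula { FLETCHER_REEVES, POLAK_RIBIERE }

        /** Minimizes f(x) = 0.5 x'Ax - b'x for a symmetric positive-definite A. */
        static double[] minimize(double[][] a, double[] b, Formula formula, int maxIter) {
            final int n = b.length;
            double[] x = new double[n];
            double[] r = residual(a, b, x); // r = -grad f(x) = b - Ax
            double[] d = r.clone();         // initial search direction: steepest descent
            for (int iter = 1; iter <= maxIter; iter++) {
                // Exact line search for a quadratic: step = (r.d) / (d.Ad).
                double[] ad = multiply(a, d);
                double step = dot(r, d) / dot(d, ad);
                for (int i = 0; i < n; i++) {
                    x[i] += step * d[i];
                }
                double[] rNew = residual(a, b, x);
                if (Math.sqrt(dot(rNew, rNew)) < 1e-12) {
                    break;
                }
                final double beta;
                if (formula == Formula.FLETCHER_REEVES) {
                    beta = dot(rNew, rNew) / dot(r, r);
                } else { // Polak-Ribiere
                    beta = (dot(rNew, rNew) - dot(rNew, r)) / dot(r, r);
                }
                if (beta < 0 || iter % n == 0) {
                    d = rNew.clone(); // break conjugation: reset to steepest descent
                } else {
                    for (int i = 0; i < n; i++) {
                        d[i] = rNew[i] + beta * d[i];
                    }
                }
                r = rNew;
            }
            return x;
        }

        static double[] residual(double[][] a, double[] b, double[] x) {
            double[] ax = multiply(a, x);
            double[] r = new double[b.length];
            for (int i = 0; i < b.length; i++) {
                r[i] = b[i] - ax[i];
            }
            return r;
        }

        static double[] multiply(double[][] a, double[] x) {
            double[] y = new double[a.length];
            for (int i = 0; i < a.length; i++) {
                for (int j = 0; j < x.length; j++) {
                    y[i] += a[i][j] * x[j];
                }
            }
            return y;
        }

        static double dot(double[] u, double[] v) {
            double s = 0;
            for (int i = 0; i < u.length; i++) {
                s += u[i] * v[i];
            }
            return s;
        }

        public static void main(String[] args) {
            double[][] a = { { 4, 1 }, { 1, 3 } };
            double[] b = { 1, 2 };
            double[] x = minimize(a, b, Formula.POLAK_RIBIERE, 100);
            System.out.printf("x = [%.6f, %.6f]%n", x[0], x[1]); // approximately [0.090909, 0.636364]
        }
    }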

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/Preconditioner.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/Preconditioner.java b/src/main/java/org/apache/commons/math4/optimization/general/Preconditioner.java
deleted file mode 100644
index 882b789..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/Preconditioner.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-/**
- * This interface represents a preconditioner for differentiable scalar
- * objective function optimizers.
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public interface Preconditioner {
-    /**
-     * Precondition a search direction.
-     * <p>
-     * The returned preconditioned search direction must be computed quickly or
-     * the algorithm's performance will drop drastically. A classical approach
-     * is to compute only the diagonal elements of the Hessian and to divide
-     * the raw search direction by these elements if they are all positive.
-     * If at least one of them is negative, it is safer to return a clone of
-     * the raw search direction as if the Hessian were the identity matrix. The
-     * rationale for this simplified choice is that a negative diagonal element
-     * means the current point is far from the optimum, and preconditioning will
-     * not be efficient anyway in this case.
-     * </p>
-     * @param point current point at which the search direction was computed
-     * @param r raw search direction (i.e. opposite of the gradient)
-     * @return approximation of H<sup>-1</sup>r where H is the objective function Hessian
-     */
-    double[] precondition(double[] point, double[] r);
-}
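
The Preconditioner javadoc above recommends dividing the raw search direction by the
diagonal elements of the Hessian when they are all positive, and otherwise returning a
clone of the raw direction as if the Hessian were the identity matrix. A minimal sketch
of that rule, written independently of the removed interface (names are invented):

    /** Minimal sketch of the diagonal-Hessian preconditioning rule described above. */
    public class DiagonalPreconditionerSketch {

        /**
         * Returns an approximation of H^-1 r built from {@code hessianDiagonal} only.
         * If any diagonal element is non-positive, the raw direction is returned
         * unchanged, as if the Hessian were the identity matrix.
         */
        static double[] precondition(double[] hessianDiagonal, double[] r) {
            for (double h : hessianDiagonal) {
                if (h <= 0) {
                    return r.clone(); // fall back to the identity preconditioner
                }
            }
            double[] preconditioned = new double[r.length];
            for (int i = 0; i < r.length; i++) {
                preconditioned[i] = r[i] / hessianDiagonal[i];
            }
            return preconditioned;
        }

        public static void main(String[] args) {
            double[] diag = { 4.0, 2.0 };
            double[] r = { 8.0, 1.0 };
            double[] p = precondition(diag, r);
            System.out.println(p[0] + ", " + p[1]); // 2.0, 0.5
        }
    }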

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/general/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/general/package-info.java b/src/main/java/org/apache/commons/math4/optimization/general/package-info.java
deleted file mode 100644
index ac50fd4..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/general/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *
- * This package provides optimization algorithms that require derivatives.
- *
- */
-package org.apache.commons.math4.optimization.general;

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/AbstractLinearOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/AbstractLinearOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/linear/AbstractLinearOptimizer.java
deleted file mode 100644
index 7a58f0d..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/AbstractLinearOptimizer.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-
-/**
- * Base class for implementing linear optimizers.
- * <p>
- * This base class handles the boilerplate methods associated with threshold
- * settings and iteration counters.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public abstract class AbstractLinearOptimizer implements LinearOptimizer {
-
-    /** Default maximal number of iterations allowed. */
-    public static final int DEFAULT_MAX_ITERATIONS = 100;
-
-    /**
-     * Linear objective function.
-     * @since 2.1
-     */
-    private LinearObjectiveFunction function;
-
-    /**
-     * Linear constraints.
-     * @since 2.1
-     */
-    private Collection<LinearConstraint> linearConstraints;
-
-    /**
-     * Type of optimization goal: either {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @since 2.1
-     */
-    private GoalType goal;
-
-    /**
-     * Whether to restrict the variables to non-negative values.
-     * @since 2.1
-     */
-    private boolean nonNegative;
-
-    /** Maximal number of iterations allowed. */
-    private int maxIterations;
-
-    /** Number of iterations already performed. */
-    private int iterations;
-
-    /**
-     * Simple constructor with default settings.
-     * <p>The maximal number of iterations is set to its default value.</p>
-     */
-    protected AbstractLinearOptimizer() {
-        setMaxIterations(DEFAULT_MAX_ITERATIONS);
-    }
-
-    /**
-     * @return {@code true} if the variables are restricted to non-negative values.
-     */
-    protected boolean restrictToNonNegative() {
-        return nonNegative;
-    }
-
-    /**
-     * @return the optimization type.
-     */
-    protected GoalType getGoalType() {
-        return goal;
-    }
-
-    /**
-     * @return the linear objective function.
-     */
-    protected LinearObjectiveFunction getFunction() {
-        return function;
-    }
-
-    /**
-     * @return the linear constraints.
-     */
-    protected Collection<LinearConstraint> getConstraints() {
-        return Collections.unmodifiableCollection(linearConstraints);
-    }
-
-    /** {@inheritDoc} */
-    public void setMaxIterations(int maxIterations) {
-        this.maxIterations = maxIterations;
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxIterations() {
-        return maxIterations;
-    }
-
-    /** {@inheritDoc} */
-    public int getIterations() {
-        return iterations;
-    }
-
-    /**
-     * Increment the iterations counter by 1.
-     * @exception MaxCountExceededException if the maximal number of iterations is exceeded
-     */
-    protected void incrementIterationsCounter()
-        throws MaxCountExceededException {
-        if (++iterations > maxIterations) {
-            throw new MaxCountExceededException(maxIterations);
-        }
-    }
-
-    /** {@inheritDoc} */
-    public PointValuePair optimize(final LinearObjectiveFunction f,
-                                   final Collection<LinearConstraint> constraints,
-                                   final GoalType goalType, final boolean restrictToNonNegative)
-        throws MathIllegalStateException {
-
-        // store linear problem characteristics
-        this.function          = f;
-        this.linearConstraints = constraints;
-        this.goal              = goalType;
-        this.nonNegative       = restrictToNonNegative;
-
-        iterations  = 0;
-
-        // solve the problem
-        return doOptimize();
-
-    }
-
-    /**
-     * Perform the bulk of optimization algorithm.
-     * @return the point/value pair giving the optimal value of the objective function
-     * @exception MathIllegalStateException if no solution fulfilling the constraints
-     * can be found in the allowed number of iterations
-     */
-    protected abstract PointValuePair doOptimize() throws MathIllegalStateException;
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/LinearConstraint.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/LinearConstraint.java b/src/main/java/org/apache/commons/math4/optimization/linear/LinearConstraint.java
deleted file mode 100644
index 85c3b2f..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/LinearConstraint.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-
-import org.apache.commons.math4.linear.ArrayRealVector;
-import org.apache.commons.math4.linear.MatrixUtils;
-import org.apache.commons.math4.linear.RealVector;
-
-
-/**
- * A linear constraint for a linear optimization problem.
- * <p>
- * A linear constraint has one of the forms:
- * <ul>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> = v</li>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> &lt;= v</li>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> >= v</li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> =
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> &lt;=
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> >=
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- * </ul>
- * The c<sub>i</sub>, l<sub>i</sub> or r<sub>i</sub> are the coefficients of the constraints, the x<sub>i</sub>
- * are the coordinates of the current point and v is the value of the constraint.
- * </p>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class LinearConstraint implements Serializable {
-
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = -764632794033034092L;
-
-    /** Coefficients of the constraint (left hand side). */
-    private final transient RealVector coefficients;
-
-    /** Relationship between left and right hand sides (=, &lt;=, >=). */
-    private final Relationship relationship;
-
-    /** Value of the constraint (right hand side). */
-    private final double value;
-
-    /**
-     * Build a constraint involving a single linear equation.
-     * <p>
-     * A linear constraint with a single linear equation has one of the forms:
-     * <ul>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> = v</li>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> &lt;= v</li>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> >= v</li>
-     * </ul>
-     * </p>
-     * @param coefficients The coefficients of the constraint (left hand side)
-     * @param relationship The type of (in)equality used in the constraint
-     * @param value The value of the constraint (right hand side)
-     */
-    public LinearConstraint(final double[] coefficients, final Relationship relationship,
-                            final double value) {
-        this(new ArrayRealVector(coefficients), relationship, value);
-    }
-
-    /**
-     * Build a constraint involving a single linear equation.
-     * <p>
-     * A linear constraint with a single linear equation has one of the forms:
-     * <ul>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> = v</li>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> &lt;= v</li>
-     *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> >= v</li>
-     * </ul>
-     * </p>
-     * @param coefficients The coefficients of the constraint (left hand side)
-     * @param relationship The type of (in)equality used in the constraint
-     * @param value The value of the constraint (right hand side)
-     */
-    public LinearConstraint(final RealVector coefficients, final Relationship relationship,
-                            final double value) {
-        this.coefficients = coefficients;
-        this.relationship = relationship;
-        this.value        = value;
-    }
-
-    /**
-     * Build a constraint involving two linear equations.
-     * <p>
-     * A linear constraint with two linear equations has one of the forms:
-     * <ul>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> =
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> &lt;=
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> >=
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     * </ul>
-     * </p>
-     * @param lhsCoefficients The coefficients of the linear expression on the left hand side of the constraint
-     * @param lhsConstant The constant term of the linear expression on the left hand side of the constraint
-     * @param relationship The type of (in)equality used in the constraint
-     * @param rhsCoefficients The coefficients of the linear expression on the right hand side of the constraint
-     * @param rhsConstant The constant term of the linear expression on the right hand side of the constraint
-     */
-    public LinearConstraint(final double[] lhsCoefficients, final double lhsConstant,
-                            final Relationship relationship,
-                            final double[] rhsCoefficients, final double rhsConstant) {
-        double[] sub = new double[lhsCoefficients.length];
-        for (int i = 0; i < sub.length; ++i) {
-            sub[i] = lhsCoefficients[i] - rhsCoefficients[i];
-        }
-        this.coefficients = new ArrayRealVector(sub, false);
-        this.relationship = relationship;
-        this.value        = rhsConstant - lhsConstant;
-    }
-
-    /**
-     * Build a constraint involving two linear equations.
-     * <p>
-     * A linear constraint with two linear equations has one of the forms:
-     * <ul>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> =
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> &lt;=
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> >=
-     *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
-     * </ul>
-     * </p>
-     * @param lhsCoefficients The coefficients of the linear expression on the left hand side of the constraint
-     * @param lhsConstant The constant term of the linear expression on the left hand side of the constraint
-     * @param relationship The type of (in)equality used in the constraint
-     * @param rhsCoefficients The coefficients of the linear expression on the right hand side of the constraint
-     * @param rhsConstant The constant term of the linear expression on the right hand side of the constraint
-     */
-    public LinearConstraint(final RealVector lhsCoefficients, final double lhsConstant,
-                            final Relationship relationship,
-                            final RealVector rhsCoefficients, final double rhsConstant) {
-        this.coefficients = lhsCoefficients.subtract(rhsCoefficients);
-        this.relationship = relationship;
-        this.value        = rhsConstant - lhsConstant;
-    }
-
-    /**
-     * Get the coefficients of the constraint (left hand side).
-     * @return coefficients of the constraint (left hand side)
-     */
-    public RealVector getCoefficients() {
-        return coefficients;
-    }
-
-    /**
-     * Get the relationship between left and right hand sides.
-     * @return relationship between left and right hand sides
-     */
-    public Relationship getRelationship() {
-        return relationship;
-    }
-
-    /**
-     * Get the value of the constraint (right hand side).
-     * @return value of the constraint (right hand side)
-     */
-    public double getValue() {
-        return value;
-    }
-
-    @Override
-    public boolean equals(Object other) {
-
-      if (this == other) {
-        return true;
-      }
-
-      if (other instanceof LinearConstraint) {
-          LinearConstraint rhs = (LinearConstraint) other;
-          return (relationship == rhs.relationship) &&
-                 (value        == rhs.value) &&
-                 coefficients.equals(rhs.coefficients);
-      }
-      return false;
-    }
-
-    @Override
-    public int hashCode() {
-        return relationship.hashCode() ^
-               Double.valueOf(value).hashCode() ^
-               coefficients.hashCode();
-    }
-
-    /**
-     * Serialize the instance.
-     * @param oos stream where object should be written
-     * @throws IOException if object cannot be written to stream
-     */
-    private void writeObject(ObjectOutputStream oos)
-        throws IOException {
-        oos.defaultWriteObject();
-        MatrixUtils.serializeRealVector(coefficients, oos);
-    }
-
-    /**
-     * Deserialize the instance.
-     * @param ois stream from which the object should be read
-     * @throws ClassNotFoundException if a class in the stream cannot be found
-     * @throws IOException if object cannot be read from the stream
-     */
-    private void readObject(ObjectInputStream ois)
-      throws ClassNotFoundException, IOException {
-        ois.defaultReadObject();
-        MatrixUtils.deserializeRealVector(this, "coefficients", ois);
-    }
-
-}
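
The two-sided constructors of the removed LinearConstraint normalise the constraint by
moving every variable to the left-hand side: the stored coefficients are
lhsCoefficients - rhsCoefficients and the stored value is rhsConstant - lhsConstant, so
x - y + 2 <= 2x + 3 becomes -x - y <= 1. The snippet below exercises that normalisation
using only the constructors and getters shown in the diff above; it is illustrative and
compiles against the sources removed here (or their org.apache.commons.math3.optimization
counterparts), not against the post-removal math4 tree.

    import org.apache.commons.math4.optimization.linear.LinearConstraint;
    import org.apache.commons.math4.optimization.linear.Relationship;

    public class LinearConstraintExample {
        public static void main(String[] args) {
            // x - y + 2 <= 2x + 3  is stored as  -x - y <= 1
            LinearConstraint c = new LinearConstraint(new double[] { 1, -1 }, 2,
                                                      Relationship.LEQ,
                                                      new double[] { 2, 0 }, 3);
            System.out.println(c.getCoefficients()); // vector (-1, -1)
            System.out.println(c.getValue());        // 1.0
        }
    }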

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/LinearObjectiveFunction.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/LinearObjectiveFunction.java b/src/main/java/org/apache/commons/math4/optimization/linear/LinearObjectiveFunction.java
deleted file mode 100644
index be5ed6bd..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/LinearObjectiveFunction.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-
-import org.apache.commons.math4.linear.ArrayRealVector;
-import org.apache.commons.math4.linear.MatrixUtils;
-import org.apache.commons.math4.linear.RealVector;
-
-/**
- * An objective function for a linear optimization problem.
- * <p>
- * A linear objective function has the form:
- * <pre>
- * c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> + d
- * </pre>
- * The c<sub>i</sub> and d are the coefficients of the equation,
- * the x<sub>i</sub> are the coordinates of the current point.
- * </p>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class LinearObjectiveFunction implements Serializable {
-
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = -4531815507568396090L;
-
-    /** Coefficients of the constraint (c<sub>i</sub>). */
-    private final transient RealVector coefficients;
-
-    /** Constant term of the linear equation. */
-    private final double constantTerm;
-
-    /**
-     * @param coefficients The coefficients for the linear equation being optimized
-     * @param constantTerm The constant term of the linear equation
-     */
-    public LinearObjectiveFunction(double[] coefficients, double constantTerm) {
-        this(new ArrayRealVector(coefficients), constantTerm);
-    }
-
-    /**
-     * @param coefficients The coefficients for the linear equation being optimized
-     * @param constantTerm The constant term of the linear equation
-     */
-    public LinearObjectiveFunction(RealVector coefficients, double constantTerm) {
-        this.coefficients = coefficients;
-        this.constantTerm = constantTerm;
-    }
-
-    /**
-     * Get the coefficients of the linear equation being optimized.
-     * @return coefficients of the linear equation being optimized
-     */
-    public RealVector getCoefficients() {
-        return coefficients;
-    }
-
-    /**
-     * Get the constant of the linear equation being optimized.
-     * @return constant of the linear equation being optimized
-     */
-    public double getConstantTerm() {
-        return constantTerm;
-    }
-
-    /**
-     * Compute the value of the linear equation at the current point
-     * @param point point at which linear equation must be evaluated
-     * @return value of the linear equation at the current point
-     */
-    public double getValue(final double[] point) {
-        return coefficients.dotProduct(new ArrayRealVector(point, false)) + constantTerm;
-    }
-
-    /**
-     * Compute the value of the linear equation at the current point
-     * @param point point at which linear equation must be evaluated
-     * @return value of the linear equation at the current point
-     */
-    public double getValue(final RealVector point) {
-        return coefficients.dotProduct(point) + constantTerm;
-    }
-
-    @Override
-    public boolean equals(Object other) {
-
-      if (this == other) {
-        return true;
-      }
-
-      if (other instanceof LinearObjectiveFunction) {
-          LinearObjectiveFunction rhs = (LinearObjectiveFunction) other;
-          return (constantTerm == rhs.constantTerm) && coefficients.equals(rhs.coefficients);
-      }
-
-      return false;
-    }
-
-    @Override
-    public int hashCode() {
-        return Double.valueOf(constantTerm).hashCode() ^ coefficients.hashCode();
-    }
-
-    /**
-     * Serialize the instance.
-     * @param oos stream where object should be written
-     * @throws IOException if object cannot be written to stream
-     */
-    private void writeObject(ObjectOutputStream oos)
-        throws IOException {
-        oos.defaultWriteObject();
-        MatrixUtils.serializeRealVector(coefficients, oos);
-    }
-
-    /**
-     * Deserialize the instance.
-     * @param ois stream from which the object should be read
-     * @throws ClassNotFoundException if a class in the stream cannot be found
-     * @throws IOException if object cannot be read from the stream
-     */
-    private void readObject(ObjectInputStream ois)
-      throws ClassNotFoundException, IOException {
-        ois.defaultReadObject();
-        MatrixUtils.deserializeRealVector(this, "coefficients", ois);
-    }
-
-}
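
The removed LinearObjectiveFunction simply evaluates c.x + d: with coefficients
{2, 6, 7}, constant term 5 and point (1, 1, 1), getValue returns 2 + 6 + 7 + 5 = 20.
A short illustration against the class shown above (again, pre-removal sources only):

    import org.apache.commons.math4.optimization.linear.LinearObjectiveFunction;

    public class LinearObjectiveFunctionExample {
        public static void main(String[] args) {
            // f(x) = 2 x1 + 6 x2 + 7 x3 + 5
            LinearObjectiveFunction f =
                new LinearObjectiveFunction(new double[] { 2, 6, 7 }, 5);
            System.out.println(f.getValue(new double[] { 1, 1, 1 })); // 20.0
        }
    }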

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/LinearOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/LinearOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/linear/LinearOptimizer.java
deleted file mode 100644
index 07e5930..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/LinearOptimizer.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.util.Collection;
-
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-
-/**
- * This interface represents an optimization algorithm for linear problems.
- * <p>Optimization algorithms find the input point that either {@link GoalType
- * maximizes or minimizes} an objective function. In the linear case the form of
- * the function is restricted to
- * <pre>
- * c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> = v
- * </pre>
- * and there may be linear constraints too, of one of the forms:
- * <ul>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> = v</li>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> &lt;= v</li>
- *   <li>c<sub>1</sub>x<sub>1</sub> + ... c<sub>n</sub>x<sub>n</sub> >= v</li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> =
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> &lt;=
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- *   <li>l<sub>1</sub>x<sub>1</sub> + ... l<sub>n</sub>x<sub>n</sub> + l<sub>cst</sub> >=
- *       r<sub>1</sub>x<sub>1</sub> + ... r<sub>n</sub>x<sub>n</sub> + r<sub>cst</sub></li>
- * </ul>
- * where the c<sub>i</sub>, l<sub>i</sub> or r<sub>i</sub> are the coefficients of
- * the constraints, the x<sub>i</sub> are the coordinates of the current point and
- * v is the value of the constraint.
- * </p>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public interface LinearOptimizer {
-
-    /**
-     * Set the maximal number of iterations of the algorithm.
-     * @param maxIterations maximal number of function calls
-     * @param maxIterations maximal number of iterations
-    void setMaxIterations(int maxIterations);
-
-    /**
-     * Get the maximal number of iterations of the algorithm.
-     * @return maximal number of iterations
-     */
-    int getMaxIterations();
-
-    /**
-     * Get the number of iterations realized by the algorithm.
-     * <p>
-     * The number of iterations corresponds to the last call to the
-     * {@link #optimize(LinearObjectiveFunction, Collection, GoalType, boolean) optimize}
-     * method. It is 0 if the method has not been called yet.
-     * </p>
-     * @return number of iterations
-     */
-    int getIterations();
-
-    /**
-     * Optimizes an objective function.
-     * @param f linear objective function
-     * @param constraints linear constraints
-     * @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}
-     * @param restrictToNonNegative whether to restrict the variables to non-negative values
-     * @return point/value pair giving the optimal value of the objective function
-     * @exception MathIllegalStateException if no solution fulfilling the constraints
-     *   can be found in the allowed number of iterations
-     */
-   PointValuePair optimize(LinearObjectiveFunction f, Collection<LinearConstraint> constraints,
-                               GoalType goalType, boolean restrictToNonNegative) throws MathIllegalStateException;
-
-}
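
A typical use of the removed LinearOptimizer interface, distilled from the testMath781
case in the SimplexSolverTest diff further down: build a LinearObjectiveFunction and a
collection of LinearConstraints, then call SimplexSolver.optimize with a GoalType and
the non-negativity flag. The sketch is illustrative only and relies on the classes
deleted by this commit.

    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.commons.math4.optimization.GoalType;
    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.linear.LinearConstraint;
    import org.apache.commons.math4.optimization.linear.LinearObjectiveFunction;
    import org.apache.commons.math4.optimization.linear.Relationship;
    import org.apache.commons.math4.optimization.linear.SimplexSolver;

    public class SimplexUsageSketch {
        public static void main(String[] args) {
            // Maximize 2x + 6y + 7z subject to three <= constraints,
            // without restricting the variables to non-negative values.
            LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 2, 6, 7 }, 0);

            Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
            constraints.add(new LinearConstraint(new double[] { 1, 2, 1 }, Relationship.LEQ, 2));
            constraints.add(new LinearConstraint(new double[] { -1, 1, 1 }, Relationship.LEQ, -1));
            constraints.add(new LinearConstraint(new double[] { 2, -3, 1 }, Relationship.LEQ, -1));

            PointValuePair solution =
                new SimplexSolver().optimize(f, constraints, GoalType.MAXIMIZE, false);
            System.out.println(solution.getValue()); // approximately 2.0, as asserted in testMath781
        }
    }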

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/NoFeasibleSolutionException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/NoFeasibleSolutionException.java b/src/main/java/org/apache/commons/math4/optimization/linear/NoFeasibleSolutionException.java
deleted file mode 100644
index ca3b438..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/NoFeasibleSolutionException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-
-/**
- * This class represents exceptions thrown by optimizers when no solution fulfills the constraints.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class NoFeasibleSolutionException extends MathIllegalStateException {
-
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = -3044253632189082760L;
-
-    /**
-     * Simple constructor using a default message.
-     */
-    public NoFeasibleSolutionException() {
-        super(LocalizedFormats.NO_FEASIBLE_SOLUTION);
-    }
-
-}


[03/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/linear/SimplexSolverTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/linear/SimplexSolverTest.java b/src/test/java/org/apache/commons/math4/optimization/linear/SimplexSolverTest.java
deleted file mode 100644
index 0331bd8..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/linear/SimplexSolverTest.java
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import org.junit.Assert;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.linear.LinearConstraint;
-import org.apache.commons.math4.optimization.linear.LinearObjectiveFunction;
-import org.apache.commons.math4.optimization.linear.NoFeasibleSolutionException;
-import org.apache.commons.math4.optimization.linear.Relationship;
-import org.apache.commons.math4.optimization.linear.SimplexSolver;
-import org.apache.commons.math4.optimization.linear.UnboundedSolutionException;
-import org.apache.commons.math4.util.Precision;
-import org.junit.Test;
-
-@Deprecated
-public class SimplexSolverTest {
-
-    @Test
-    public void testMath828() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(
-                new double[] { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, 0.0);
-        
-        ArrayList <LinearConstraint>constraints = new ArrayList<LinearConstraint>();
-
-        constraints.add(new LinearConstraint(new double[] {0.0, 39.0, 23.0, 96.0, 15.0, 48.0, 9.0, 21.0, 48.0, 36.0, 76.0, 19.0, 88.0, 17.0, 16.0, 36.0,}, Relationship.GEQ, 15.0));
-        constraints.add(new LinearConstraint(new double[] {0.0, 59.0, 93.0, 12.0, 29.0, 78.0, 73.0, 87.0, 32.0, 70.0, 68.0, 24.0, 11.0, 26.0, 65.0, 25.0,}, Relationship.GEQ, 29.0));
-        constraints.add(new LinearConstraint(new double[] {0.0, 74.0, 5.0, 82.0, 6.0, 97.0, 55.0, 44.0, 52.0, 54.0, 5.0, 93.0, 91.0, 8.0, 20.0, 97.0,}, Relationship.GEQ, 6.0));
-        constraints.add(new LinearConstraint(new double[] {8.0, -3.0, -28.0, -72.0, -8.0, -31.0, -31.0, -74.0, -47.0, -59.0, -24.0, -57.0, -56.0, -16.0, -92.0, -59.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {25.0, -7.0, -99.0, -78.0, -25.0, -14.0, -16.0, -89.0, -39.0, -56.0, -53.0, -9.0, -18.0, -26.0, -11.0, -61.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {33.0, -95.0, -15.0, -4.0, -33.0, -3.0, -20.0, -96.0, -27.0, -13.0, -80.0, -24.0, -3.0, -13.0, -57.0, -76.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {7.0, -95.0, -39.0, -93.0, -7.0, -94.0, -94.0, -62.0, -76.0, -26.0, -53.0, -57.0, -31.0, -76.0, -53.0, -52.0,}, Relationship.GEQ, 0.0));
-        
-        double epsilon = 1e-6;
-        PointValuePair solution = new SimplexSolver().optimize(f, constraints, GoalType.MINIMIZE, true);
-        Assert.assertEquals(1.0d, solution.getValue(), epsilon);
-        Assert.assertTrue(validSolution(solution, constraints, epsilon));
-    }
-
-    @Test
-    public void testMath828Cycle() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(
-                new double[] { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, 0.0);
-        
-        ArrayList <LinearConstraint>constraints = new ArrayList<LinearConstraint>();
-
-        constraints.add(new LinearConstraint(new double[] {0.0, 16.0, 14.0, 69.0, 1.0, 85.0, 52.0, 43.0, 64.0, 97.0, 14.0, 74.0, 89.0, 28.0, 94.0, 58.0, 13.0, 22.0, 21.0, 17.0, 30.0, 25.0, 1.0, 59.0, 91.0, 78.0, 12.0, 74.0, 56.0, 3.0, 88.0,}, Relationship.GEQ, 91.0));
-        constraints.add(new LinearConstraint(new double[] {0.0, 60.0, 40.0, 81.0, 71.0, 72.0, 46.0, 45.0, 38.0, 48.0, 40.0, 17.0, 33.0, 85.0, 64.0, 32.0, 84.0, 3.0, 54.0, 44.0, 71.0, 67.0, 90.0, 95.0, 54.0, 99.0, 99.0, 29.0, 52.0, 98.0, 9.0,}, Relationship.GEQ, 54.0));
-        constraints.add(new LinearConstraint(new double[] {0.0, 41.0, 12.0, 86.0, 90.0, 61.0, 31.0, 41.0, 23.0, 89.0, 17.0, 74.0, 44.0, 27.0, 16.0, 47.0, 80.0, 32.0, 11.0, 56.0, 68.0, 82.0, 11.0, 62.0, 62.0, 53.0, 39.0, 16.0, 48.0, 1.0, 63.0,}, Relationship.GEQ, 62.0));
-        constraints.add(new LinearConstraint(new double[] {83.0, -76.0, -94.0, -19.0, -15.0, -70.0, -72.0, -57.0, -63.0, -65.0, -22.0, -94.0, -22.0, -88.0, -86.0, -89.0, -72.0, -16.0, -80.0, -49.0, -70.0, -93.0, -95.0, -17.0, -83.0, -97.0, -31.0, -47.0, -31.0, -13.0, -23.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {41.0, -96.0, -41.0, -48.0, -70.0, -43.0, -43.0, -43.0, -97.0, -37.0, -85.0, -70.0, -45.0, -67.0, -87.0, -69.0, -94.0, -54.0, -54.0, -92.0, -79.0, -10.0, -35.0, -20.0, -41.0, -41.0, -65.0, -25.0, -12.0, -8.0, -46.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {27.0, -42.0, -65.0, -49.0, -53.0, -42.0, -17.0, -2.0, -61.0, -31.0, -76.0, -47.0, -8.0, -93.0, -86.0, -62.0, -65.0, -63.0, -22.0, -43.0, -27.0, -23.0, -32.0, -74.0, -27.0, -63.0, -47.0, -78.0, -29.0, -95.0, -73.0,}, Relationship.GEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] {15.0, -46.0, -41.0, -83.0, -98.0, -99.0, -21.0, -35.0, -7.0, -14.0, -80.0, -63.0, -18.0, -42.0, -5.0, -34.0, -56.0, -70.0, -16.0, -18.0, -74.0, -61.0, -47.0, -41.0, -15.0, -79.0, -18.0, -47.0, -88.0, -68.0, -55.0,}, Relationship.GEQ, 0.0));
-        
-        double epsilon = 1e-6;
-        PointValuePair solution = new SimplexSolver().optimize(f, constraints, GoalType.MINIMIZE, true);
-        Assert.assertEquals(1.0d, solution.getValue(), epsilon);
-        Assert.assertTrue(validSolution(solution, constraints, epsilon));        
-    }
-
-    @Test
-    public void testMath781() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 2, 6, 7 }, 0);
-
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 2, 1 }, Relationship.LEQ, 2));
-        constraints.add(new LinearConstraint(new double[] { -1, 1, 1 }, Relationship.LEQ, -1));
-        constraints.add(new LinearConstraint(new double[] { 2, -3, 1 }, Relationship.LEQ, -1));
-
-        double epsilon = 1e-6;
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], 0.0d, epsilon) > 0);
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[1], 0.0d, epsilon) > 0);
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[2], 0.0d, epsilon) < 0);
-        Assert.assertEquals(2.0d, solution.getValue(), epsilon);
-    }
-
-    @Test
-    public void testMath713NegativeVariable() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0, 1.0}, 0.0d);
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.EQ, 1));
-
-        double epsilon = 1e-6;
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], 0.0d, epsilon) >= 0);
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[1], 0.0d, epsilon) >= 0);
-    }
-
-    @Test
-    public void testMath434NegativeVariable() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {0.0, 0.0, 1.0}, 0.0d);
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {1, 1, 0}, Relationship.EQ, 5));
-        constraints.add(new LinearConstraint(new double[] {0, 0, 1}, Relationship.GEQ, -10));
-
-        double epsilon = 1e-6;
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, false);
-
-        Assert.assertEquals(5.0, solution.getPoint()[0] + solution.getPoint()[1], epsilon);
-        Assert.assertEquals(-10.0, solution.getPoint()[2], epsilon);
-        Assert.assertEquals(-10.0, solution.getValue(), epsilon);
-
-    }
-
-    @Test(expected = NoFeasibleSolutionException.class)
-    public void testMath434UnfeasibleSolution() {
-        double epsilon = 1e-6;
-
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0, 0.0}, 0.0);
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {epsilon/2, 0.5}, Relationship.EQ, 0));
-        constraints.add(new LinearConstraint(new double[] {1e-3, 0.1}, Relationship.EQ, 10));
-
-        SimplexSolver solver = new SimplexSolver();
-        // allowing only non-negative values, no feasible solution shall be found
-        solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-    }
-
-    @Test
-    public void testMath434PivotRowSelection() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0}, 0.0);
-
-        double epsilon = 1e-6;
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {200}, Relationship.GEQ, 1));
-        constraints.add(new LinearConstraint(new double[] {100}, Relationship.GEQ, 0.499900001));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, false);
-        
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[0] * 200.d, 1.d, epsilon) >= 0);
-        Assert.assertEquals(0.0050, solution.getValue(), epsilon);
-    }
-
-    @Test
-    public void testMath434PivotRowSelection2() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {0.0d, 1.0d, 1.0d, 0.0d, 0.0d, 0.0d, 0.0d}, 0.0d);
-
-        ArrayList<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {1.0d, -0.1d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.EQ, -0.1d));
-        constraints.add(new LinearConstraint(new double[] {1.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, -1e-18d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 1.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 0.0d, 1.0d, 0.0d, -0.0128588d, 1e-5d}, Relationship.EQ, 0.0d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 0.0d, 0.0d, 1.0d, 1e-5d, -0.0128586d}, Relationship.EQ, 1e-10d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, -1.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 1.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 0.0d, -1.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
-        constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 0.0d, 1.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
-
-        double epsilon = 1e-7;
-        SimplexSolver simplex = new SimplexSolver();
-        PointValuePair solution = simplex.optimize(f, constraints, GoalType.MINIMIZE, false);
-        
-        Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], -1e-18d, epsilon) >= 0);
-        Assert.assertEquals(1.0d, solution.getPoint()[1], epsilon);        
-        Assert.assertEquals(0.0d, solution.getPoint()[2], epsilon);
-        Assert.assertEquals(1.0d, solution.getValue(), epsilon);
-    }
-    
-    @Test
-    public void testMath272() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 2, 2, 1 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 1, 0 }, Relationship.GEQ,  1));
-        constraints.add(new LinearConstraint(new double[] { 1, 0, 1 }, Relationship.GEQ,  1));
-        constraints.add(new LinearConstraint(new double[] { 0, 1, 0 }, Relationship.GEQ,  1));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-
-        Assert.assertEquals(0.0, solution.getPoint()[0], .0000001);
-        Assert.assertEquals(1.0, solution.getPoint()[1], .0000001);
-        Assert.assertEquals(1.0, solution.getPoint()[2], .0000001);
-        Assert.assertEquals(3.0, solution.getValue(), .0000001);
-    }
-
-    @Test
-    public void testMath286() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.6, 0.4 }, 0 );
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 23.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 23.0));
-        constraints.add(new LinearConstraint(new double[] { 1, 0, 0, 0, 0, 0 }, Relationship.GEQ, 10.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 0, 1, 0, 0, 0 }, Relationship.GEQ, 8.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 0, 0, 0, 1, 0 }, Relationship.GEQ, 5.0));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-
-        Assert.assertEquals(25.8, solution.getValue(), .0000001);
-        Assert.assertEquals(23.0, solution.getPoint()[0] + solution.getPoint()[2] + solution.getPoint()[4], 0.0000001);
-        Assert.assertEquals(23.0, solution.getPoint()[1] + solution.getPoint()[3] + solution.getPoint()[5], 0.0000001);
-        Assert.assertTrue(solution.getPoint()[0] >= 10.0 - 0.0000001);
-        Assert.assertTrue(solution.getPoint()[2] >= 8.0 - 0.0000001);
-        Assert.assertTrue(solution.getPoint()[4] >= 5.0 - 0.0000001);
-    }
-
-    @Test
-    public void testDegeneracy() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.7 }, 0 );
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.LEQ, 18.0));
-        constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.GEQ, 10.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.GEQ, 8.0));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-        Assert.assertEquals(13.6, solution.getValue(), .0000001);
-    }
-
-    @Test
-    public void testMath288() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 7, 3, 0, 0 }, 0 );
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 3, 0, -5, 0 }, Relationship.LEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] { 2, 0, 0, -5 }, Relationship.LEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 3, 0, -5 }, Relationship.LEQ, 0.0));
-        constraints.add(new LinearConstraint(new double[] { 1, 0, 0, 0 }, Relationship.LEQ, 1.0));
-        constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 0 }, Relationship.LEQ, 1.0));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-        Assert.assertEquals(10.0, solution.getValue(), .0000001);
-    }
-
-    @Test
-    public void testMath290GEQ() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 5 }, 0 );
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 2, 0 }, Relationship.GEQ, -1.0));
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-        Assert.assertEquals(0, solution.getValue(), .0000001);
-        Assert.assertEquals(0, solution.getPoint()[0], .0000001);
-        Assert.assertEquals(0, solution.getPoint()[1], .0000001);
-    }
-
-    @Test(expected=NoFeasibleSolutionException.class)
-    public void testMath290LEQ() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 5 }, 0 );
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 2, 0 }, Relationship.LEQ, -1.0));
-        SimplexSolver solver = new SimplexSolver();
-        solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-    }
-
-    @Test
-    public void testMath293() {
-      LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );
-      Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-      constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));
-      constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 30.0));
-      constraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, 10.0));
-      constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, 10.0));
-      constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, 10.0));
-
-      SimplexSolver solver = new SimplexSolver();
-      PointValuePair solution1 = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-
-      Assert.assertEquals(15.7143, solution1.getPoint()[0], .0001);
-      Assert.assertEquals(0.0, solution1.getPoint()[1], .0001);
-      Assert.assertEquals(14.2857, solution1.getPoint()[2], .0001);
-      Assert.assertEquals(0.0, solution1.getPoint()[3], .0001);
-      Assert.assertEquals(0.0, solution1.getPoint()[4], .0001);
-      Assert.assertEquals(30.0, solution1.getPoint()[5], .0001);
-      Assert.assertEquals(40.57143, solution1.getValue(), .0001);
-
-      double valA = 0.8 * solution1.getPoint()[0] + 0.2 * solution1.getPoint()[1];
-      double valB = 0.7 * solution1.getPoint()[2] + 0.3 * solution1.getPoint()[3];
-      double valC = 0.4 * solution1.getPoint()[4] + 0.6 * solution1.getPoint()[5];
-
-      f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );
-      constraints = new ArrayList<LinearConstraint>();
-      constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));
-      constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 30.0));
-      constraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, valA));
-      constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, valB));
-      constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, valC));
-
-      PointValuePair solution2 = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-      Assert.assertEquals(40.57143, solution2.getValue(), .0001);
-    }
-
-    @Test
-    public void testSimplexSolver() {
-        LinearObjectiveFunction f =
-            new LinearObjectiveFunction(new double[] { 15, 10 }, 7);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
-        constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
-        constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.EQ, 4));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-        Assert.assertEquals(2.0, solution.getPoint()[0], 0.0);
-        Assert.assertEquals(2.0, solution.getPoint()[1], 0.0);
-        Assert.assertEquals(57.0, solution.getValue(), 0.0);
-    }
-
-    @Test
-    public void testSingleVariableAndConstraint() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 3 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.LEQ, 10));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-        Assert.assertEquals(10.0, solution.getPoint()[0], 0.0);
-        Assert.assertEquals(30.0, solution.getValue(), 0.0);
-    }
-
-    /**
-     * With no artificial variables needed (no equality and no greater-than
-     * constraints) the solver can go straight to Phase 2.
-     */
-    @Test
-    public void testModelWithNoArtificialVars() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15, 10 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
-        constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
-        constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.LEQ, 4));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-        Assert.assertEquals(2.0, solution.getPoint()[0], 0.0);
-        Assert.assertEquals(2.0, solution.getPoint()[1], 0.0);
-        Assert.assertEquals(50.0, solution.getValue(), 0.0);
-    }
-
-    @Test
-    public void testMinimization() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { -2, 1 }, -5);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 2 }, Relationship.LEQ, 6));
-        constraints.add(new LinearConstraint(new double[] { 3, 2 }, Relationship.LEQ, 12));
-        constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.GEQ, 0));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, false);
-        Assert.assertEquals(4.0, solution.getPoint()[0], 0.0);
-        Assert.assertEquals(0.0, solution.getPoint()[1], 0.0);
-        Assert.assertEquals(-13.0, solution.getValue(), 0.0);
-    }
-
-    @Test
-    public void testSolutionWithNegativeDecisionVariable() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { -2, 1 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.GEQ, 6));
-        constraints.add(new LinearConstraint(new double[] { 1, 2 }, Relationship.LEQ, 14));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-        Assert.assertEquals(-2.0, solution.getPoint()[0], 0.0);
-        Assert.assertEquals(8.0, solution.getPoint()[1], 0.0);
-        Assert.assertEquals(12.0, solution.getValue(), 0.0);
-    }
-
-    @Test(expected = NoFeasibleSolutionException.class)
-    public void testInfeasibleSolution() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.LEQ, 1));
-        constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.GEQ, 3));
-
-        SimplexSolver solver = new SimplexSolver();
-        solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-    }
-
-    @Test(expected = UnboundedSolutionException.class)
-    public void testUnboundedSolution() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15, 10 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.EQ, 2));
-
-        SimplexSolver solver = new SimplexSolver();
-        solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-    }
-
-    @Test
-    public void testRestrictVariablesToNonNegative() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 409, 523, 70, 204, 339 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {    43,   56, 345,  56,    5 }, Relationship.LEQ,  4567456));
-        constraints.add(new LinearConstraint(new double[] {    12,   45,   7,  56,   23 }, Relationship.LEQ,    56454));
-        constraints.add(new LinearConstraint(new double[] {     8,  768,   0,  34, 7456 }, Relationship.LEQ,  1923421));
-        constraints.add(new LinearConstraint(new double[] { 12342, 2342,  34, 678, 2342 }, Relationship.GEQ,     4356));
-        constraints.add(new LinearConstraint(new double[] {    45,  678,  76,  52,   23 }, Relationship.EQ,    456356));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-        Assert.assertEquals(2902.92783505155, solution.getPoint()[0], .0000001);
-        Assert.assertEquals(480.419243986254, solution.getPoint()[1], .0000001);
-        Assert.assertEquals(0.0, solution.getPoint()[2], .0000001);
-        Assert.assertEquals(0.0, solution.getPoint()[3], .0000001);
-        Assert.assertEquals(0.0, solution.getPoint()[4], .0000001);
-        Assert.assertEquals(1438556.7491409, solution.getValue(), .0000001);
-    }
-
-    @Test
-    public void testEpsilon() {
-      LinearObjectiveFunction f =
-          new LinearObjectiveFunction(new double[] { 10, 5, 1 }, 0);
-      Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-      constraints.add(new LinearConstraint(new double[] {  9, 8, 0 }, Relationship.EQ,  17));
-      constraints.add(new LinearConstraint(new double[] {  0, 7, 8 }, Relationship.LEQ,  7));
-      constraints.add(new LinearConstraint(new double[] { 10, 0, 2 }, Relationship.LEQ, 10));
-
-      SimplexSolver solver = new SimplexSolver();
-      PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, false);
-      Assert.assertEquals(1.0, solution.getPoint()[0], 0.0);
-      Assert.assertEquals(1.0, solution.getPoint()[1], 0.0);
-      Assert.assertEquals(0.0, solution.getPoint()[2], 0.0);
-      Assert.assertEquals(15.0, solution.getValue(), 0.0);
-  }
-
-    @Test
-    public void testTrivialModel() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 1 }, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.EQ,  0));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MAXIMIZE, true);
-        Assert.assertEquals(0, solution.getValue(), .0000001);
-    }
-
-    @Test
-    public void testLargeModel() {
-        double[] objective = new double[] {
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 12, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           12, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 12, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 12, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 12, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 12, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1};
-
-        LinearObjectiveFunction f = new LinearObjectiveFunction(objective, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(equationFromString(objective.length, "x0 + x1 + x2 + x3 - x12 = 0"));
-        constraints.add(equationFromString(objective.length, "x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 - x13 = 0"));
-        constraints.add(equationFromString(objective.length, "x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 >= 49"));
-        constraints.add(equationFromString(objective.length, "x0 + x1 + x2 + x3 >= 42"));
-        constraints.add(equationFromString(objective.length, "x14 + x15 + x16 + x17 - x26 = 0"));
-        constraints.add(equationFromString(objective.length, "x18 + x19 + x20 + x21 + x22 + x23 + x24 + x25 - x27 = 0"));
-        constraints.add(equationFromString(objective.length, "x14 + x15 + x16 + x17 - x12 = 0"));
-        constraints.add(equationFromString(objective.length, "x18 + x19 + x20 + x21 + x22 + x23 + x24 + x25 - x13 = 0"));
-        constraints.add(equationFromString(objective.length, "x28 + x29 + x30 + x31 - x40 = 0"));
-        constraints.add(equationFromString(objective.length, "x32 + x33 + x34 + x35 + x36 + x37 + x38 + x39 - x41 = 0"));
-        constraints.add(equationFromString(objective.length, "x32 + x33 + x34 + x35 + x36 + x37 + x38 + x39 >= 49"));
-        constraints.add(equationFromString(objective.length, "x28 + x29 + x30 + x31 >= 42"));
-        constraints.add(equationFromString(objective.length, "x42 + x43 + x44 + x45 - x54 = 0"));
-        constraints.add(equationFromString(objective.length, "x46 + x47 + x48 + x49 + x50 + x51 + x52 + x53 - x55 = 0"));
-        constraints.add(equationFromString(objective.length, "x42 + x43 + x44 + x45 - x40 = 0"));
-        constraints.add(equationFromString(objective.length, "x46 + x47 + x48 + x49 + x50 + x51 + x52 + x53 - x41 = 0"));
-        constraints.add(equationFromString(objective.length, "x56 + x57 + x58 + x59 - x68 = 0"));
-        constraints.add(equationFromString(objective.length, "x60 + x61 + x62 + x63 + x64 + x65 + x66 + x67 - x69 = 0"));
-        constraints.add(equationFromString(objective.length, "x60 + x61 + x62 + x63 + x64 + x65 + x66 + x67 >= 51"));
-        constraints.add(equationFromString(objective.length, "x56 + x57 + x58 + x59 >= 44"));
-        constraints.add(equationFromString(objective.length, "x70 + x71 + x72 + x73 - x82 = 0"));
-        constraints.add(equationFromString(objective.length, "x74 + x75 + x76 + x77 + x78 + x79 + x80 + x81 - x83 = 0"));
-        constraints.add(equationFromString(objective.length, "x70 + x71 + x72 + x73 - x68 = 0"));
-        constraints.add(equationFromString(objective.length, "x74 + x75 + x76 + x77 + x78 + x79 + x80 + x81 - x69 = 0"));
-        constraints.add(equationFromString(objective.length, "x84 + x85 + x86 + x87 - x96 = 0"));
-        constraints.add(equationFromString(objective.length, "x88 + x89 + x90 + x91 + x92 + x93 + x94 + x95 - x97 = 0"));
-        constraints.add(equationFromString(objective.length, "x88 + x89 + x90 + x91 + x92 + x93 + x94 + x95 >= 51"));
-        constraints.add(equationFromString(objective.length, "x84 + x85 + x86 + x87 >= 44"));
-        constraints.add(equationFromString(objective.length, "x98 + x99 + x100 + x101 - x110 = 0"));
-        constraints.add(equationFromString(objective.length, "x102 + x103 + x104 + x105 + x106 + x107 + x108 + x109 - x111 = 0"));
-        constraints.add(equationFromString(objective.length, "x98 + x99 + x100 + x101 - x96 = 0"));
-        constraints.add(equationFromString(objective.length, "x102 + x103 + x104 + x105 + x106 + x107 + x108 + x109 - x97 = 0"));
-        constraints.add(equationFromString(objective.length, "x112 + x113 + x114 + x115 - x124 = 0"));
-        constraints.add(equationFromString(objective.length, "x116 + x117 + x118 + x119 + x120 + x121 + x122 + x123 - x125 = 0"));
-        constraints.add(equationFromString(objective.length, "x116 + x117 + x118 + x119 + x120 + x121 + x122 + x123 >= 49"));
-        constraints.add(equationFromString(objective.length, "x112 + x113 + x114 + x115 >= 42"));
-        constraints.add(equationFromString(objective.length, "x126 + x127 + x128 + x129 - x138 = 0"));
-        constraints.add(equationFromString(objective.length, "x130 + x131 + x132 + x133 + x134 + x135 + x136 + x137 - x139 = 0"));
-        constraints.add(equationFromString(objective.length, "x126 + x127 + x128 + x129 - x124 = 0"));
-        constraints.add(equationFromString(objective.length, "x130 + x131 + x132 + x133 + x134 + x135 + x136 + x137 - x125 = 0"));
-        constraints.add(equationFromString(objective.length, "x140 + x141 + x142 + x143 - x152 = 0"));
-        constraints.add(equationFromString(objective.length, "x144 + x145 + x146 + x147 + x148 + x149 + x150 + x151 - x153 = 0"));
-        constraints.add(equationFromString(objective.length, "x144 + x145 + x146 + x147 + x148 + x149 + x150 + x151 >= 59"));
-        constraints.add(equationFromString(objective.length, "x140 + x141 + x142 + x143 >= 42"));
-        constraints.add(equationFromString(objective.length, "x154 + x155 + x156 + x157 - x166 = 0"));
-        constraints.add(equationFromString(objective.length, "x158 + x159 + x160 + x161 + x162 + x163 + x164 + x165 - x167 = 0"));
-        constraints.add(equationFromString(objective.length, "x154 + x155 + x156 + x157 - x152 = 0"));
-        constraints.add(equationFromString(objective.length, "x158 + x159 + x160 + x161 + x162 + x163 + x164 + x165 - x153 = 0"));
-        constraints.add(equationFromString(objective.length, "x83 + x82 - x168 = 0"));
-        constraints.add(equationFromString(objective.length, "x111 + x110 - x169 = 0"));
-        constraints.add(equationFromString(objective.length, "x170 - x182 = 0"));
-        constraints.add(equationFromString(objective.length, "x171 - x183 = 0"));
-        constraints.add(equationFromString(objective.length, "x172 - x184 = 0"));
-        constraints.add(equationFromString(objective.length, "x173 - x185 = 0"));
-        constraints.add(equationFromString(objective.length, "x174 - x186 = 0"));
-        constraints.add(equationFromString(objective.length, "x175 + x176 - x187 = 0"));
-        constraints.add(equationFromString(objective.length, "x177 - x188 = 0"));
-        constraints.add(equationFromString(objective.length, "x178 - x189 = 0"));
-        constraints.add(equationFromString(objective.length, "x179 - x190 = 0"));
-        constraints.add(equationFromString(objective.length, "x180 - x191 = 0"));
-        constraints.add(equationFromString(objective.length, "x181 - x192 = 0"));
-        constraints.add(equationFromString(objective.length, "x170 - x26 = 0"));
-        constraints.add(equationFromString(objective.length, "x171 - x27 = 0"));
-        constraints.add(equationFromString(objective.length, "x172 - x54 = 0"));
-        constraints.add(equationFromString(objective.length, "x173 - x55 = 0"));
-        constraints.add(equationFromString(objective.length, "x174 - x168 = 0"));
-        constraints.add(equationFromString(objective.length, "x177 - x169 = 0"));
-        constraints.add(equationFromString(objective.length, "x178 - x138 = 0"));
-        constraints.add(equationFromString(objective.length, "x179 - x139 = 0"));
-        constraints.add(equationFromString(objective.length, "x180 - x166 = 0"));
-        constraints.add(equationFromString(objective.length, "x181 - x167 = 0"));
-        constraints.add(equationFromString(objective.length, "x193 - x205 = 0"));
-        constraints.add(equationFromString(objective.length, "x194 - x206 = 0"));
-        constraints.add(equationFromString(objective.length, "x195 - x207 = 0"));
-        constraints.add(equationFromString(objective.length, "x196 - x208 = 0"));
-        constraints.add(equationFromString(objective.length, "x197 - x209 = 0"));
-        constraints.add(equationFromString(objective.length, "x198 + x199 - x210 = 0"));
-        constraints.add(equationFromString(objective.length, "x200 - x211 = 0"));
-        constraints.add(equationFromString(objective.length, "x201 - x212 = 0"));
-        constraints.add(equationFromString(objective.length, "x202 - x213 = 0"));
-        constraints.add(equationFromString(objective.length, "x203 - x214 = 0"));
-        constraints.add(equationFromString(objective.length, "x204 - x215 = 0"));
-        constraints.add(equationFromString(objective.length, "x193 - x182 = 0"));
-        constraints.add(equationFromString(objective.length, "x194 - x183 = 0"));
-        constraints.add(equationFromString(objective.length, "x195 - x184 = 0"));
-        constraints.add(equationFromString(objective.length, "x196 - x185 = 0"));
-        constraints.add(equationFromString(objective.length, "x197 - x186 = 0"));
-        constraints.add(equationFromString(objective.length, "x198 + x199 - x187 = 0"));
-        constraints.add(equationFromString(objective.length, "x200 - x188 = 0"));
-        constraints.add(equationFromString(objective.length, "x201 - x189 = 0"));
-        constraints.add(equationFromString(objective.length, "x202 - x190 = 0"));
-        constraints.add(equationFromString(objective.length, "x203 - x191 = 0"));
-        constraints.add(equationFromString(objective.length, "x204 - x192 = 0"));
-
-        SimplexSolver solver = new SimplexSolver();
-        PointValuePair solution = solver.optimize(f, constraints, GoalType.MINIMIZE, true);
-        Assert.assertEquals(7518.0, solution.getValue(), .0000001);
-    }
-
-    /**
-     * Converts a test string to a {@link LinearConstraint}.
-     * Ex: x0 + x1 + x2 + x3 - x12 = 0
-     */
-    private LinearConstraint equationFromString(int numCoefficients, String s) {
-        Relationship relationship;
-        if (s.contains(">=")) {
-            relationship = Relationship.GEQ;
-        } else if (s.contains("<=")) {
-            relationship = Relationship.LEQ;
-        } else if (s.contains("=")) {
-            relationship = Relationship.EQ;
-        } else {
-            throw new IllegalArgumentException();
-        }
-
-        String[] equationParts = s.split("[>|<]?=");
-        double rhs = Double.parseDouble(equationParts[1].trim());
-
-        double[] lhs = new double[numCoefficients];
-        String left = equationParts[0].replaceAll(" ?x", "");
-        String[] coefficients = left.split(" ");
-        for (String coefficient : coefficients) {
-            double value = coefficient.charAt(0) == '-' ? -1 : 1;
-            int index = Integer.parseInt(coefficient.replaceFirst("[+|-]", "").trim());
-            lhs[index] = value;
-        }
-        return new LinearConstraint(lhs, relationship, rhs);
-    }
-
-    private static boolean validSolution(PointValuePair solution, List<LinearConstraint> constraints, double epsilon) {
-        double[] vals = solution.getPoint();
-        for (LinearConstraint c : constraints) {
-            double[] coeffs = c.getCoefficients().toArray();
-            double result = 0.0d;
-            for (int i = 0; i < vals.length; i++) {
-                result += vals[i] * coeffs[i];
-            }
-            
-            switch (c.getRelationship()) {
-            case EQ:
-                if (!Precision.equals(result, c.getValue(), epsilon)) {
-                    return false;
-                }
-                break;
-                
-            case GEQ:
-                if (Precision.compareTo(result, c.getValue(), epsilon) < 0) {
-                    return false;
-                }
-                break;
-                
-            case LEQ:
-                if (Precision.compareTo(result, c.getValue(), epsilon) > 0) {
-                    return false;
-                }
-                break;
-            }
-        }
-        
-        return true;
-    }
-
-}
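
For reference, the deleted tests above drive the deprecated org.apache.commons.math4.optimization.linear solver through its positional optimize(f, constraints, goal, restrictToNonNegative) signature. The supported replacement in org.apache.commons.math4.optim.linear passes everything as OptimizationData instead. Below is a minimal, non-authoritative sketch of the testSimplexSolver model against that API; the class locations are assumed to mirror the math3 optim package, and the sketch class name itself is made up.

    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.commons.math4.optim.MaxIter;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.linear.LinearConstraint;
    import org.apache.commons.math4.optim.linear.LinearConstraintSet;
    import org.apache.commons.math4.optim.linear.LinearObjectiveFunction;
    import org.apache.commons.math4.optim.linear.NonNegativeConstraint;
    import org.apache.commons.math4.optim.linear.Relationship;
    import org.apache.commons.math4.optim.linear.SimplexSolver;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;

    public class SimplexSolverSketch {
        public static void main(String[] args) {
            // Same model as testSimplexSolver above: maximize 15 x0 + 10 x1 + 7.
            LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15, 10 }, 7);
            Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
            constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
            constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
            constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.EQ, 4));

            // The optim solver takes its inputs as OptimizationData varargs instead of
            // the old positional (f, constraints, goal, restrictToNonNegative) call.
            SimplexSolver solver = new SimplexSolver();
            PointValuePair solution = solver.optimize(new MaxIter(100),
                                                      f,
                                                      new LinearConstraintSet(constraints),
                                                      GoalType.MAXIMIZE,
                                                      new NonNegativeConstraint(false));
            // Expected, as asserted above: x = {2, 2}, value = 57.
            System.out.println(solution.getPoint()[0] + " " + solution.getPoint()[1]
                               + " -> " + solution.getValue());
        }
    }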

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/linear/SimplexTableauTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/linear/SimplexTableauTest.java b/src/test/java/org/apache/commons/math4/optimization/linear/SimplexTableauTest.java
deleted file mode 100644
index 6b642bf..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/linear/SimplexTableauTest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-import org.apache.commons.math4.TestUtils;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.linear.LinearConstraint;
-import org.apache.commons.math4.optimization.linear.LinearObjectiveFunction;
-import org.apache.commons.math4.optimization.linear.Relationship;
-import org.apache.commons.math4.optimization.linear.SimplexTableau;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class SimplexTableauTest {
-
-    @Test
-    public void testInitialization() {
-        LinearObjectiveFunction f = createFunction();
-        Collection<LinearConstraint> constraints = createConstraints();
-        SimplexTableau tableau =
-            new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
-        double[][] expectedInitialTableau = {
-                                             {-1, 0,  -1,  -1,  2, 0, 0, 0, -4},
-                                             { 0, 1, -15, -10, 25, 0, 0, 0,  0},
-                                             { 0, 0,   1,   0, -1, 1, 0, 0,  2},
-                                             { 0, 0,   0,   1, -1, 0, 1, 0,  3},
-                                             { 0, 0,   1,   1, -2, 0, 0, 1,  4}
-        };
-        assertMatrixEquals(expectedInitialTableau, tableau.getData());
-    }
-
-    @Test
-    public void testDropPhase1Objective() {
-        LinearObjectiveFunction f = createFunction();
-        Collection<LinearConstraint> constraints = createConstraints();
-        SimplexTableau tableau =
-            new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
-        double[][] expectedTableau = {
-                                      { 1, -15, -10, 0, 0, 0, 0},
-                                      { 0,   1,   0, 1, 0, 0, 2},
-                                      { 0,   0,   1, 0, 1, 0, 3},
-                                      { 0,   1,   1, 0, 0, 1, 4}
-        };
-        tableau.dropPhase1Objective();
-        assertMatrixEquals(expectedTableau, tableau.getData());
-    }
-
-    @Test
-    public void testTableauWithNoArtificialVars() {
-        LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {15, 10}, 0);
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.LEQ, 2));
-        constraints.add(new LinearConstraint(new double[] {0, 1}, Relationship.LEQ, 3));
-        constraints.add(new LinearConstraint(new double[] {1, 1}, Relationship.LEQ, 4));
-        SimplexTableau tableau =
-            new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
-        double[][] initialTableau = {
-                                     {1, -15, -10, 25, 0, 0, 0, 0},
-                                     {0,   1,   0, -1, 1, 0, 0, 2},
-                                     {0,   0,   1, -1, 0, 1, 0, 3},
-                                     {0,   1,   1, -2, 0, 0, 1, 4}
-        };
-        assertMatrixEquals(initialTableau, tableau.getData());
-    }
-
-    @Test
-    public void testSerial() {
-        LinearObjectiveFunction f = createFunction();
-        Collection<LinearConstraint> constraints = createConstraints();
-        SimplexTableau tableau =
-            new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
-        Assert.assertEquals(tableau, TestUtils.serializeAndRecover(tableau));
-    }
-
-    private LinearObjectiveFunction createFunction() {
-        return new LinearObjectiveFunction(new double[] {15, 10}, 0);
-    }
-
-    private Collection<LinearConstraint> createConstraints() {
-        Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
-        constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.LEQ, 2));
-        constraints.add(new LinearConstraint(new double[] {0, 1}, Relationship.LEQ, 3));
-        constraints.add(new LinearConstraint(new double[] {1, 1}, Relationship.EQ, 4));
-        return constraints;
-    }
-
-    private void assertMatrixEquals(double[][] expected, double[][] result) {
-        Assert.assertEquals("Wrong number of rows.", expected.length, result.length);
-        for (int i = 0; i < expected.length; i++) {
-            Assert.assertEquals("Wrong number of columns.", expected[i].length, result[i].length);
-            for (int j = 0; j < expected[i].length; j++) {
-                Assert.assertEquals("Wrong value at position [" + i + "," + j + "]", expected[i][j], result[i][j], 1.0e-15);
-            }
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/univariate/BracketFinderTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/univariate/BracketFinderTest.java b/src/test/java/org/apache/commons/math4/optimization/univariate/BracketFinderTest.java
deleted file mode 100644
index d6e0a31..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/univariate/BracketFinderTest.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.univariate.BracketFinder;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test for {@link BracketFinder}.
- */
-@Deprecated
-public class BracketFinderTest {
-
-    @Test
-    public void testCubicMin() {
-        final BracketFinder bFind = new BracketFinder();
-        final UnivariateFunction func = new UnivariateFunction() {
-                public double value(double x) {
-                    if (x < -2) {
-                        return value(-2);
-                    }
-                    else  {
-                        return (x - 1) * (x + 2) * (x + 3);
-                    }
-                }
-            };
-
-        bFind.search(func, GoalType.MINIMIZE, -2 , -1);
-        final double tol = 1e-15;
-        // Comparing with results computed in Python.
-        Assert.assertEquals(-2, bFind.getLo(), tol);
-        Assert.assertEquals(-1, bFind.getMid(), tol);
-        Assert.assertEquals(0.61803399999999997, bFind.getHi(), tol);
-    }
-
-    @Test
-    public void testCubicMax() {
-        final BracketFinder bFind = new BracketFinder();
-        final UnivariateFunction func = new UnivariateFunction() {
-                public double value(double x) {
-                    if (x < -2) {
-                        return value(-2);
-                    }
-                    else  {
-                        return -(x - 1) * (x + 2) * (x + 3);
-                    }
-                }
-            };
-
-        bFind.search(func, GoalType.MAXIMIZE, -2 , -1);
-        final double tol = 1e-15;
-        Assert.assertEquals(-2, bFind.getLo(), tol);
-        Assert.assertEquals(-1, bFind.getMid(), tol);
-        Assert.assertEquals(0.61803399999999997, bFind.getHi(), tol);
-    }
-
-    @Test
-    public void testMinimumIsOnIntervalBoundary() {
-        final UnivariateFunction func = new UnivariateFunction() {
-                public double value(double x) {
-                    return x * x;
-                }
-            };
-
-        final BracketFinder bFind = new BracketFinder();
-
-        bFind.search(func, GoalType.MINIMIZE, 0, 1);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-
-        bFind.search(func, GoalType.MINIMIZE, -1, 0);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-    }
-
-    @Test
-    public void testIntervalBoundsOrdering() {
-        final UnivariateFunction func = new UnivariateFunction() {
-                public double value(double x) {
-                    return x * x;
-                }
-            };
-
-        final BracketFinder bFind = new BracketFinder();
-
-        bFind.search(func, GoalType.MINIMIZE, -1, 1);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-
-        bFind.search(func, GoalType.MINIMIZE, 1, -1);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-
-        bFind.search(func, GoalType.MINIMIZE, 1, 2);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-
-        bFind.search(func, GoalType.MINIMIZE, 2, 1);
-        Assert.assertTrue(bFind.getLo() <= 0);
-        Assert.assertTrue(0 <= bFind.getHi());
-    }
-}
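
BracketFinder also has a non-deprecated counterpart in org.apache.commons.math4.optim.univariate with, apparently, the same search/getLo/getMid/getHi surface used in the deleted test above. A minimal sketch, under the assumption that the math4 class locations mirror the math3 optim package:

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.univariate.BracketFinder;

    public class BracketFinderSketch {
        public static void main(String[] args) {
            // Bracket the minimum of x^2; the returned interval should contain x = 0.
            UnivariateFunction parabola = new UnivariateFunction() {
                public double value(double x) {
                    return x * x;
                }
            };
            BracketFinder bracket = new BracketFinder();
            bracket.search(parabola, GoalType.MINIMIZE, -1, 1);
            System.out.println("lo=" + bracket.getLo()
                               + " mid=" + bracket.getMid()
                               + " hi=" + bracket.getHi());
        }
    }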

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/univariate/BrentOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/univariate/BrentOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/univariate/BrentOptimizerTest.java
deleted file mode 100644
index 18f71b9..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/univariate/BrentOptimizerTest.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-
-import org.apache.commons.math4.analysis.FunctionUtils;
-import org.apache.commons.math4.analysis.QuinticFunction;
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.analysis.function.Sin;
-import org.apache.commons.math4.analysis.function.StepFunction;
-import org.apache.commons.math4.exception.NumberIsTooLargeException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.univariate.BrentOptimizer;
-import org.apache.commons.math4.optimization.univariate.SimpleUnivariateValueChecker;
-import org.apache.commons.math4.optimization.univariate.UnivariateOptimizer;
-import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;
-import org.apache.commons.math4.stat.descriptive.DescriptiveStatistics;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- */
-@Deprecated
-public final class BrentOptimizerTest {
-
-    @Test
-    public void testSinMin() {
-        UnivariateFunction f = new Sin();
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
-        Assert.assertEquals(3 * Math.PI / 2, optimizer.optimize(200, f, GoalType.MINIMIZE, 4, 5).getPoint(), 1e-8);
-        Assert.assertTrue(optimizer.getEvaluations() <= 50);
-        Assert.assertEquals(200, optimizer.getMaxEvaluations());
-        Assert.assertEquals(3 * Math.PI / 2, optimizer.optimize(200, f, GoalType.MINIMIZE, 1, 5).getPoint(), 1e-8);
-        Assert.assertTrue(optimizer.getEvaluations() <= 100);
-        Assert.assertTrue(optimizer.getEvaluations() >= 15);
-        try {
-            optimizer.optimize(10, f, GoalType.MINIMIZE, 4, 5);
-            Assert.fail("an exception should have been thrown");
-        } catch (TooManyEvaluationsException fee) {
-            // expected
-        }
-    }
-
-    @Test
-    public void testSinMinWithValueChecker() {
-        final UnivariateFunction f = new Sin();
-        final ConvergenceChecker<UnivariatePointValuePair> checker = new SimpleUnivariateValueChecker(1e-5, 1e-14);
-        // The default stopping criterion of Brent's algorithm should not
-        // pass, but the search will stop at the given relative tolerance
-        // for the function value.
-        final UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14, checker);
-        final UnivariatePointValuePair result = optimizer.optimize(200, f, GoalType.MINIMIZE, 4, 5);
-        Assert.assertEquals(3 * Math.PI / 2, result.getPoint(), 1e-3);
-    }
-
-    @Test
-    public void testBoundaries() {
-        final double lower = -1.0;
-        final double upper = +1.0;
-        UnivariateFunction f = new UnivariateFunction() {            
-            public double value(double x) {
-                if (x < lower) {
-                    throw new NumberIsTooSmallException(x, lower, true);
-                } else if (x > upper) {
-                    throw new NumberIsTooLargeException(x, upper, true);
-                } else {
-                    return x;
-                }
-            }
-        };
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
-        Assert.assertEquals(lower,
-                            optimizer.optimize(100, f, GoalType.MINIMIZE, lower, upper).getPoint(),
-                            1.0e-8);
-        Assert.assertEquals(upper,
-                            optimizer.optimize(100, f, GoalType.MAXIMIZE, lower, upper).getPoint(),
-                            1.0e-8);
-    }
-
-    @Test
-    public void testQuinticMin() {
-        // The function has local minima at -0.27195613 and 0.82221643.
-        UnivariateFunction f = new QuinticFunction();
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
-        Assert.assertEquals(-0.27195613, optimizer.optimize(200, f, GoalType.MINIMIZE, -0.3, -0.2).getPoint(), 1.0e-8);
-        Assert.assertEquals( 0.82221643, optimizer.optimize(200, f, GoalType.MINIMIZE,  0.3,  0.9).getPoint(), 1.0e-8);
-        Assert.assertTrue(optimizer.getEvaluations() <= 50);
-
-        // search in a large interval
-        Assert.assertEquals(-0.27195613, optimizer.optimize(200, f, GoalType.MINIMIZE, -1.0, 0.2).getPoint(), 1.0e-8);
-        Assert.assertTrue(optimizer.getEvaluations() <= 50);
-    }
-
-    @Test
-    public void testQuinticMinStatistics() {
-        // The function has local minima at -0.27195613 and 0.82221643.
-        UnivariateFunction f = new QuinticFunction();
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-11, 1e-14);
-
-        final DescriptiveStatistics[] stat = new DescriptiveStatistics[2];
-        for (int i = 0; i < stat.length; i++) {
-            stat[i] = new DescriptiveStatistics();
-        }
-
-        final double min = -0.75;
-        final double max = 0.25;
-        final int nSamples = 200;
-        final double delta = (max - min) / nSamples;
-        for (int i = 0; i < nSamples; i++) {
-            final double start = min + i * delta;
-            stat[0].addValue(optimizer.optimize(40, f, GoalType.MINIMIZE, min, max, start).getPoint());
-            stat[1].addValue(optimizer.getEvaluations());
-        }
-
-        final double meanOptValue = stat[0].getMean();
-        final double medianEval = stat[1].getPercentile(50);
-        Assert.assertTrue(meanOptValue > -0.2719561281);
-        Assert.assertTrue(meanOptValue < -0.2719561280);
-        Assert.assertEquals(23, (int) medianEval);
-    }
-
-    @Test
-    public void testQuinticMax() {
-        // The quintic function has zeros at 0, +-0.5 and +-1.
-        // The function has a local maximum at 0.27195613.
-        UnivariateFunction f = new QuinticFunction();
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-12, 1e-14);
-        Assert.assertEquals(0.27195613, optimizer.optimize(100, f, GoalType.MAXIMIZE, 0.2, 0.3).getPoint(), 1e-8);
-        try {
-            optimizer.optimize(5, f, GoalType.MAXIMIZE, 0.2, 0.3);
-            Assert.fail("an exception should have been thrown");
-        } catch (TooManyEvaluationsException miee) {
-            // expected
-        }
-    }
-
-    @Test
-    public void testMinEndpoints() {
-        UnivariateFunction f = new Sin();
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-8, 1e-14);
-
-        // endpoint is minimum
-        double result = optimizer.optimize(50, f, GoalType.MINIMIZE, 3 * Math.PI / 2, 5).getPoint();
-        Assert.assertEquals(3 * Math.PI / 2, result, 1e-6);
-
-        result = optimizer.optimize(50, f, GoalType.MINIMIZE, 4, 3 * Math.PI / 2).getPoint();
-        Assert.assertEquals(3 * Math.PI / 2, result, 1e-6);
-    }
-
-    @Test
-    public void testMath832() {
-        final UnivariateFunction f = new UnivariateFunction() {
-                public double value(double x) {
-                    final double sqrtX = FastMath.sqrt(x);
-                    final double a = 1e2 * sqrtX;
-                    final double b = 1e6 / x;
-                    final double c = 1e4 / sqrtX;
-
-                    return a + b + c;
-                }
-            };
-
-        UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-8);
-        final double result = optimizer.optimize(1483,
-                                                 f,
-                                                 GoalType.MINIMIZE,
-                                                 Double.MIN_VALUE,
-                                                 Double.MAX_VALUE).getPoint();
-
-        Assert.assertEquals(804.9355825, result, 1e-6);
-    }
-
-    /**
-     * Contrived example showing that prior to the resolution of MATH-855
-     * (second revision), the algorithm would not return the best point if
-     * it happened to be the initial guess.
-     */
-    @Test
-    public void testKeepInitIfBest() {
-        final double minSin = 3 * Math.PI / 2;
-        final double offset = 1e-8;
-        final double delta = 1e-7;
-        final UnivariateFunction f1 = new Sin();
-        final UnivariateFunction f2 = new StepFunction(new double[] { minSin, minSin + offset, minSin + 2 * offset},
-                                                       new double[] { 0, -1, 0 });
-        final UnivariateFunction f = FunctionUtils.add(f1, f2);
-        // A slightly less stringent tolerance would make the test pass
-        // even with the previous implementation.
-        final double relTol = 1e-8;
-        final UnivariateOptimizer optimizer = new BrentOptimizer(relTol, 1e-100);
-        final double init = minSin + 1.5 * offset;
-        final UnivariatePointValuePair result
-            = optimizer.optimize(200, f, GoalType.MINIMIZE,
-                                 minSin - 6.789 * delta,
-                                 minSin + 9.876 * delta,
-                                 init);
-
-        final double sol = result.getPoint();
-        final double expected = init;
-
-//         System.out.println("numEval=" + numEval);
-//         System.out.println("min=" + init + " f=" + f.value(init));
-//         System.out.println("sol=" + sol + " f=" + f.value(sol));
-//         System.out.println("exp=" + expected + " f=" + f.value(expected));
-
-        Assert.assertTrue("Best point not reported", f.value(sol) <= f.value(expected));
-    }
-
-    /**
-     * Contrived example showing that prior to the resolution of MATH-855,
-     * the algorithm, by always returning the last evaluated point, would
-     * sometimes not report the best point it had found.
-     */
-    @Test
-    public void testMath855() {
-        final double minSin = 3 * Math.PI / 2;
-        final double offset = 1e-8;
-        final double delta = 1e-7;
-        final UnivariateFunction f1 = new Sin();
-        final UnivariateFunction f2 = new StepFunction(new double[] { minSin, minSin + offset, minSin + 5 * offset },
-                                                       new double[] { 0, -1, 0 });
-        final UnivariateFunction f = FunctionUtils.add(f1, f2);
-        final UnivariateOptimizer optimizer = new BrentOptimizer(1e-8, 1e-100);
-        final UnivariatePointValuePair result
-            = optimizer.optimize(200, f, GoalType.MINIMIZE,
-                                 minSin - 6.789 * delta,
-                                 minSin + 9.876 * delta);
-
-        final double sol = result.getPoint();
-        final double expected = 4.712389027602411;
-
-        // System.out.println("min=" + (minSin + offset) + " f=" + f.value(minSin + offset));
-        // System.out.println("sol=" + sol + " f=" + f.value(sol));
-        // System.out.println("exp=" + expected + " f=" + f.value(expected));
-
-        Assert.assertTrue("Best point not reported", f.value(sol) <= f.value(expected));
-    }
-}

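The deleted test above drove the deprecated BrentOptimizer through the positional optimize(maxEval, function, goal, lower, upper[, start]) entry point. For comparison, here is a minimal sketch of the same kind of call against the replacement org.apache.commons.math4.optim.univariate API; the package layout, class names and OptimizationData-based optimize signature are assumed to mirror the Commons Math 3 "optim" package and are not taken from this commit.

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.analysis.function.Sin;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.univariate.BrentOptimizer;
    import org.apache.commons.math4.optim.univariate.SearchInterval;
    import org.apache.commons.math4.optim.univariate.UnivariateObjectiveFunction;
    import org.apache.commons.math4.optim.univariate.UnivariatePointValuePair;

    public class BrentMigrationSketch {
        public static void main(String[] args) {
            UnivariateFunction f = new Sin();
            // Same tolerances as the deleted endpoint test: relative 1e-8, absolute 1e-14.
            BrentOptimizer optimizer = new BrentOptimizer(1e-8, 1e-14);
            // Budget, objective, goal and interval travel as OptimizationData
            // instead of the positional arguments used by the deprecated package.
            UnivariatePointValuePair result =
                optimizer.optimize(new MaxEval(200),
                                   new UnivariateObjectiveFunction(f),
                                   GoalType.MINIMIZE,
                                   new SearchInterval(4, 5));
            // sin(x) has its minimum at 3*pi/2 ~ 4.71239 inside [4, 5].
            System.out.println("min at x = " + result.getPoint()
                               + ", f = " + result.getValue());
        }
    }
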
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueCheckerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueCheckerTest.java b/src/test/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueCheckerTest.java
deleted file mode 100644
index c9f44ad..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueCheckerTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.optimization.univariate.SimpleUnivariateValueChecker;
-import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;
-import org.junit.Test;
-import org.junit.Assert;
-
-@Deprecated
-public class SimpleUnivariateValueCheckerTest {
-    @Test(expected=NotStrictlyPositiveException.class)
-    public void testIterationCheckPrecondition() {
-        new SimpleUnivariateValueChecker(1e-1, 1e-2, 0);
-    }
-
-    @Test
-    public void testIterationCheck() {
-        final int max = 10;
-        final SimpleUnivariateValueChecker checker = new SimpleUnivariateValueChecker(1e-1, 1e-2, max);
-        Assert.assertTrue(checker.converged(max, null, null)); 
-        Assert.assertTrue(checker.converged(max + 1, null, null));
-    }
-
-    @Test
-    public void testIterationCheckDisabled() {
-        final SimpleUnivariateValueChecker checker = new SimpleUnivariateValueChecker(1e-8, 1e-8);
-
-        final UnivariatePointValuePair a = new UnivariatePointValuePair(1d, 1d);
-        final UnivariatePointValuePair b = new UnivariatePointValuePair(10d, 10d);
-
-        Assert.assertFalse(checker.converged(-1, a, b));
-        Assert.assertFalse(checker.converged(0, a, b));
-        Assert.assertFalse(checker.converged(1000000, a, b));
-
-        Assert.assertTrue(checker.converged(-1, a, a));
-        Assert.assertTrue(checker.converged(-1, b, b));
-    }
-
-}

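The removed checker test relied on the three-argument constructor that adds an iteration cap to the relative/absolute value thresholds. Below is a hedged sketch of how the corresponding checker in the replacement org.apache.commons.math4.optim.univariate package would typically be wired into a BrentOptimizer, assuming it keeps the math3 constructor signatures; none of this is part of the present commit.

    import org.apache.commons.math4.optim.univariate.BrentOptimizer;
    import org.apache.commons.math4.optim.univariate.SimpleUnivariateValueChecker;
    import org.apache.commons.math4.optim.univariate.UnivariatePointValuePair;

    public class ValueCheckerSketch {
        public static void main(String[] args) {
            // Declare convergence when the objective value stabilizes within the
            // relative (1e-10) and absolute (1e-14) thresholds, or after 50 iterations.
            SimpleUnivariateValueChecker checker =
                new SimpleUnivariateValueChecker(1e-10, 1e-14, 50);
            // The checker can be supplied to the optimizer as an extra stopping criterion;
            // it is constructed here only to show the wiring.
            BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14, checker);
            // converged(iteration, previous, current) can also be called directly,
            // which is what the deleted test did once the iteration cap was reached.
            UnivariatePointValuePair previous = new UnivariatePointValuePair(1.0, 1.0);
            UnivariatePointValuePair current  = new UnivariatePointValuePair(1.0, 1.0 + 1e-15);
            System.out.println(checker.converged(1, previous, current)); // true: values agree
        }
    }
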
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizerTest.java
deleted file mode 100644
index ea4b4ab..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/univariate/UnivariateMultiStartOptimizerTest.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.QuinticFunction;
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.analysis.function.Sin;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.univariate.BrentOptimizer;
-import org.apache.commons.math4.optimization.univariate.UnivariateMultiStartOptimizer;
-import org.apache.commons.math4.optimization.univariate.UnivariateOptimizer;
-import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;
-import org.apache.commons.math4.random.JDKRandomGenerator;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class UnivariateMultiStartOptimizerTest {
-
-    @Test
-    public void testSinMin() {
-        UnivariateFunction f = new Sin();
-        UnivariateOptimizer underlying = new BrentOptimizer(1e-10, 1e-14);
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(44428400075l);
-        UnivariateMultiStartOptimizer<UnivariateFunction> optimizer =
-            new UnivariateMultiStartOptimizer<UnivariateFunction>(underlying, 10, g);
-        optimizer.optimize(300, f, GoalType.MINIMIZE, -100.0, 100.0);
-        UnivariatePointValuePair[] optima = optimizer.getOptima();
-        for (int i = 1; i < optima.length; ++i) {
-            double d = (optima[i].getPoint() - optima[i-1].getPoint()) / (2 * FastMath.PI);
-            Assert.assertTrue(FastMath.abs(d - FastMath.rint(d)) < 1.0e-8);
-            Assert.assertEquals(-1.0, f.value(optima[i].getPoint()), 1.0e-10);
-            Assert.assertEquals(f.value(optima[i].getPoint()), optima[i].getValue(), 1.0e-10);
-        }
-        Assert.assertTrue(optimizer.getEvaluations() > 200);
-        Assert.assertTrue(optimizer.getEvaluations() < 300);
-    }
-
-    @Test
-    public void testQuinticMin() {
-        // The quintic function has zeros at 0, +-0.5 and +-1.
-        // The function has extrema (first derivative is zero) at 0.27195613 and 0.82221643.
-        UnivariateFunction f = new QuinticFunction();
-        UnivariateOptimizer underlying = new BrentOptimizer(1e-9, 1e-14);
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(4312000053L);
-        UnivariateMultiStartOptimizer<UnivariateFunction> optimizer =
-            new UnivariateMultiStartOptimizer<UnivariateFunction>(underlying, 5, g);
-
-        UnivariatePointValuePair optimum
-            = optimizer.optimize(300, f, GoalType.MINIMIZE, -0.3, -0.2);
-        Assert.assertEquals(-0.2719561293, optimum.getPoint(), 1e-9);
-        Assert.assertEquals(-0.0443342695, optimum.getValue(), 1e-9);
-
-        UnivariatePointValuePair[] optima = optimizer.getOptima();
-        for (int i = 0; i < optima.length; ++i) {
-            Assert.assertEquals(f.value(optima[i].getPoint()), optima[i].getValue(), 1e-9);
-        }
-        Assert.assertTrue(optimizer.getEvaluations() >= 50);
-        Assert.assertTrue(optimizer.getEvaluations() <= 100);
-    }
-
-    @Test
-    public void testBadFunction() {
-        UnivariateFunction f = new UnivariateFunction() {
-                public double value(double x) {
-                    if (x < 0) {
-                        throw new LocalException();
-                    }
-                    return 0;
-                }
-            };
-        UnivariateOptimizer underlying = new BrentOptimizer(1e-9, 1e-14);
-        JDKRandomGenerator g = new JDKRandomGenerator();
-        g.setSeed(4312000053L);
-        UnivariateMultiStartOptimizer<UnivariateFunction> optimizer =
-            new UnivariateMultiStartOptimizer<UnivariateFunction>(underlying, 5, g);
- 
-        try {
-            optimizer.optimize(300, f, GoalType.MINIMIZE, -0.3, -0.2);
-            Assert.fail();
-        } catch (LocalException e) {
-            // Expected.
-        }
-
-        // Ensure that the exception was thrown because no optimum was found.
-        Assert.assertTrue(optimizer.getOptima()[0] == null);
-    }
-
-    private static class LocalException extends RuntimeException {
-        private static final long serialVersionUID = 1194682757034350629L;
-    }
-
-}

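The multi-start test above wrapped a BrentOptimizer so that several random start points are tried and all located optima collected. A minimal sketch of the equivalent setup with the replacement MultiStartUnivariateOptimizer follows, assuming the org.apache.commons.math4.optim.univariate classes mirror their math3 counterparts; the class names and signatures are assumptions, not part of this commit.

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.analysis.function.Sin;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.univariate.BrentOptimizer;
    import org.apache.commons.math4.optim.univariate.MultiStartUnivariateOptimizer;
    import org.apache.commons.math4.optim.univariate.SearchInterval;
    import org.apache.commons.math4.optim.univariate.UnivariateObjectiveFunction;
    import org.apache.commons.math4.optim.univariate.UnivariatePointValuePair;
    import org.apache.commons.math4.random.JDKRandomGenerator;

    public class MultiStartSketch {
        public static void main(String[] args) {
            UnivariateFunction f = new Sin();
            BrentOptimizer underlying = new BrentOptimizer(1e-10, 1e-14);
            JDKRandomGenerator g = new JDKRandomGenerator();
            g.setSeed(44428400075L);
            // 10 starts drawn from the search interval by the supplied generator.
            MultiStartUnivariateOptimizer optimizer =
                new MultiStartUnivariateOptimizer(underlying, 10, g);
            optimizer.optimize(new MaxEval(300),
                               new UnivariateObjectiveFunction(f),
                               GoalType.MINIMIZE,
                               new SearchInterval(-100.0, 100.0));
            // getOptima() returns all located optima, best first.
            for (UnivariatePointValuePair p : optimizer.getOptima()) {
                System.out.println("x = " + p.getPoint() + ", f = " + p.getValue());
            }
        }
    }
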

[07/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/CMAESOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/CMAESOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/CMAESOptimizerTest.java
deleted file mode 100644
index f8587ee..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/CMAESOptimizerTest.java
+++ /dev/null
@@ -1,761 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.commons.math4.Retry;
-import org.apache.commons.math4.RetryRunner;
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NotPositiveException;
-import org.apache.commons.math4.exception.NumberIsTooLargeException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.OutOfRangeException;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleBounds;
-import org.apache.commons.math4.optimization.direct.CMAESOptimizer;
-import org.apache.commons.math4.random.MersenneTwister;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-
-/**
- * Test for {@link CMAESOptimizer}.
- */
-@Deprecated
-@RunWith(RetryRunner.class)
-public class CMAESOptimizerTest {
-
-    static final int DIM = 13;
-    static final int LAMBDA = 4 + (int)(3.*FastMath.log(DIM));
-   
-    @Test(expected = NumberIsTooLargeException.class)
-    public void testInitOutofbounds1() {
-        double[] startPoint = point(DIM,3);
-        double[] insigma = point(DIM, 0.3);
-        double[][] boundaries = boundaries(DIM,-1,2);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-    @Test(expected = NumberIsTooSmallException.class)
-    public void testInitOutofbounds2() {
-        double[] startPoint = point(DIM, -2);
-        double[] insigma = point(DIM, 0.3);
-        double[][] boundaries = boundaries(DIM,-1,2);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-    
-    @Test(expected = DimensionMismatchException.class)
-    public void testBoundariesDimensionMismatch() {
-        double[] startPoint = point(DIM,0.5);
-        double[] insigma = point(DIM, 0.3);
-        double[][] boundaries = boundaries(DIM+1,-1,2);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test(expected = NotPositiveException.class)
-    public void testInputSigmaNegative() {
-        double[] startPoint = point(DIM,0.5);
-        double[] insigma = point(DIM,-0.5);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test(expected = OutOfRangeException.class)
-    public void testInputSigmaOutOfRange() {
-        double[] startPoint = point(DIM,0.5);
-        double[] insigma = point(DIM, 1.1);
-        double[][] boundaries = boundaries(DIM,-0.5,0.5);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test(expected = DimensionMismatchException.class)
-    public void testInputSigmaDimensionMismatch() {
-        double[] startPoint = point(DIM,0.5);
-        double[] insigma = point(DIM + 1, 0.5);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-    
-    @Test
-    @Retry(3)
-    public void testRosen() {
-        double[] startPoint = point(DIM,0.1);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    @Retry(3)
-    public void testMaximize() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),1.0);
-        doTest(new MinusElli(), startPoint, insigma, boundaries,
-                GoalType.MAXIMIZE, LAMBDA, true, 0, 1.0-1e-13,
-                2e-10, 5e-6, 100000, expected);
-        doTest(new MinusElli(), startPoint, insigma, boundaries,
-                GoalType.MAXIMIZE, LAMBDA, false, 0, 1.0-1e-13,
-                2e-10, 5e-6, 100000, expected);
-        boundaries = boundaries(DIM,-0.3,0.3); 
-        startPoint = point(DIM,0.1);
-        doTest(new MinusElli(), startPoint, insigma, boundaries,
-                GoalType.MAXIMIZE, LAMBDA, true, 0, 1.0-1e-13,
-                2e-10, 5e-6, 100000, expected);
-    }
-
-    @Test
-    public void testEllipse() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Elli(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new Elli(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testElliRotated() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new ElliRotated(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new ElliRotated(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testCigar() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Cigar(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 200000, expected);
-        doTest(new Cigar(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testCigarWithBoundaries() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = boundaries(DIM, -1e100, Double.POSITIVE_INFINITY);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Cigar(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 200000, expected);
-        doTest(new Cigar(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testTwoAxes() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new TwoAxes(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 200000, expected);
-        doTest(new TwoAxes(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
-                1e-8, 1e-3, 200000, expected);
-    }
-
-    @Test
-    public void testCigTab() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.3);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new CigTab(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 5e-5, 100000, expected);
-        doTest(new CigTab(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 5e-5, 100000, expected);
-    }
-
-    @Test
-    public void testSphere() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Sphere(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new Sphere(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testTablet() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Tablet(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new Tablet(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testDiffPow() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new DiffPow(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 10, true, 0, 1e-13,
-                1e-8, 1e-1, 100000, expected);
-        doTest(new DiffPow(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 10, false, 0, 1e-13,
-                1e-8, 2e-1, 100000, expected);
-    }
-
-    @Test
-    public void testSsDiffPow() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new SsDiffPow(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 10, true, 0, 1e-13,
-                1e-4, 1e-1, 200000, expected);
-        doTest(new SsDiffPow(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 10, false, 0, 1e-13,
-                1e-4, 1e-1, 200000, expected);
-    }
-
-    @Test
-    public void testAckley() {
-        double[] startPoint = point(DIM,1.0);
-        double[] insigma = point(DIM,1.0);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Ackley(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
-                1e-9, 1e-5, 100000, expected);
-        doTest(new Ackley(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
-                1e-9, 1e-5, 100000, expected);
-    }
-
-    @Test
-    public void testRastrigin() {
-        double[] startPoint = point(DIM,0.1);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,0.0),0.0);
-        doTest(new Rastrigin(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, (int)(200*FastMath.sqrt(DIM)), true, 0, 1e-13,
-                1e-13, 1e-6, 200000, expected);
-        doTest(new Rastrigin(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, (int)(200*FastMath.sqrt(DIM)), false, 0, 1e-13,
-                1e-13, 1e-6, 200000, expected);
-    }
-
-    @Test
-    public void testConstrainedRosen() {
-        double[] startPoint = point(DIM, 0.1);
-        double[] insigma = point(DIM, 0.1);
-        double[][] boundaries = boundaries(DIM, -1, 2);
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
-                1e-13, 1e-6, 100000, expected);
-    }
-
-    @Test
-    public void testDiagonalRosen() {
-        double[] startPoint = point(DIM,0.1);
-        double[] insigma = point(DIM,0.1);
-        double[][] boundaries = null;
-        PointValuePair expected =
-            new PointValuePair(point(DIM,1.0),0.0);
-        doTest(new Rosen(), startPoint, insigma, boundaries,
-                GoalType.MINIMIZE, LAMBDA, false, 1, 1e-13,
-                1e-10, 1e-4, 1000000, expected);
-     }
-
-    @Test
-    public void testMath864() {
-        final CMAESOptimizer optimizer = new CMAESOptimizer();
-        final MultivariateFunction fitnessFunction = new MultivariateFunction() {
-                public double value(double[] parameters) {
-                    final double target = 1;
-                    final double error = target - parameters[0];
-                    return error * error;
-                }
-            };
-
-        final double[] start = { 0 };
-        final double[] lower = { -1e6 };
-        final double[] upper = { 1.5 };
-        final double[] result = optimizer.optimize(10000, fitnessFunction, GoalType.MINIMIZE,
-                                                   start, lower, upper).getPoint();
-        Assert.assertTrue("Out of bounds (" + result[0] + " > " + upper[0] + ")",
-                          result[0] <= upper[0]);
-    }
-
-    /**
-     * Cf. MATH-867
-     */
-    @Test
-    public void testFitAccuracyDependsOnBoundary() {
-        final CMAESOptimizer optimizer = new CMAESOptimizer();
-        final MultivariateFunction fitnessFunction = new MultivariateFunction() {
-                public double value(double[] parameters) {
-                    final double target = 11.1;
-                    final double error = target - parameters[0];
-                    return error * error;
-                }
-            };
-
-        final double[] start = { 1 };
- 
-        // No bounds.
-        PointValuePair result = optimizer.optimize(100000, fitnessFunction, GoalType.MINIMIZE,
-                                                   start);
-        final double resNoBound = result.getPoint()[0];
-
-        // Optimum is near the lower bound.
-        final double[] lower = { -20 };
-        final double[] upper = { 5e16 };
-        result = optimizer.optimize(100000, fitnessFunction, GoalType.MINIMIZE,
-                                    start, lower, upper);
-        final double resNearLo = result.getPoint()[0];
-
-        // Optimum is near the upper bound.
-        lower[0] = -5e16;
-        upper[0] = 20;
-        result = optimizer.optimize(100000, fitnessFunction, GoalType.MINIMIZE,
-                                    start, lower, upper);
-        final double resNearHi = result.getPoint()[0];
-
-        // System.out.println("resNoBound=" + resNoBound +
-        //                    " resNearLo=" + resNearLo +
-        //                    " resNearHi=" + resNearHi);
-
-        // The two values currently differ by a substantial amount, indicating that
-        // the bounds definition can prevent reaching the optimum.
-        Assert.assertEquals(resNoBound, resNearLo, 1e-3);
-        Assert.assertEquals(resNoBound, resNearHi, 1e-3);
-    }
- 
-    /**
-     * @param func Function to optimize.
-     * @param startPoint Starting point.
-     * @param inSigma Individual input sigma.
-     * @param boundaries Upper / lower point limit.
-     * @param goal Minimization or maximization.
-     * @param lambda Population size used for offspring.
-     * @param isActive Covariance update mechanism.
-     * @param diagonalOnly Simplified covariance update.
-     * @param stopValue Termination criteria for optimization.
-     * @param fTol Tolerance (relative error) on the objective function value.
-     * @param pointTol Tolerance for checking that the optimum is correct.
-     * @param maxEvaluations Maximum number of evaluations.
-     * @param expected Expected point / value.
-     */
-    private void doTest(MultivariateFunction func,
-            double[] startPoint,
-            double[] inSigma,
-            double[][] boundaries,
-            GoalType goal,
-            int lambda,
-            boolean isActive,
-            int diagonalOnly, 
-            double stopValue,
-            double fTol,
-            double pointTol,
-            int maxEvaluations,
-            PointValuePair expected) {
-        int dim = startPoint.length;
-        // test diagonalOnly = 0 - slow but normally needs fewer function evaluations
-        CMAESOptimizer optim = new CMAESOptimizer(30000, stopValue, isActive, diagonalOnly,
-                                                  0, new MersenneTwister(), false, null);
-        final double[] lB = boundaries == null ? null : boundaries[0];
-        final double[] uB = boundaries == null ? null : boundaries[1];
-        PointValuePair result = boundaries == null ?
-            optim.optimize(maxEvaluations, func, goal,
-                           new InitialGuess(startPoint),
-                           new CMAESOptimizer.Sigma(inSigma),
-                           new CMAESOptimizer.PopulationSize(lambda)) :
-            optim.optimize(maxEvaluations, func, goal,
-                           new InitialGuess(startPoint),
-                           new SimpleBounds(lB, uB),
-                           new CMAESOptimizer.Sigma(inSigma),
-                           new CMAESOptimizer.PopulationSize(lambda));
-        // System.out.println("sol=" + Arrays.toString(result.getPoint()));
-        Assert.assertEquals(expected.getValue(), result.getValue(), fTol);
-        for (int i = 0; i < dim; i++) {
-            Assert.assertEquals(expected.getPoint()[i], result.getPoint()[i], pointTol);
-        }
-    }
-
-    private static double[] point(int n, double value) {
-        double[] ds = new double[n];
-        Arrays.fill(ds, value);
-        return ds;
-    }
-
-    private static double[][] boundaries(int dim,
-            double lower, double upper) {
-        double[][] boundaries = new double[2][dim];
-        for (int i = 0; i < dim; i++)
-            boundaries[0][i] = lower;
-        for (int i = 0; i < dim; i++)
-            boundaries[1][i] = upper;
-        return boundaries;
-    }
-
-    private static class Sphere implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Cigar implements MultivariateFunction {
-        private double factor;
-
-        Cigar() {
-            this(1e3);
-        }
-
-        Cigar(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = x[0] * x[0];
-            for (int i = 1; i < x.length; ++i)
-                f += factor * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Tablet implements MultivariateFunction {
-        private double factor;
-
-        Tablet() {
-            this(1e3);
-        }
-
-        Tablet(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = factor * x[0] * x[0];
-            for (int i = 1; i < x.length; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class CigTab implements MultivariateFunction {
-        private double factor;
-
-        CigTab() {
-            this(1e4);
-        }
-
-        CigTab(double axisratio) {
-            factor = axisratio;
-        }
-
-        public double value(double[] x) {
-            int end = x.length - 1;
-            double f = x[0] * x[0] / factor + factor * x[end] * x[end];
-            for (int i = 1; i < end; ++i)
-                f += x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class TwoAxes implements MultivariateFunction {
-
-        private double factor;
-
-        TwoAxes() {
-            this(1e6);
-        }
-
-        TwoAxes(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += (i < x.length / 2 ? factor : 1) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class ElliRotated implements MultivariateFunction {
-        private Basis B = new Basis();
-        private double factor;
-
-        ElliRotated() {
-            this(1e3);
-        }
-
-        ElliRotated(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            x = B.Rotate(x);
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class Elli implements MultivariateFunction {
-
-        private double factor;
-
-        Elli() {
-            this(1e3);
-        }
-
-        Elli(double axisratio) {
-            factor = axisratio * axisratio;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
-            return f;
-        }
-    }
-
-    private static class MinusElli implements MultivariateFunction {
-
-        public double value(double[] x) {
-            return 1.0-(new Elli().value(x));
-        }
-    }
-
-    private static class DiffPow implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length; ++i)
-                f += FastMath.pow(FastMath.abs(x[i]), 2. + 10 * (double) i
-                        / (x.length - 1.));
-            return f;
-        }
-    }
-
-    private static class SsDiffPow implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = FastMath.pow(new DiffPow().value(x), 0.25);
-            return f;
-        }
-    }
-
-    private static class Rosen implements MultivariateFunction {
-
-        public double value(double[] x) {
-            double f = 0;
-            for (int i = 0; i < x.length - 1; ++i)
-                f += 1e2 * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1])
-                + (x[i] - 1.) * (x[i] - 1.);
-            return f;
-        }
-    }
-
-    private static class Ackley implements MultivariateFunction {
-        private double axisratio;
-
-        Ackley(double axra) {
-            axisratio = axra;
-        }
-
-        public Ackley() {
-            this(1);
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            double res2 = 0;
-            double fac = 0;
-            for (int i = 0; i < x.length; ++i) {
-                fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
-                f += fac * fac * x[i] * x[i];
-                res2 += FastMath.cos(2. * FastMath.PI * fac * x[i]);
-            }
-            f = (20. - 20. * FastMath.exp(-0.2 * FastMath.sqrt(f / x.length))
-                    + FastMath.exp(1.) - FastMath.exp(res2 / x.length));
-            return f;
-        }
-    }
-
-    private static class Rastrigin implements MultivariateFunction {
-
-        private double axisratio;
-        private double amplitude;
-
-        Rastrigin() {
-            this(1, 10);
-        }
-
-        Rastrigin(double axisratio, double amplitude) {
-            this.axisratio = axisratio;
-            this.amplitude = amplitude;
-        }
-
-        public double value(double[] x) {
-            double f = 0;
-            double fac;
-            for (int i = 0; i < x.length; ++i) {
-                fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
-                if (i == 0 && x[i] < 0)
-                    fac *= 1.;
-                f += fac * fac * x[i] * x[i] + amplitude
-                * (1. - FastMath.cos(2. * FastMath.PI * fac * x[i]));
-            }
-            return f;
-        }
-    }
-
-    private static class Basis {
-        double[][] basis;
-        Random rand = new Random(2); // do not always use the same basis
-
-        double[] Rotate(double[] x) {
-            GenBasis(x.length);
-            double[] y = new double[x.length];
-            for (int i = 0; i < x.length; ++i) {
-                y[i] = 0;
-                for (int j = 0; j < x.length; ++j)
-                    y[i] += basis[i][j] * x[j];
-            }
-            return y;
-        }
-
-        void GenBasis(int DIM) {
-            if (basis != null && basis.length == DIM)
-                return;
-
-            double sp;
-            int i, j, k;
-
-            /* generate orthogonal basis */
-            basis = new double[DIM][DIM];
-            for (i = 0; i < DIM; ++i) {
-                /* sample components from a Gaussian distribution */
-                for (j = 0; j < DIM; ++j)
-                    basis[i][j] = rand.nextGaussian();
-                /* subtract projection onto previous vectors */
-                for (j = i - 1; j >= 0; --j) {
-                    for (sp = 0., k = 0; k < DIM; ++k)
-                        sp += basis[i][k] * basis[j][k]; /* scalar product */
-                    for (k = 0; k < DIM; ++k)
-                        basis[i][k] -= sp * basis[j][k]; /* subtract */
-                }
-                /* normalize */
-                for (sp = 0., k = 0; k < DIM; ++k)
-                    sp += basis[i][k] * basis[i][k]; /* squared norm */
-                for (k = 0; k < DIM; ++k)
-                    basis[i][k] /= FastMath.sqrt(sp);
-            }
-        }
-    }
-}

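The deleted CMAES test suite built its optimizers with the deprecated constructor and positional optimize calls. For orientation, here is a minimal sketch of one such minimization (the sphere function over a bounded box) against the replacement CMAESOptimizer under org.apache.commons.math4.optim.nonlinear.scalar.noderiv, assuming it keeps the math3 constructor and OptimizationData-based optimize signature; the class and parameter names are assumptions, not taken from this commit.

    import java.util.Arrays;

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.SimpleBounds;
    import org.apache.commons.math4.optim.SimpleValueChecker;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
    import org.apache.commons.math4.random.MersenneTwister;

    public class CmaesSketch {
        public static void main(String[] args) {
            // Sphere function: minimum value 0 at the origin.
            MultivariateFunction sphere = new MultivariateFunction() {
                public double value(double[] x) {
                    double f = 0;
                    for (double xi : x) {
                        f += xi * xi;
                    }
                    return f;
                }
            };

            final int dim = 13;
            final int lambda = 4 + (int) (3. * Math.log(dim)); // population size, as in the deleted tests

            double[] start = new double[dim];
            double[] sigma = new double[dim];
            double[] lower = new double[dim];
            double[] upper = new double[dim];
            Arrays.fill(start, 1.0);
            Arrays.fill(sigma, 0.1);
            Arrays.fill(lower, -1.0);
            Arrays.fill(upper, 2.0);

            // maxIterations, stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount,
            // random generator, generateStatistics, convergence checker.
            CMAESOptimizer optimizer =
                new CMAESOptimizer(30000, 0, true, 0, 0, new MersenneTwister(),
                                   false, new SimpleValueChecker(1e-13, 1e-13));

            PointValuePair result =
                optimizer.optimize(new MaxEval(100000),
                                   new ObjectiveFunction(sphere),
                                   GoalType.MINIMIZE,
                                   new InitialGuess(start),
                                   new SimpleBounds(lower, upper),
                                   new CMAESOptimizer.Sigma(sigma),
                                   new CMAESOptimizer.PopulationSize(lambda));
            // CMA-ES is stochastic, so the exact point varies with the generator seed.
            System.out.println("f(min) = " + result.getValue());
        }
    }
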
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapterTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapterTest.java
deleted file mode 100644
index 76d9139..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapterTest.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.direct.MultivariateFunctionMappingAdapter;
-import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;
-import org.apache.commons.math4.optimization.direct.SimplexOptimizer;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class MultivariateFunctionMappingAdapterTest {
-
-    @Test
-    public void testStartSimplexInsideRange() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
-        final MultivariateFunctionMappingAdapter wrapped =
-                new MultivariateFunctionMappingAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper());
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[][] {
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
-            wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
-        }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(300, wrapped, GoalType.MINIMIZE,
-                                 wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 }));
-        final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
-
-    }
-
-    @Test
-    public void testOptimumOutsideRange() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0, 1.0, 3.0, 2.0, 3.0);
-        final MultivariateFunctionMappingAdapter wrapped =
-                new MultivariateFunctionMappingAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper());
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[][] {
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
-            wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
-        }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(100, wrapped, GoalType.MINIMIZE,
-                                 wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 }));
-        final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
-
-    }
-
-    @Test
-    public void testUnbounded() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0,
-                                                        Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY,
-                                                        Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
-        final MultivariateFunctionMappingAdapter wrapped =
-                new MultivariateFunctionMappingAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper());
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[][] {
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
-            wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
-        }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(300, wrapped, GoalType.MINIMIZE,
-                                 wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 }));
-        final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
-
-    }
-
-    @Test
-    public void testHalfBounded() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 4.0,
-                                                        1.0, Double.POSITIVE_INFINITY,
-                                                        Double.NEGATIVE_INFINITY, 3.0);
-        final MultivariateFunctionMappingAdapter wrapped =
-                new MultivariateFunctionMappingAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper());
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-13, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[][] {
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
-            wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
-            wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
-        }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(200, wrapped, GoalType.MINIMIZE,
-                                 wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 }));
-        final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 1e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 1e-7);
-
-    }
-
-    private static class BiQuadratic implements MultivariateFunction {
-
-        private final double xOptimum;
-        private final double yOptimum;
-
-        private final double xMin;
-        private final double xMax;
-        private final double yMin;
-        private final double yMax;
-
-        public BiQuadratic(final double xOptimum, final double yOptimum,
-                           final double xMin, final double xMax,
-                           final double yMin, final double yMax) {
-            this.xOptimum = xOptimum;
-            this.yOptimum = yOptimum;
-            this.xMin     = xMin;
-            this.xMax     = xMax;
-            this.yMin     = yMin;
-            this.yMax     = yMax;
-        }
-
-        public double value(double[] point) {
-
-            // the function should never be called with out of range points
-            Assert.assertTrue(point[0] >= xMin);
-            Assert.assertTrue(point[0] <= xMax);
-            Assert.assertTrue(point[1] >= yMin);
-            Assert.assertTrue(point[1] <= yMax);
-
-            final double dx = point[0] - xOptimum;
-            final double dy = point[1] - yOptimum;
-            return dx * dx + dy * dy;
-
-        }
-
-        public double[] getLower() {
-            return new double[] { xMin, yMin };
-        }
-
-        public double[] getUpper() {
-            return new double[] { xMax, yMax };
-        }
-
-        public double getBoundedXOptimum() {
-            return (xOptimum < xMin) ? xMin : ((xOptimum > xMax) ? xMax : xOptimum);
-        }
-
-        public double getBoundedYOptimum() {
-            return (yOptimum < yMin) ? yMin : ((yOptimum > yMax) ? yMax : yOptimum);
-        }
-
-    }
-
-}

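The mapping-adapter test above transformed a bounded problem into an unbounded one so that a plain Nelder-Mead simplex could be used, then mapped the optimum back into the box. Below is a hedged sketch of the same idea with the non-deprecated classes under org.apache.commons.math4.optim.nonlinear.scalar, with the layout assumed to mirror math3; it is not part of this commit.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

    public class MappingAdapterSketch {
        public static void main(String[] args) {
            // Quadratic bowl with optimum at (2.0, 2.5), searched inside [1, 3] x [2, 3].
            MultivariateFunction bowl = new MultivariateFunction() {
                public double value(double[] p) {
                    double dx = p[0] - 2.0;
                    double dy = p[1] - 2.5;
                    return dx * dx + dy * dy;
                }
            };
            MultivariateFunctionMappingAdapter wrapped =
                new MultivariateFunctionMappingAdapter(bowl,
                                                       new double[] { 1.0, 2.0 },
                                                       new double[] { 3.0, 3.0 });

            SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
            // The simplex is now passed as OptimizationData instead of via setSimplex(...).
            PointValuePair optimum =
                optimizer.optimize(new MaxEval(300),
                                   new ObjectiveFunction(wrapped),
                                   GoalType.MINIMIZE,
                                   new InitialGuess(wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 })),
                                   new NelderMeadSimplex(2));
            // Map the unbounded optimum back into the original bounded coordinates.
            double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
            System.out.println("x = " + bounded[0] + ", y = " + bounded[1]);
        }
    }
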
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapterTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapterTest.java
deleted file mode 100644
index 0080bca..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapterTest.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimplePointChecker;
-import org.apache.commons.math4.optimization.direct.MultivariateFunctionPenaltyAdapter;
-import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;
-import org.apache.commons.math4.optimization.direct.SimplexOptimizer;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class MultivariateFunctionPenaltyAdapterTest {
-
-    @Test
-    public void testStartSimplexInsideRange() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
-        final MultivariateFunctionPenaltyAdapter wrapped =
-                new MultivariateFunctionPenaltyAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper(),
-                                                           1000.0, new double[] { 100.0, 100.0 });
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 1.0, 0.5 }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(300, wrapped, GoalType.MINIMIZE, new double[] { 1.5, 2.25 });
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
-
-    }
-
-    @Test
-    public void testStartSimplexOutsideRange() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
-        final MultivariateFunctionPenaltyAdapter wrapped =
-                new MultivariateFunctionPenaltyAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper(),
-                                                           1000.0, new double[] { 100.0, 100.0 });
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 1.0, 0.5 }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(300, wrapped, GoalType.MINIMIZE, new double[] { -1.5, 4.0 });
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
-
-    }
-
-    @Test
-    public void testOptimumOutsideRange() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0, 1.0, 3.0, 2.0, 3.0);
-        final MultivariateFunctionPenaltyAdapter wrapped =
-                new MultivariateFunctionPenaltyAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper(),
-                                                           1000.0, new double[] { 100.0, 100.0 });
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(new SimplePointChecker<PointValuePair>(1.0e-11, 1.0e-20));
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 1.0, 0.5 }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(600, wrapped, GoalType.MINIMIZE, new double[] { -1.5, 4.0 });
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
-
-    }
-
-    @Test
-    public void testUnbounded() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0,
-                                                        Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY,
-                                                        Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
-        final MultivariateFunctionPenaltyAdapter wrapped =
-                new MultivariateFunctionPenaltyAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper(),
-                                                           1000.0, new double[] { 100.0, 100.0 });
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 1.0, 0.5 }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(300, wrapped, GoalType.MINIMIZE, new double[] { -1.5, 4.0 });
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
-
-    }
-
-    @Test
-    public void testHalfBounded() {
-
-        final BiQuadratic biQuadratic = new BiQuadratic(4.0, 4.0,
-                                                        1.0, Double.POSITIVE_INFINITY,
-                                                        Double.NEGATIVE_INFINITY, 3.0);
-        final MultivariateFunctionPenaltyAdapter wrapped =
-                new MultivariateFunctionPenaltyAdapter(biQuadratic,
-                                                           biQuadratic.getLower(),
-                                                           biQuadratic.getUpper(),
-                                                           1000.0, new double[] { 100.0, 100.0 });
-
-        SimplexOptimizer optimizer = new SimplexOptimizer(new SimplePointChecker<PointValuePair>(1.0e-10, 1.0e-20));
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 1.0, 0.5 }));
-
-        final PointValuePair optimum
-            = optimizer.optimize(400, wrapped, GoalType.MINIMIZE, new double[] { -1.5, 4.0 });
-
-        Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
-
-    }
-
-    private static class BiQuadratic implements MultivariateFunction {
-
-        private final double xOptimum;
-        private final double yOptimum;
-
-        private final double xMin;
-        private final double xMax;
-        private final double yMin;
-        private final double yMax;
-
-        public BiQuadratic(final double xOptimum, final double yOptimum,
-                           final double xMin, final double xMax,
-                           final double yMin, final double yMax) {
-            this.xOptimum = xOptimum;
-            this.yOptimum = yOptimum;
-            this.xMin     = xMin;
-            this.xMax     = xMax;
-            this.yMin     = yMin;
-            this.yMax     = yMax;
-        }
-
-        public double value(double[] point) {
-
-            // the function should never be called with out of range points
-            Assert.assertTrue(point[0] >= xMin);
-            Assert.assertTrue(point[0] <= xMax);
-            Assert.assertTrue(point[1] >= yMin);
-            Assert.assertTrue(point[1] <= yMax);
-
-            final double dx = point[0] - xOptimum;
-            final double dy = point[1] - yOptimum;
-            return dx * dx + dy * dy;
-
-        }
-
-        public double[] getLower() {
-            return new double[] { xMin, yMin };
-        }
-
-        public double[] getUpper() {
-            return new double[] { xMax, yMax };
-        }
-
-        public double getBoundedXOptimum() {
-            return (xOptimum < xMin) ? xMin : ((xOptimum > xMax) ? xMax : xOptimum);
-        }
-
-        public double getBoundedYOptimum() {
-            return (yOptimum < yMin) ? yMin : ((yOptimum > yMax) ? yMax : yOptimum);
-        }
-
-    }
-
-}
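
For code that used the deleted penalty-adapter tests as a reference, the equivalent bounded Nelder-Mead setup against the replacement optim API looks roughly like the sketch below. This is an illustrative sketch, not part of the commit: it assumes the math4 classes keep the layout of the 3.x line (org.apache.commons.math4.optim.MaxEval, InitialGuess, ObjectiveFunction, nonlinear.scalar.MultivariateFunctionPenaltyAdapter, nonlinear.scalar.noderiv.SimplexOptimizer and NelderMeadSimplex), and the class and bound values below are made up for the example. The main design change is that the simplex is passed to optimize(...) as optimization data instead of via setSimplex(...).

import org.apache.commons.math4.analysis.MultivariateFunction;
import org.apache.commons.math4.optim.InitialGuess;
import org.apache.commons.math4.optim.MaxEval;
import org.apache.commons.math4.optim.PointValuePair;
import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.optim.nonlinear.scalar.MultivariateFunctionPenaltyAdapter;
import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

public class PenaltyAdapterMigrationSketch {
    public static void main(String[] args) {
        // Quadratic with unconstrained optimum at (2, 3); bounds restrict the search to [1, 3] x [2, 4].
        MultivariateFunction biQuadratic = new MultivariateFunction() {
            public double value(double[] point) {
                final double dx = point[0] - 2.0;
                final double dy = point[1] - 3.0;
                return dx * dx + dy * dy;
            }
        };

        // Wrap the bounded function so the unconstrained simplex search is penalized outside the box.
        MultivariateFunction wrapped = new MultivariateFunctionPenaltyAdapter(
                biQuadratic,
                new double[] { 1.0, 2.0 },              // lower bounds
                new double[] { 3.0, 4.0 },              // upper bounds
                1000.0, new double[] { 100.0, 100.0 }); // penalty offset and scale

        // The simplex is now supplied to optimize(...) rather than via setSimplex(...).
        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
        PointValuePair optimum = optimizer.optimize(
                new MaxEval(300),
                new ObjectiveFunction(wrapped),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 1.5, 2.5 }),
                new NelderMeadSimplex(new double[] { 1.0, 0.5 }));

        System.out.println("x = " + optimum.getPoint()[0] + ", y = " + optimum.getPoint()[1]);
    }
}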

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/PowellOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/PowellOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/PowellOptimizerTest.java
deleted file mode 100644
index 227277f..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/PowellOptimizerTest.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.analysis.SumSincFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateOptimizer;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.direct.PowellOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test for {@link PowellOptimizer}.
- */
-@Deprecated
-public class PowellOptimizerTest {
-
-    @Test
-    public void testSumSinc() {
-        final MultivariateFunction func = new SumSincFunction(-1);
-
-        int dim = 2;
-        final double[] minPoint = new double[dim];
-        for (int i = 0; i < dim; i++) {
-            minPoint[i] = 0;
-        }
-
-        double[] init = new double[dim];
-
-        // Initial is minimum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = minPoint[i];
-        }
-        doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-9);
-
-        // Initial is far from minimum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = minPoint[i] + 3;
-        }
-        doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-5);
-        // More stringent line search tolerance enhances the precision
-        // of the result.
-        doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-9, 1e-7);
-    }
-
-    @Test
-    public void testQuadratic() {
-        final MultivariateFunction func = new MultivariateFunction() {
-                public double value(double[] x) {
-                    final double a = x[0] - 1;
-                    final double b = x[1] - 1;
-                    return a * a + b * b + 1;
-                }
-            };
-
-        int dim = 2;
-        final double[] minPoint = new double[dim];
-        for (int i = 0; i < dim; i++) {
-            minPoint[i] = 1;
-        }
-
-        double[] init = new double[dim];
-
-        // Initial is minimum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = minPoint[i];
-        }
-        doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-8);
-
-        // Initial is far from minimum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = minPoint[i] - 20;
-        }
-        doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-8);
-    }
-
-    @Test
-    public void testMaximizeQuadratic() {
-        final MultivariateFunction func = new MultivariateFunction() {
-                public double value(double[] x) {
-                    final double a = x[0] - 1;
-                    final double b = x[1] - 1;
-                    return -a * a - b * b + 1;
-                }
-            };
-
-        int dim = 2;
-        final double[] maxPoint = new double[dim];
-        for (int i = 0; i < dim; i++) {
-            maxPoint[i] = 1;
-        }
-
-        double[] init = new double[dim];
-
-        // Initial is maximum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = maxPoint[i];
-        }
-        doTest(func, maxPoint, init,  GoalType.MAXIMIZE, 1e-9, 1e-8);
-
-        // Initial is far from maximum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = maxPoint[i] - 20;
-        }
-        doTest(func, maxPoint, init, GoalType.MAXIMIZE, 1e-9, 1e-8);
-    }
-
-    /**
-     * Ensure that we do not increase the number of function evaluations when
-     * the function values are scaled up.
-     * Note that the tolerances parameters passed to the constructor must
-     * still hold sensible values because they are used to set the line search
-     * tolerances.
-     */
-    @Test
-    public void testRelativeToleranceOnScaledValues() {
-        final MultivariateFunction func = new MultivariateFunction() {
-                public double value(double[] x) {
-                    final double a = x[0] - 1;
-                    final double b = x[1] - 1;
-                    return a * a * FastMath.sqrt(FastMath.abs(a)) + b * b + 1;
-                }
-            };
-
-        int dim = 2;
-        final double[] minPoint = new double[dim];
-        for (int i = 0; i < dim; i++) {
-            minPoint[i] = 1;
-        }
-
-        double[] init = new double[dim];
-        // Initial is far from minimum.
-        for (int i = 0; i < dim; i++) {
-            init[i] = minPoint[i] - 20;
-        }
-
-        final double relTol = 1e-10;
-
-        final int maxEval = 1000;
-        // Very small absolute tolerance to rely solely on the relative
-        // tolerance as a stopping criterion
-        final MultivariateOptimizer optim = new PowellOptimizer(relTol, 1e-100);
-
-        final PointValuePair funcResult = optim.optimize(maxEval, func, GoalType.MINIMIZE, init);
-        final double funcValue = func.value(funcResult.getPoint());
-        final int funcEvaluations = optim.getEvaluations();
-
-        final double scale = 1e10;
-        final MultivariateFunction funcScaled = new MultivariateFunction() {
-                public double value(double[] x) {
-                    return scale * func.value(x);
-                }
-            };
-
-        final PointValuePair funcScaledResult = optim.optimize(maxEval, funcScaled, GoalType.MINIMIZE, init);
-        final double funcScaledValue = funcScaled.value(funcScaledResult.getPoint());
-        final int funcScaledEvaluations = optim.getEvaluations();
-
-        // Check that both minima provide the same objective function values,
-        // within the relative function tolerance.
-        Assert.assertEquals(1, funcScaledValue / (scale * funcValue), relTol);
-
-        // Check that the numbers of evaluations are the same.
-        Assert.assertEquals(funcEvaluations, funcScaledEvaluations);
-    }
-
-    /**
-     * @param func Function to optimize.
-     * @param optimum Expected optimum.
-     * @param init Starting point.
-     * @param goal Minimization or maximization.
-     * @param fTol Tolerance (relative error on the objective function) for
-     * "Powell" algorithm.
-     * @param pointTol Tolerance for checking that the optimum is correct.
-     */
-    private void doTest(MultivariateFunction func,
-                        double[] optimum,
-                        double[] init,
-                        GoalType goal,
-                        double fTol,
-                        double pointTol) {
-        final MultivariateOptimizer optim = new PowellOptimizer(fTol, Math.ulp(1d));
-
-        final PointValuePair result = optim.optimize(1000, func, goal, init);
-        final double[] point = result.getPoint();
-
-        for (int i = 0, dim = optimum.length; i < dim; i++) {
-            Assert.assertEquals("found[" + i + "]=" + point[i] + " value=" + result.getValue(),
-                                optimum[i], point[i], pointTol);
-        }
-    }
-
-    /**
-     * @param func Function to optimize.
-     * @param optimum Expected optimum.
-     * @param init Starting point.
-     * @param goal Minimization or maximization.
-     * @param fTol Tolerance (relative error on the objective function) for
-     * "Powell" algorithm.
-     * @param fLineTol Tolerance (relative error on the objective function)
-     * for the internal line search algorithm.
-     * @param pointTol Tolerance for checking that the optimum is correct.
-     */
-    private void doTest(MultivariateFunction func,
-                        double[] optimum,
-                        double[] init,
-                        GoalType goal,
-                        double fTol,
-                        double fLineTol,
-                        double pointTol) {
-        final MultivariateOptimizer optim = new PowellOptimizer(fTol, Math.ulp(1d),
-                                                                fLineTol, Math.ulp(1d));
-
-        final PointValuePair result = optim.optimize(1000, func, goal, init);
-        final double[] point = result.getPoint();
-
-        for (int i = 0, dim = optimum.length; i < dim; i++) {
-            Assert.assertEquals("found[" + i + "]=" + point[i] + " value=" + result.getValue(),
-                                optimum[i], point[i], pointTol);
-        }
-    }
-}
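
The deprecated PowellOptimizer exercised above has a counterpart in the replacement package. Below is a minimal sketch of the same quadratic minimization, assuming the math4 class mirrors the 3.x org.apache.commons.math4.optim.nonlinear.scalar.noderiv.PowellOptimizer with (relative, absolute) tolerances and the MaxEval/ObjectiveFunction/InitialGuess optimization data; it is not taken from this commit.

import org.apache.commons.math4.analysis.MultivariateFunction;
import org.apache.commons.math4.optim.InitialGuess;
import org.apache.commons.math4.optim.MaxEval;
import org.apache.commons.math4.optim.PointValuePair;
import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class PowellMigrationSketch {
    public static void main(String[] args) {
        // Quadratic bowl with minimum at (1, 1), as in the deleted testQuadratic.
        MultivariateFunction func = new MultivariateFunction() {
            public double value(double[] x) {
                final double a = x[0] - 1;
                final double b = x[1] - 1;
                return a * a + b * b + 1;
            }
        };

        // Relative/absolute tolerances play the same role as in the removed constructor.
        PowellOptimizer optimizer = new PowellOptimizer(1e-9, Math.ulp(1d));
        PointValuePair result = optimizer.optimize(
                new MaxEval(1000),
                new ObjectiveFunction(func),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { -19, -19 })); // start far from the minimum

        System.out.println("min at (" + result.getPoint()[0] + ", " + result.getPoint()[1] + ")");
    }
}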

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerMultiDirectionalTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerMultiDirectionalTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerMultiDirectionalTest.java
deleted file mode 100644
index 2ae7eaf..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerMultiDirectionalTest.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.optimization.direct.MultiDirectionalSimplex;
-import org.apache.commons.math4.optimization.direct.SimplexOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class SimplexOptimizerMultiDirectionalTest {
-    @Test
-    public void testMinimize1() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
-        optimizer.setSimplex(new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(200, fourExtrema, GoalType.MINIMIZE, new double[] { -3, 0 });
-        Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 4e-6);
-        Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 3e-6);
-        Assert.assertEquals(fourExtrema.valueXmYp, optimum.getValue(), 8e-13);
-        Assert.assertTrue(optimizer.getEvaluations() > 120);
-        Assert.assertTrue(optimizer.getEvaluations() < 150);
-    }
-
-    @Test
-    public void testMinimize2() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
-        optimizer.setSimplex(new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            =  optimizer.optimize(200, fourExtrema, GoalType.MINIMIZE, new double[] { 1, 0 });
-        Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 2e-8);
-        Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-6);
-        Assert.assertEquals(fourExtrema.valueXpYm, optimum.getValue(), 2e-12);
-        Assert.assertTrue(optimizer.getEvaluations() > 120);
-        Assert.assertTrue(optimizer.getEvaluations() < 150);
-    }
-
-    @Test
-    public void testMaximize1() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
-        optimizer.setSimplex(new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(200, fourExtrema, GoalType.MAXIMIZE, new double[] { -3.0, 0.0 });
-        Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 7e-7);
-        Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-7);
-        Assert.assertEquals(fourExtrema.valueXmYm, optimum.getValue(), 2e-14);
-        Assert.assertTrue(optimizer.getEvaluations() > 120);
-        Assert.assertTrue(optimizer.getEvaluations() < 150);
-    }
-
-    @Test
-    public void testMaximize2() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(new SimpleValueChecker(1e-15, 1e-30));
-        optimizer.setSimplex(new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(200, fourExtrema, GoalType.MAXIMIZE, new double[] { 1, 0 });
-        Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 2e-8);
-        Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 3e-6);
-        Assert.assertEquals(fourExtrema.valueXpYp, optimum.getValue(), 2e-12);
-        Assert.assertTrue(optimizer.getEvaluations() > 180);
-        Assert.assertTrue(optimizer.getEvaluations() < 220);
-    }
-
-    @Test
-    public void testRosenbrock() {
-        MultivariateFunction rosenbrock =
-            new MultivariateFunction() {
-                public double value(double[] x) {
-                    ++count;
-                    double a = x[1] - x[0] * x[0];
-                    double b = 1.0 - x[0];
-                    return 100 * a * a + b * b;
-                }
-            };
-
-        count = 0;
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
-        optimizer.setSimplex(new MultiDirectionalSimplex(new double[][] {
-                    { -1.2,  1.0 }, { 0.9, 1.2 } , {  3.5, -2.3 }
-                }));
-        PointValuePair optimum =
-            optimizer.optimize(100, rosenbrock, GoalType.MINIMIZE, new double[] { -1.2, 1 });
-
-        Assert.assertEquals(count, optimizer.getEvaluations());
-        Assert.assertTrue(optimizer.getEvaluations() > 50);
-        Assert.assertTrue(optimizer.getEvaluations() < 100);
-        Assert.assertTrue(optimum.getValue() > 1e-2);
-    }
-
-    @Test
-    public void testPowell() {
-        MultivariateFunction powell =
-            new MultivariateFunction() {
-                public double value(double[] x) {
-                    ++count;
-                    double a = x[0] + 10 * x[1];
-                    double b = x[2] - x[3];
-                    double c = x[1] - 2 * x[2];
-                    double d = x[0] - x[3];
-                    return a * a + 5 * b * b + c * c * c * c + 10 * d * d * d * d;
-                }
-            };
-
-        count = 0;
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
-        optimizer.setSimplex(new MultiDirectionalSimplex(4));
-        PointValuePair optimum =
-            optimizer.optimize(1000, powell, GoalType.MINIMIZE, new double[] { 3, -1, 0, 1 });
-        Assert.assertEquals(count, optimizer.getEvaluations());
-        Assert.assertTrue(optimizer.getEvaluations() > 800);
-        Assert.assertTrue(optimizer.getEvaluations() < 900);
-        Assert.assertTrue(optimum.getValue() > 1e-2);
-    }
-
-    @Test
-    public void testMath283() {
-        // fails because MultiDirectional.iterateSimplex is looping forever
-        // the while(true) should be replaced with a convergence check
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-14, 1e-14);
-        optimizer.setSimplex(new MultiDirectionalSimplex(2));
-        final Gaussian2D function = new Gaussian2D(0, 0, 1);
-        PointValuePair estimate = optimizer.optimize(1000, function,
-                                                         GoalType.MAXIMIZE, function.getMaximumPosition());
-        final double EPSILON = 1e-5;
-        final double expectedMaximum = function.getMaximum();
-        final double actualMaximum = estimate.getValue();
-        Assert.assertEquals(expectedMaximum, actualMaximum, EPSILON);
-
-        final double[] expectedPosition = function.getMaximumPosition();
-        final double[] actualPosition = estimate.getPoint();
-        Assert.assertEquals(expectedPosition[0], actualPosition[0], EPSILON );
-        Assert.assertEquals(expectedPosition[1], actualPosition[1], EPSILON );
-    }
-
-    private static class FourExtrema implements MultivariateFunction {
-        // The following function has 4 local extrema.
-        final double xM = -3.841947088256863675365;
-        final double yM = -1.391745200270734924416;
-        final double xP =  0.2286682237349059125691;
-        final double yP = -yM;
-        final double valueXmYm = 0.2373295333134216789769; // Local maximum.
-        final double valueXmYp = -valueXmYm; // Local minimum.
-        final double valueXpYm = -0.7290400707055187115322; // Global minimum.
-        final double valueXpYp = -valueXpYm; // Global maximum.
-
-        public double value(double[] variables) {
-            final double x = variables[0];
-            final double y = variables[1];
-            return (x == 0 || y == 0) ? 0 :
-                FastMath.atan(x) * FastMath.atan(x + 2) * FastMath.atan(y) * FastMath.atan(y) / (x * y);
-        }
-    }
-
-    private static class Gaussian2D implements MultivariateFunction {
-        private final double[] maximumPosition;
-        private final double std;
-
-        public Gaussian2D(double xOpt, double yOpt, double std) {
-            maximumPosition = new double[] { xOpt, yOpt };
-            this.std = std;
-        }
-
-        public double getMaximum() {
-            return value(maximumPosition);
-        }
-
-        public double[] getMaximumPosition() {
-            return maximumPosition.clone();
-        }
-
-        public double value(double[] point) {
-            final double x = point[0], y = point[1];
-            final double twoS2 = 2.0 * std * std;
-            return 1.0 / (twoS2 * FastMath.PI) * FastMath.exp(-(x * x + y * y) / twoS2);
-        }
-    }
-
-    private int count;
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerNelderMeadTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerNelderMeadTest.java b/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerNelderMeadTest.java
deleted file mode 100644
index 80a8476..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/direct/SimplexOptimizerNelderMeadTest.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.linear.Array2DRowRealMatrix;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.LeastSquaresConverter;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;
-import org.apache.commons.math4.optimization.direct.SimplexOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class SimplexOptimizerNelderMeadTest {
-    @Test
-    public void testMinimize1() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(100, fourExtrema, GoalType.MINIMIZE, new double[] { -3, 0 });
-        Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 2e-7);
-        Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 2e-5);
-        Assert.assertEquals(fourExtrema.valueXmYp, optimum.getValue(), 6e-12);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 90);
-    }
-
-    @Test
-    public void testMinimize2() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(100, fourExtrema, GoalType.MINIMIZE, new double[] { 1, 0 });
-        Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 5e-6);
-        Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 6e-6);
-        Assert.assertEquals(fourExtrema.valueXpYm, optimum.getValue(), 1e-11);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 90);
-    }
-
-    @Test
-    public void testMaximize1() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(100, fourExtrema, GoalType.MAXIMIZE, new double[] { -3, 0 });
-        Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 1e-5);
-        Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-6);
-        Assert.assertEquals(fourExtrema.valueXmYm, optimum.getValue(), 3e-12);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 90);
-    }
-
-    @Test
-    public void testMaximize2() {
-        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
-        final FourExtrema fourExtrema = new FourExtrema();
-
-        final PointValuePair optimum
-            = optimizer.optimize(100, fourExtrema, GoalType.MAXIMIZE, new double[] { 1, 0 });
-        Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 4e-6);
-        Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 5e-6);
-        Assert.assertEquals(fourExtrema.valueXpYp, optimum.getValue(), 7e-12);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 90);
-    }
-
-    @Test
-    public void testRosenbrock() {
-
-        Rosenbrock rosenbrock = new Rosenbrock();
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
-        optimizer.setSimplex(new NelderMeadSimplex(new double[][] {
-                    { -1.2,  1 }, { 0.9, 1.2 } , {  3.5, -2.3 }
-                }));
-        PointValuePair optimum =
-            optimizer.optimize(100, rosenbrock, GoalType.MINIMIZE, new double[] { -1.2, 1 });
-
-        Assert.assertEquals(rosenbrock.getCount(), optimizer.getEvaluations());
-        Assert.assertTrue(optimizer.getEvaluations() > 40);
-        Assert.assertTrue(optimizer.getEvaluations() < 50);
-        Assert.assertTrue(optimum.getValue() < 8e-4);
-    }
-
-    @Test
-    public void testPowell() {
-
-        Powell powell = new Powell();
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
-        optimizer.setSimplex(new NelderMeadSimplex(4));
-        PointValuePair optimum =
-            optimizer.optimize(200, powell, GoalType.MINIMIZE, new double[] { 3, -1, 0, 1 });
-        Assert.assertEquals(powell.getCount(), optimizer.getEvaluations());
-        Assert.assertTrue(optimizer.getEvaluations() > 110);
-        Assert.assertTrue(optimizer.getEvaluations() < 130);
-        Assert.assertTrue(optimum.getValue() < 2e-3);
-    }
-
-    @Test
-    public void testLeastSquares1() {
-
-        final RealMatrix factors =
-            new Array2DRowRealMatrix(new double[][] {
-                    { 1, 0 },
-                    { 0, 1 }
-                }, false);
-        LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
-                public double[] value(double[] variables) {
-                    return factors.operate(variables);
-                }
-            }, new double[] { 2.0, -3.0 });
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
-        optimizer.setSimplex(new NelderMeadSimplex(2));
-        PointValuePair optimum =
-            optimizer.optimize(200, ls, GoalType.MINIMIZE, new double[] { 10, 10 });
-        Assert.assertEquals( 2, optimum.getPointRef()[0], 3e-5);
-        Assert.assertEquals(-3, optimum.getPointRef()[1], 4e-4);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 80);
-        Assert.assertTrue(optimum.getValue() < 1.0e-6);
-    }
-
-    @Test
-    public void testLeastSquares2() {
-
-        final RealMatrix factors =
-            new Array2DRowRealMatrix(new double[][] {
-                    { 1, 0 },
-                    { 0, 1 }
-                }, false);
-        LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
-                public double[] value(double[] variables) {
-                    return factors.operate(variables);
-                }
-            }, new double[] { 2, -3 }, new double[] { 10, 0.1 });
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
-        optimizer.setSimplex(new NelderMeadSimplex(2));
-        PointValuePair optimum =
-            optimizer.optimize(200, ls, GoalType.MINIMIZE, new double[] { 10, 10 });
-        Assert.assertEquals( 2, optimum.getPointRef()[0], 5e-5);
-        Assert.assertEquals(-3, optimum.getPointRef()[1], 8e-4);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 80);
-        Assert.assertTrue(optimum.getValue() < 1e-6);
-    }
-
-    @Test
-    public void testLeastSquares3() {
-
-        final RealMatrix factors =
-            new Array2DRowRealMatrix(new double[][] {
-                    { 1, 0 },
-                    { 0, 1 }
-                }, false);
-        LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
-                public double[] value(double[] variables) {
-                    return factors.operate(variables);
-                }
-            }, new double[] { 2, -3 }, new Array2DRowRealMatrix(new double [][] {
-                    { 1, 1.2 }, { 1.2, 2 }
-                }));
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
-        optimizer.setSimplex(new NelderMeadSimplex(2));
-        PointValuePair optimum =
-            optimizer.optimize(200, ls, GoalType.MINIMIZE, new double[] { 10, 10 });
-        Assert.assertEquals( 2, optimum.getPointRef()[0], 2e-3);
-        Assert.assertEquals(-3, optimum.getPointRef()[1], 8e-4);
-        Assert.assertTrue(optimizer.getEvaluations() > 60);
-        Assert.assertTrue(optimizer.getEvaluations() < 80);
-        Assert.assertTrue(optimum.getValue() < 1e-6);
-    }
-
-    @Test(expected = TooManyEvaluationsException.class)
-    public void testMaxIterations() {
-        Powell powell = new Powell();
-        SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
-        optimizer.setSimplex(new NelderMeadSimplex(4));
-        optimizer.optimize(20, powell, GoalType.MINIMIZE, new double[] { 3, -1, 0, 1 });
-    }
-
-    private static class FourExtrema implements MultivariateFunction {
-        // The following function has 4 local extrema.
-        final double xM = -3.841947088256863675365;
-        final double yM = -1.391745200270734924416;
-        final double xP =  0.2286682237349059125691;
-        final double yP = -yM;
-        final double valueXmYm = 0.2373295333134216789769; // Local maximum.
-        final double valueXmYp = -valueXmYm; // Local minimum.
-        final double valueXpYm = -0.7290400707055187115322; // Global minimum.
-        final double valueXpYp = -valueXpYm; // Global maximum.
-
-        public double value(double[] variables) {
-            final double x = variables[0];
-            final double y = variables[1];
-            return (x == 0 || y == 0) ? 0 :
-                FastMath.atan(x) * FastMath.atan(x + 2) * FastMath.atan(y) * FastMath.atan(y) / (x * y);
-        }
-    }
-
-    private static class Rosenbrock implements MultivariateFunction {
-        private int count;
-
-        public Rosenbrock() {
-            count = 0;
-        }
-
-        public double value(double[] x) {
-            ++count;
-            double a = x[1] - x[0] * x[0];
-            double b = 1.0 - x[0];
-            return 100 * a * a + b * b;
-        }
-
-        public int getCount() {
-            return count;
-        }
-    }
-
-    private static class Powell implements MultivariateFunction {
-        private int count;
-
-        public Powell() {
-            count = 0;
-        }
-
-        public double value(double[] x) {
-            ++count;
-            double a = x[0] + 10 * x[1];
-            double b = x[2] - x[3];
-            double c = x[1] - 2 * x[2];
-            double d = x[0] - x[3];
-            return a * a + 5 * b * b + c * c * c * c + 10 * d * d * d * d;
-        }
-
-        public int getCount() {
-            return count;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/fitting/CurveFitterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/fitting/CurveFitterTest.java b/src/test/java/org/apache/commons/math4/optimization/fitting/CurveFitterTest.java
deleted file mode 100644
index 3857fc7..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/fitting/CurveFitterTest.java
+++ /dev/null
@@ -1,154 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.commons.math4.optimization.fitting;
-
-import org.apache.commons.math4.analysis.ParametricUnivariateFunction;
-import org.apache.commons.math4.optimization.fitting.CurveFitter;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-@Deprecated
-public class CurveFitterTest {
-
-    @Test
-    public void testMath303() {
-
-        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
-        CurveFitter<ParametricUnivariateFunction> fitter = new CurveFitter<ParametricUnivariateFunction>(optimizer);
-        fitter.addObservedPoint(2.805d, 0.6934785852953367d);
-        fitter.addObservedPoint(2.74333333333333d, 0.6306772025518496d);
-        fitter.addObservedPoint(1.655d, 0.9474675497289684);
-        fitter.addObservedPoint(1.725d, 0.9013594835804194d);
-
-        ParametricUnivariateFunction sif = new SimpleInverseFunction();
-
-        double[] initialguess1 = new double[1];
-        initialguess1[0] = 1.0d;
-        Assert.assertEquals(1, fitter.fit(sif, initialguess1).length);
-
-        double[] initialguess2 = new double[2];
-        initialguess2[0] = 1.0d;
-        initialguess2[1] = .5d;
-        Assert.assertEquals(2, fitter.fit(sif, initialguess2).length);
-
-    }
-
-    @Test
-    public void testMath304() {
-
-        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
-        CurveFitter<ParametricUnivariateFunction> fitter = new CurveFitter<ParametricUnivariateFunction>(optimizer);
-        fitter.addObservedPoint(2.805d, 0.6934785852953367d);
-        fitter.addObservedPoint(2.74333333333333d, 0.6306772025518496d);
-        fitter.addObservedPoint(1.655d, 0.9474675497289684);
-        fitter.addObservedPoint(1.725d, 0.9013594835804194d);
-
-        ParametricUnivariateFunction sif = new SimpleInverseFunction();
-
-        double[] initialguess1 = new double[1];
-        initialguess1[0] = 1.0d;
-        Assert.assertEquals(1.6357215104109237, fitter.fit(sif, initialguess1)[0], 1.0e-14);
-
-        double[] initialguess2 = new double[1];
-        initialguess2[0] = 10.0d;
-        Assert.assertEquals(1.6357215104109237, fitter.fit(sif, initialguess1)[0], 1.0e-14);
-
-    }
-
-    @Test
-    public void testMath372() {
-        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
-        CurveFitter<ParametricUnivariateFunction> curveFitter = new CurveFitter<ParametricUnivariateFunction>(optimizer);
-
-        curveFitter.addObservedPoint( 15,  4443);
-        curveFitter.addObservedPoint( 31,  8493);
-        curveFitter.addObservedPoint( 62, 17586);
-        curveFitter.addObservedPoint(125, 30582);
-        curveFitter.addObservedPoint(250, 45087);
-        curveFitter.addObservedPoint(500, 50683);
-
-        ParametricUnivariateFunction f = new ParametricUnivariateFunction() {
-
-            public double value(double x, double ... parameters) {
-
-                double a = parameters[0];
-                double b = parameters[1];
-                double c = parameters[2];
-                double d = parameters[3];
-
-                return d + ((a - d) / (1 + FastMath.pow(x / c, b)));
-            }
-
-            public double[] gradient(double x, double ... parameters) {
-
-                double a = parameters[0];
-                double b = parameters[1];
-                double c = parameters[2];
-                double d = parameters[3];
-
-                double[] gradients = new double[4];
-                double den = 1 + FastMath.pow(x / c, b);
-
-                // derivative with respect to a
-                gradients[0] = 1 / den;
-
-                // derivative with respect to b
-                // in the reported (invalid) issue, there was a sign error here
-                gradients[1] = -((a - d) * FastMath.pow(x / c, b) * FastMath.log(x / c)) / (den * den);
-
-                // derivative with respect to c
-                gradients[2] = (b * FastMath.pow(x / c, b - 1) * (x / (c * c)) * (a - d)) / (den * den);
-
-                // derivative with respect to d
-                gradients[3] = 1 - (1 / den);
-
-                return gradients;
-
-            }
-        };
-
-        double[] initialGuess = new double[] { 1500, 0.95, 65, 35000 };
-        double[] estimatedParameters = curveFitter.fit(f, initialGuess);
-
-        Assert.assertEquals( 2411.00, estimatedParameters[0], 500.00);
-        Assert.assertEquals(    1.62, estimatedParameters[1],   0.04);
-        Assert.assertEquals(  111.22, estimatedParameters[2],   0.30);
-        Assert.assertEquals(55347.47, estimatedParameters[3], 300.00);
-        Assert.assertTrue(optimizer.getRMS() < 600.0);
-
-    }
-
-    private static class SimpleInverseFunction implements ParametricUnivariateFunction {
-
-        public double value(double x, double ... parameters) {
-            return parameters[0] / x + (parameters.length < 2 ? 0 : parameters[1]);
-        }
-
-        public double[] gradient(double x, double ... doubles) {
-            double[] gradientVector = new double[doubles.length];
-            gradientVector[0] = 1 / x;
-            if (doubles.length >= 2) {
-                gradientVector[1] = 1;
-            }
-            return gradientVector;
-        }
-    }
-
-}
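
Callers of the removed CurveFitter typically move to the fitting package, where observations are collected in WeightedObservedPoints and handed to a fitter. The following is a minimal sketch only, assuming the 3.x entry points SimpleCurveFitter.create(function, initialGuess) and fit(Collection) carry over unchanged to org.apache.commons.math4.fitting; the inverse model and data points merely echo the deleted test.

import org.apache.commons.math4.analysis.ParametricUnivariateFunction;
import org.apache.commons.math4.fitting.SimpleCurveFitter;
import org.apache.commons.math4.fitting.WeightedObservedPoints;

public class CurveFitterMigrationSketch {
    public static void main(String[] args) {
        // Same 1/x-style model as the deleted SimpleInverseFunction, with a single parameter.
        ParametricUnivariateFunction inverse = new ParametricUnivariateFunction() {
            public double value(double x, double... parameters) {
                return parameters[0] / x;
            }
            public double[] gradient(double x, double... parameters) {
                return new double[] { 1 / x }; // derivative with respect to the single parameter
            }
        };

        // Observations are collected separately and handed to the fitter.
        WeightedObservedPoints obs = new WeightedObservedPoints();
        obs.add(2.805, 0.6934785852953367);
        obs.add(2.74333333333333, 0.6306772025518496);
        obs.add(1.655, 0.9474675497289684);
        obs.add(1.725, 0.9013594835804194);

        double[] best = SimpleCurveFitter.create(inverse, new double[] { 1.0 })
                                         .fit(obs.toList());
        System.out.println("fitted parameter: " + best[0]);
    }
}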


[06/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/fitting/GaussianFitterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/fitting/GaussianFitterTest.java b/src/test/java/org/apache/commons/math4/optimization/fitting/GaussianFitterTest.java
deleted file mode 100644
index ed38f60..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/fitting/GaussianFitterTest.java
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import org.apache.commons.math4.exception.MathIllegalArgumentException;
-import org.apache.commons.math4.optimization.fitting.GaussianFitter;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests {@link GaussianFitter}.
- *
- * @since 2.2
- */
-@Deprecated
-public class GaussianFitterTest {
-    /** Good data. */
-    protected static final double[][] DATASET1 = new double[][] {
-        {4.0254623,  531026.0},
-        {4.02804905, 664002.0},
-        {4.02934242, 787079.0},
-        {4.03128248, 984167.0},
-        {4.03386923, 1294546.0},
-        {4.03580929, 1560230.0},
-        {4.03839603, 1887233.0},
-        {4.0396894,  2113240.0},
-        {4.04162946, 2375211.0},
-        {4.04421621, 2687152.0},
-        {4.04550958, 2862644.0},
-        {4.04744964, 3078898.0},
-        {4.05003639, 3327238.0},
-        {4.05132976, 3461228.0},
-        {4.05326982, 3580526.0},
-        {4.05585657, 3576946.0},
-        {4.05779662, 3439750.0},
-        {4.06038337, 3220296.0},
-        {4.06167674, 3070073.0},
-        {4.0636168,  2877648.0},
-        {4.06620355, 2595848.0},
-        {4.06749692, 2390157.0},
-        {4.06943698, 2175960.0},
-        {4.07202373, 1895104.0},
-        {4.0733171,  1687576.0},
-        {4.07525716, 1447024.0},
-        {4.0778439,  1130879.0},
-        {4.07978396, 904900.0},
-        {4.08237071, 717104.0},
-        {4.08366408, 620014.0}
-    };
-    /** Poor data: right of peak not symmetric with left of peak. */
-    protected static final double[][] DATASET2 = new double[][] {
-        {-20.15,   1523.0},
-        {-19.65,   1566.0},
-        {-19.15,   1592.0},
-        {-18.65,   1927.0},
-        {-18.15,   3089.0},
-        {-17.65,   6068.0},
-        {-17.15,  14239.0},
-        {-16.65,  34124.0},
-        {-16.15,  64097.0},
-        {-15.65, 110352.0},
-        {-15.15, 164742.0},
-        {-14.65, 209499.0},
-        {-14.15, 267274.0},
-        {-13.65, 283290.0},
-        {-13.15, 275363.0},
-        {-12.65, 258014.0},
-        {-12.15, 225000.0},
-        {-11.65, 200000.0},
-        {-11.15, 190000.0},
-        {-10.65, 185000.0},
-        {-10.15, 180000.0},
-        { -9.65, 179000.0},
-        { -9.15, 178000.0},
-        { -8.65, 177000.0},
-        { -8.15, 176000.0},
-        { -7.65, 175000.0},
-        { -7.15, 174000.0},
-        { -6.65, 173000.0},
-        { -6.15, 172000.0},
-        { -5.65, 171000.0},
-        { -5.15, 170000.0}
-    };
-    /** Poor data: long tails. */
-    protected static final double[][] DATASET3 = new double[][] {
-        {-90.15,   1513.0},
-        {-80.15,   1514.0},
-        {-70.15,   1513.0},
-        {-60.15,   1514.0},
-        {-50.15,   1513.0},
-        {-40.15,   1514.0},
-        {-30.15,   1513.0},
-        {-20.15,   1523.0},
-        {-19.65,   1566.0},
-        {-19.15,   1592.0},
-        {-18.65,   1927.0},
-        {-18.15,   3089.0},
-        {-17.65,   6068.0},
-        {-17.15,  14239.0},
-        {-16.65,  34124.0},
-        {-16.15,  64097.0},
-        {-15.65, 110352.0},
-        {-15.15, 164742.0},
-        {-14.65, 209499.0},
-        {-14.15, 267274.0},
-        {-13.65, 283290.0},
-        {-13.15, 275363.0},
-        {-12.65, 258014.0},
-        {-12.15, 214073.0},
-        {-11.65, 182244.0},
-        {-11.15, 136419.0},
-        {-10.65,  97823.0},
-        {-10.15,  58930.0},
-        { -9.65,  35404.0},
-        { -9.15,  16120.0},
-        { -8.65,   9823.0},
-        { -8.15,   5064.0},
-        { -7.65,   2575.0},
-        { -7.15,   1642.0},
-        { -6.65,   1101.0},
-        { -6.15,    812.0},
-        { -5.65,    690.0},
-        { -5.15,    565.0},
-        {  5.15,    564.0},
-        { 15.15,    565.0},
-        { 25.15,    564.0},
-        { 35.15,    565.0},
-        { 45.15,    564.0},
-        { 55.15,    565.0},
-        { 65.15,    564.0},
-        { 75.15,    565.0}
-    };
-    /** Poor data: right of peak is missing. */
-    protected static final double[][] DATASET4 = new double[][] {
-        {-20.15,   1523.0},
-        {-19.65,   1566.0},
-        {-19.15,   1592.0},
-        {-18.65,   1927.0},
-        {-18.15,   3089.0},
-        {-17.65,   6068.0},
-        {-17.15,  14239.0},
-        {-16.65,  34124.0},
-        {-16.15,  64097.0},
-        {-15.65, 110352.0},
-        {-15.15, 164742.0},
-        {-14.65, 209499.0},
-        {-14.15, 267274.0},
-        {-13.65, 283290.0}
-    };
-    /** Good data, but few points. */
-    protected static final double[][] DATASET5 = new double[][] {
-        {4.0254623,  531026.0},
-        {4.03128248, 984167.0},
-        {4.03839603, 1887233.0},
-        {4.04421621, 2687152.0},
-        {4.05132976, 3461228.0},
-        {4.05326982, 3580526.0},
-        {4.05779662, 3439750.0},
-        {4.0636168,  2877648.0},
-        {4.06943698, 2175960.0},
-        {4.07525716, 1447024.0},
-        {4.08237071, 717104.0},
-        {4.08366408, 620014.0}
-    };
-
-    /**
-     * Basic.
-     */
-    @Test
-    public void testFit01() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(DATASET1, fitter);
-        double[] parameters = fitter.fit();
-
-        Assert.assertEquals(3496978.1837704973, parameters[0], 1e-4);
-        Assert.assertEquals(4.054933085999146, parameters[1], 1e-4);
-        Assert.assertEquals(0.015039355620304326, parameters[2], 1e-4);
-    }
-
-    /**
-     * Zero points is not enough observed points.
-     */
-    @Test(expected=MathIllegalArgumentException.class)
-    public void testFit02() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        fitter.fit();
-    }
-    
-    /**
-     * Two points is not enough observed points.
-     */
-    @Test(expected=MathIllegalArgumentException.class)
-    public void testFit03() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(new double[][] {
-            {4.0254623,  531026.0},
-            {4.02804905, 664002.0}},
-            fitter);
-        fitter.fit();
-    }
-    
-    /**
-     * Poor data: right of peak not symmetric with left of peak.
-     */
-    @Test
-    public void testFit04() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(DATASET2, fitter);
-        double[] parameters = fitter.fit();
-
-        Assert.assertEquals(233003.2967252038, parameters[0], 1e-4);
-        Assert.assertEquals(-10.654887521095983, parameters[1], 1e-4);
-        Assert.assertEquals(4.335937353196641, parameters[2], 1e-4);
-    }  
-    
-    /**
-     * Poor data: long tails.
-     */
-    @Test
-    public void testFit05() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(DATASET3, fitter);
-        double[] parameters = fitter.fit();
-
-        Assert.assertEquals(283863.81929180305, parameters[0], 1e-4);
-        Assert.assertEquals(-13.29641995105174, parameters[1], 1e-4);
-        Assert.assertEquals(1.7297330293549908, parameters[2], 1e-4);
-    }
-    
-    /**
-     * Poor data: right of peak is missing.
-     */
-    @Test
-    public void testFit06() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(DATASET4, fitter);
-        double[] parameters = fitter.fit();
-
-        Assert.assertEquals(285250.66754309234, parameters[0], 1e-4);
-        Assert.assertEquals(-13.528375695228455, parameters[1], 1e-4);
-        Assert.assertEquals(1.5204344894331614, parameters[2], 1e-4);
-    }    
-
-    /**
-     * Basic with smaller dataset.
-     */
-    @Test
-    public void testFit07() {
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        addDatasetToGaussianFitter(DATASET5, fitter);
-        double[] parameters = fitter.fit();
-
-        Assert.assertEquals(3514384.729342235, parameters[0], 1e-4);
-        Assert.assertEquals(4.054970307455625, parameters[1], 1e-4);
-        Assert.assertEquals(0.015029412832160017, parameters[2], 1e-4);
-    }
-
-    @Test
-    public void testMath519() {
-        // The optimizer will try negative sigma values but "GaussianFitter"
-        // will catch the raised exceptions and return NaN values instead.
-
-        final double[] data = { 
-            1.1143831578403364E-29,
-            4.95281403484594E-28,
-            1.1171347211930288E-26,
-            1.7044813962636277E-25,
-            1.9784716574832164E-24,
-            1.8630236407866774E-23,
-            1.4820532905097742E-22,
-            1.0241963854632831E-21,
-            6.275077366673128E-21,
-            3.461808994532493E-20,
-            1.7407124684715706E-19,
-            8.056687953553974E-19,
-            3.460193945992071E-18,
-            1.3883326374011525E-17,
-            5.233894983671116E-17,
-            1.8630791465263745E-16,
-            6.288759227922111E-16,
-            2.0204433920597856E-15,
-            6.198768938576155E-15,
-            1.821419346860626E-14,
-            5.139176445538471E-14,
-            1.3956427429045787E-13,
-            3.655705706448139E-13,
-            9.253753324779779E-13,
-            2.267636001476696E-12,
-            5.3880460095836855E-12,
-            1.2431632654852931E-11
-        };
-
-        GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-        for (int i = 0; i < data.length; i++) {
-            fitter.addObservedPoint(i, data[i]);
-        }
-        final double[] p = fitter.fit();
-
-        Assert.assertEquals(53.1572792, p[1], 1e-7);
-        Assert.assertEquals(5.75214622, p[2], 1e-8);
-    }
-
-    @Test
-    public void testMath798() {
-        final GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
-
-        // When the data points are not commented out below, the fit stalls.
-        // This is expected however, since the whole dataset hardly looks like
-        // a Gaussian.
-        // When commented out, the fit proceeds fine.
-
-        fitter.addObservedPoint(0.23, 395.0);
-        //fitter.addObservedPoint(0.68, 0.0);
-        fitter.addObservedPoint(1.14, 376.0);
-        //fitter.addObservedPoint(1.59, 0.0);
-        fitter.addObservedPoint(2.05, 163.0);
-        //fitter.addObservedPoint(2.50, 0.0);
-        fitter.addObservedPoint(2.95, 49.0);
-        //fitter.addObservedPoint(3.41, 0.0);
-        fitter.addObservedPoint(3.86, 16.0);
-        //fitter.addObservedPoint(4.32, 0.0);
-        fitter.addObservedPoint(4.77, 1.0);
-
-        final double[] p = fitter.fit();
-
-        // Values are copied from a previous run of this test.
-        Assert.assertEquals(420.8397296167364, p[0], 1e-12);
-        Assert.assertEquals(0.603770729862231, p[1], 1e-15);
-        Assert.assertEquals(1.0786447936766612, p[2], 1e-14);
-    }
-    
-    /**
-     * Adds the specified points to the specified <code>GaussianFitter</code>
-     * instance.
-     *
-     * @param points data points where first dimension is a point index and
-     *        second dimension is an array of length two representing the point
-     *        with the first value corresponding to X and the second value
-     *        corresponding to Y
-     * @param fitter fitter to which the points in <code>points</code> should be
-     *        added as observed points
-     */
-    protected static void addDatasetToGaussianFitter(double[][] points,
-                                                     GaussianFitter fitter) {
-        for (int i = 0; i < points.length; i++) {
-            fitter.addObservedPoint(points[i][0], points[i][1]);
-        }
-    }
-}
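
The removed GaussianFitter has a direct replacement in the fitting package. A minimal sketch follows, assuming GaussianCurveFitter.create() and WeightedObservedPoints behave as in the 3.x releases (the fitted array is {norm, mean, sigma}); the sample points are a subset of DATASET1 above and the class name is invented for the example.

import org.apache.commons.math4.fitting.GaussianCurveFitter;
import org.apache.commons.math4.fitting.WeightedObservedPoints;

public class GaussianFitterMigrationSketch {
    public static void main(String[] args) {
        // A few points sampled around a peak near x = 4.05.
        WeightedObservedPoints obs = new WeightedObservedPoints();
        obs.add(4.0254623, 531026.0);
        obs.add(4.03128248, 984167.0);
        obs.add(4.03839603, 1887233.0);
        obs.add(4.04421621, 2687152.0);
        obs.add(4.05132976, 3461228.0);
        obs.add(4.05326982, 3580526.0);
        obs.add(4.05779662, 3439750.0);
        obs.add(4.0636168, 2877648.0);
        obs.add(4.06943698, 2175960.0);
        obs.add(4.07525716, 1447024.0);
        obs.add(4.08237071, 717104.0);
        obs.add(4.08366408, 620014.0);

        // Fitted parameters come back as {norm, mean, sigma}.
        double[] parameters = GaussianCurveFitter.create().fit(obs.toList());
        System.out.println("norm=" + parameters[0]
                + " mean=" + parameters[1]
                + " sigma=" + parameters[2]);
    }
}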

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/fitting/HarmonicFitterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/fitting/HarmonicFitterTest.java b/src/test/java/org/apache/commons/math4/optimization/fitting/HarmonicFitterTest.java
deleted file mode 100644
index 31a9bcc..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/fitting/HarmonicFitterTest.java
+++ /dev/null
@@ -1,203 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.commons.math4.optimization.fitting;
-
-import java.util.Random;
-
-import org.apache.commons.math4.analysis.function.HarmonicOscillator;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.optimization.fitting.HarmonicFitter;
-import org.apache.commons.math4.optimization.fitting.WeightedObservedPoint;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathUtils;
-import org.junit.Test;
-import org.junit.Assert;
-
-@Deprecated
-public class HarmonicFitterTest {
-    @Test(expected=NumberIsTooSmallException.class)
-    public void testPreconditions1() {
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-
-        fitter.fit();
-    }
-
-    // This test fails (throwing "ConvergenceException" instead).
-//     @Test(expected=ZeroException.class)
-//     public void testPreconditions2() {
-//         HarmonicFitter fitter =
-//             new HarmonicFitter(new LevenbergMarquardtOptimizer());
-
-//         final double x = 1.2;
-//         fitter.addObservedPoint(1, x, 1);
-//         fitter.addObservedPoint(1, x, -1);
-//         fitter.addObservedPoint(1, x, 0.5);
-//         fitter.addObservedPoint(1, x, 0);
-
-//         final double[] fitted = fitter.fit();
-//     }
-
-    @Test
-    public void testNoError() {
-        final double a = 0.2;
-        final double w = 3.4;
-        final double p = 4.1;
-        HarmonicOscillator f = new HarmonicOscillator(a, w, p);
-
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-        for (double x = 0.0; x < 1.3; x += 0.01) {
-            fitter.addObservedPoint(1, x, f.value(x));
-        }
-
-        final double[] fitted = fitter.fit();
-        Assert.assertEquals(a, fitted[0], 1.0e-13);
-        Assert.assertEquals(w, fitted[1], 1.0e-13);
-        Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1e-13);
-
-        HarmonicOscillator ff = new HarmonicOscillator(fitted[0], fitted[1], fitted[2]);
-
-        for (double x = -1.0; x < 1.0; x += 0.01) {
-            Assert.assertTrue(FastMath.abs(f.value(x) - ff.value(x)) < 1e-13);
-        }
-    }
-
-    @Test
-    public void test1PercentError() {
-        Random randomizer = new Random(64925784252l);
-        final double a = 0.2;
-        final double w = 3.4;
-        final double p = 4.1;
-        HarmonicOscillator f = new HarmonicOscillator(a, w, p);
-
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-        for (double x = 0.0; x < 10.0; x += 0.1) {
-            fitter.addObservedPoint(1, x,
-                                    f.value(x) + 0.01 * randomizer.nextGaussian());
-        }
-
-        final double[] fitted = fitter.fit();
-        Assert.assertEquals(a, fitted[0], 7.6e-4);
-        Assert.assertEquals(w, fitted[1], 2.7e-3);
-        Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.3e-2);
-    }
-
-    @Test
-    public void testTinyVariationsData() {
-        Random randomizer = new Random(64925784252l);
-
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-        for (double x = 0.0; x < 10.0; x += 0.1) {
-            fitter.addObservedPoint(1, x, 1e-7 * randomizer.nextGaussian());
-        }
-
-        fitter.fit();
-        // This test covers the part of "guessAOmega" that is reached
-        // when the integral-based algorithm fails.
-    }
-
-    @Test
-    public void testInitialGuess() {
-        Random randomizer = new Random(45314242l);
-        final double a = 0.2;
-        final double w = 3.4;
-        final double p = 4.1;
-        HarmonicOscillator f = new HarmonicOscillator(a, w, p);
-
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-        for (double x = 0.0; x < 10.0; x += 0.1) {
-            fitter.addObservedPoint(1, x,
-                                    f.value(x) + 0.01 * randomizer.nextGaussian());
-        }
-
-        final double[] fitted = fitter.fit(new double[] { 0.15, 3.6, 4.5 });
-        Assert.assertEquals(a, fitted[0], 1.2e-3);
-        Assert.assertEquals(w, fitted[1], 3.3e-3);
-        Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.7e-2);
-    }
-
-    @Test
-    public void testUnsorted() {
-        Random randomizer = new Random(64925784252l);
-        final double a = 0.2;
-        final double w = 3.4;
-        final double p = 4.1;
-        HarmonicOscillator f = new HarmonicOscillator(a, w, p);
-
-        HarmonicFitter fitter =
-            new HarmonicFitter(new LevenbergMarquardtOptimizer());
-
-        // build a regularly spaced array of measurements
-        int size = 100;
-        double[] xTab = new double[size];
-        double[] yTab = new double[size];
-        for (int i = 0; i < size; ++i) {
-            xTab[i] = 0.1 * i;
-            yTab[i] = f.value(xTab[i]) + 0.01 * randomizer.nextGaussian();
-        }
-
-        // shake it
-        for (int i = 0; i < size; ++i) {
-            int i1 = randomizer.nextInt(size);
-            int i2 = randomizer.nextInt(size);
-            double xTmp = xTab[i1];
-            double yTmp = yTab[i1];
-            xTab[i1] = xTab[i2];
-            yTab[i1] = yTab[i2];
-            xTab[i2] = xTmp;
-            yTab[i2] = yTmp;
-        }
-
-        // pass it to the fitter
-        for (int i = 0; i < size; ++i) {
-            fitter.addObservedPoint(1, xTab[i], yTab[i]);
-        }
-
-        final double[] fitted = fitter.fit();
-        Assert.assertEquals(a, fitted[0], 7.6e-4);
-        Assert.assertEquals(w, fitted[1], 3.5e-3);
-        Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.5e-2);
-    }
-
-    @Test(expected=MathIllegalStateException.class)
-    public void testMath844() {
-        final double[] y = { 0, 1, 2, 3, 2, 1,
-                             0, -1, -2, -3, -2, -1,
-                             0, 1, 2, 3, 2, 1,
-                             0, -1, -2, -3, -2, -1,
-                             0, 1, 2, 3, 2, 1, 0 };
-        final int len = y.length;
-        final WeightedObservedPoint[] points = new WeightedObservedPoint[len];
-        for (int i = 0; i < len; i++) {
-            points[i] = new WeightedObservedPoint(1, i, y[i]);
-        }
-
-        // The guesser fails because the function is far from a harmonic
-        // function: it is a triangular periodic function with amplitude 3
-        // and period 12, and all sample points are taken at integer abscissae,
-        // so the function values all belong to the integer subset {-3, -2, -1,
-        // 0, 1, 2, 3}.
-        new HarmonicFitter.ParameterGuesser(points);
-    }
-}
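
The removed HarmonicFitterTest exercised the deprecated HarmonicFitter, including the MATH-844 case above where the parameter guesser rejects a triangular signal. A minimal sketch of the equivalent fit with the replacement HarmonicCurveFitter, assuming the math4 class mirrors the math3.3 API; the amplitude, pulsation and phase values are illustrative:

    import org.apache.commons.math4.fitting.HarmonicCurveFitter;
    import org.apache.commons.math4.fitting.WeightedObservedPoints;

    public class HarmonicFitMigrationSketch {
        public static void main(String[] args) {
            final double a = 0.2, w = 3.4, p = 4.1;

            // Noise-free samples of a*cos(w*x + p); a real use case would add noise.
            WeightedObservedPoints obs = new WeightedObservedPoints();
            for (double x = 0.0; x < 1.3; x += 0.01) {
                obs.add(x, a * Math.cos(w * x + p));
            }

            // fit() returns { amplitude, angular frequency, phase }.
            double[] fitted = HarmonicCurveFitter.create().fit(obs.toList());
            System.out.printf("a=%g w=%g phi=%g%n", fitted[0], fitted[1], fitted[2]);
        }
    }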

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/fitting/PolynomialFitterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/fitting/PolynomialFitterTest.java b/src/test/java/org/apache/commons/math4/optimization/fitting/PolynomialFitterTest.java
deleted file mode 100644
index 5f87c6f..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/fitting/PolynomialFitterTest.java
+++ /dev/null
@@ -1,288 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.commons.math4.optimization.fitting;
-
-import java.util.Random;
-
-import org.apache.commons.math4.TestUtils;
-import org.apache.commons.math4.analysis.polynomials.PolynomialFunction;
-import org.apache.commons.math4.analysis.polynomials.PolynomialFunction.Parametric;
-import org.apache.commons.math4.distribution.RealDistribution;
-import org.apache.commons.math4.distribution.UniformRealDistribution;
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-import org.apache.commons.math4.optimization.fitting.CurveFitter;
-import org.apache.commons.math4.optimization.fitting.PolynomialFitter;
-import org.apache.commons.math4.optimization.general.GaussNewtonOptimizer;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test for class {@link CurveFitter} where the function to fit is a
- * polynomial.
- */
-@Deprecated
-public class PolynomialFitterTest {
-    @Test
-    public void testFit() {
-        final RealDistribution rng = new UniformRealDistribution(-100, 100);
-        rng.reseedRandomGenerator(64925784252L);
-
-        final LevenbergMarquardtOptimizer optim = new LevenbergMarquardtOptimizer();
-        final PolynomialFitter fitter = new PolynomialFitter(optim);
-        final double[] coeff = { 12.9, -3.4, 2.1 }; // 12.9 - 3.4 x + 2.1 x^2
-        final PolynomialFunction f = new PolynomialFunction(coeff);
-
-        // Collect data from a known polynomial.
-        for (int i = 0; i < 100; i++) {
-            final double x = rng.sample();
-            fitter.addObservedPoint(x, f.value(x));
-        }
-
-        // Start fit from initial guesses that are far from the optimal values.
-        final double[] best = fitter.fit(new double[] { -1e-20, 3e15, -5e25 });
-
-        TestUtils.assertEquals("best != coeff", coeff, best, 1e-12);
-    }
-
-    @Test
-    public void testNoError() {
-        Random randomizer = new Random(64925784252l);
-        for (int degree = 1; degree < 10; ++degree) {
-            PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
-
-            PolynomialFitter fitter = new PolynomialFitter(new LevenbergMarquardtOptimizer());
-            for (int i = 0; i <= degree; ++i) {
-                fitter.addObservedPoint(1.0, i, p.value(i));
-            }
-
-            final double[] init = new double[degree + 1];
-            PolynomialFunction fitted = new PolynomialFunction(fitter.fit(init));
-
-            for (double x = -1.0; x < 1.0; x += 0.01) {
-                double error = FastMath.abs(p.value(x) - fitted.value(x)) /
-                               (1.0 + FastMath.abs(p.value(x)));
-                Assert.assertEquals(0.0, error, 1.0e-6);
-            }
-        }
-    }
-
-    @Test
-    public void testSmallError() {
-        Random randomizer = new Random(53882150042l);
-        double maxError = 0;
-        for (int degree = 0; degree < 10; ++degree) {
-            PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
-
-            PolynomialFitter fitter = new PolynomialFitter(new LevenbergMarquardtOptimizer());
-            for (double x = -1.0; x < 1.0; x += 0.01) {
-                fitter.addObservedPoint(1.0, x,
-                                        p.value(x) + 0.1 * randomizer.nextGaussian());
-            }
-
-            final double[] init = new double[degree + 1];
-            PolynomialFunction fitted = new PolynomialFunction(fitter.fit(init));
-
-            for (double x = -1.0; x < 1.0; x += 0.01) {
-                double error = FastMath.abs(p.value(x) - fitted.value(x)) /
-                              (1.0 + FastMath.abs(p.value(x)));
-                maxError = FastMath.max(maxError, error);
-                Assert.assertTrue(FastMath.abs(error) < 0.1);
-            }
-        }
-        Assert.assertTrue(maxError > 0.01);
-    }
-
-    @Test
-    public void testMath798() {
-        final double tol = 1e-14;
-        final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol);
-        final double[] init = new double[] { 0, 0 };
-        final int maxEval = 3;
-
-        final double[] lm = doMath798(new LevenbergMarquardtOptimizer(checker), maxEval, init);
-        final double[] gn = doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
-
-        for (int i = 0; i <= 1; i++) {
-            Assert.assertEquals(lm[i], gn[i], tol);
-        }
-    }
-
-    /**
-     * This test shows that the user can set the maximum number of iterations
-     * to avoid running for too long.
-     * But in the test case, the real problem is that the tolerance is way too
-     * stringent.
-     */
-    @Test(expected=TooManyEvaluationsException.class)
-    public void testMath798WithToleranceTooLow() {
-        final double tol = 1e-100;
-        final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol);
-        final double[] init = new double[] { 0, 0 };
-        final int maxEval = 10000; // Trying hard to fit.
-
-        doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
-    }
-
-    /**
-     * This test shows that the user can set the maximum number of iterations
-     * to avoid running for too long.
-     * Even though the real problem is that the tolerance is far too stringent,
-     * it is still possible to obtain the best solution found so far: the
-     * checker returns the current point once the maximum iteration count has
-     * been reached.
-     */
-    @Test
-    public void testMath798WithToleranceTooLowButNoException() {
-        final double tol = 1e-100;
-        final double[] init = new double[] { 0, 0 };
-        final int maxEval = 10000; // Trying hard to fit.
-        final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol, maxEval);
-
-        final double[] lm = doMath798(new LevenbergMarquardtOptimizer(checker), maxEval, init);
-        final double[] gn = doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
-
-        for (int i = 0; i <= 1; i++) {
-            Assert.assertEquals(lm[i], gn[i], 1e-15);
-        }
-    }
-
-    /**
-     * @param optimizer Optimizer.
-     * @param maxEval Maximum number of function evaluations.
-     * @param init First guess.
-     * @return the solution found by the given optimizer.
-     */
-    private double[] doMath798(DifferentiableMultivariateVectorOptimizer optimizer,
-                               int maxEval,
-                               double[] init) {
-        final CurveFitter<Parametric> fitter = new CurveFitter<Parametric>(optimizer);
-
-        fitter.addObservedPoint(-0.2, -7.12442E-13);
-        fitter.addObservedPoint(-0.199, -4.33397E-13);
-        fitter.addObservedPoint(-0.198, -2.823E-13);
-        fitter.addObservedPoint(-0.197, -1.40405E-13);
-        fitter.addObservedPoint(-0.196, -7.80821E-15);
-        fitter.addObservedPoint(-0.195, 6.20484E-14);
-        fitter.addObservedPoint(-0.194, 7.24673E-14);
-        fitter.addObservedPoint(-0.193, 1.47152E-13);
-        fitter.addObservedPoint(-0.192, 1.9629E-13);
-        fitter.addObservedPoint(-0.191, 2.12038E-13);
-        fitter.addObservedPoint(-0.19, 2.46906E-13);
-        fitter.addObservedPoint(-0.189, 2.77495E-13);
-        fitter.addObservedPoint(-0.188, 2.51281E-13);
-        fitter.addObservedPoint(-0.187, 2.64001E-13);
-        fitter.addObservedPoint(-0.186, 2.8882E-13);
-        fitter.addObservedPoint(-0.185, 3.13604E-13);
-        fitter.addObservedPoint(-0.184, 3.14248E-13);
-        fitter.addObservedPoint(-0.183, 3.1172E-13);
-        fitter.addObservedPoint(-0.182, 3.12912E-13);
-        fitter.addObservedPoint(-0.181, 3.06761E-13);
-        fitter.addObservedPoint(-0.18, 2.8559E-13);
-        fitter.addObservedPoint(-0.179, 2.86806E-13);
-        fitter.addObservedPoint(-0.178, 2.985E-13);
-        fitter.addObservedPoint(-0.177, 2.67148E-13);
-        fitter.addObservedPoint(-0.176, 2.94173E-13);
-        fitter.addObservedPoint(-0.175, 3.27528E-13);
-        fitter.addObservedPoint(-0.174, 3.33858E-13);
-        fitter.addObservedPoint(-0.173, 2.97511E-13);
-        fitter.addObservedPoint(-0.172, 2.8615E-13);
-        fitter.addObservedPoint(-0.171, 2.84624E-13);
-
-        final double[] coeff = fitter.fit(maxEval,
-                                          new PolynomialFunction.Parametric(),
-                                          init);
-        return coeff;
-    }
-
-    @Test
-    public void testRedundantSolvable() {
-        // Levenberg-Marquardt should handle redundant information gracefully
-        checkUnsolvableProblem(new LevenbergMarquardtOptimizer(), true);
-    }
-
-    @Test
-    public void testRedundantUnsolvable() {
-        // Gauss-Newton should not be able to solve redundant information
-        checkUnsolvableProblem(new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-15, 1e-15)), false);
-    }
-
-    @Test
-    public void testLargeSample() {
-        Random randomizer = new Random(0x5551480dca5b369bl);
-        double maxError = 0;
-        for (int degree = 0; degree < 10; ++degree) {
-            PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
-
-            PolynomialFitter fitter = new PolynomialFitter(new LevenbergMarquardtOptimizer());
-            for (int i = 0; i < 40000; ++i) {
-                double x = -1.0 + i / 20000.0;
-                fitter.addObservedPoint(1.0, x,
-                                        p.value(x) + 0.1 * randomizer.nextGaussian());
-            }
-
-            final double[] init = new double[degree + 1];
-            PolynomialFunction fitted = new PolynomialFunction(fitter.fit(init));
-
-            for (double x = -1.0; x < 1.0; x += 0.01) {
-                double error = FastMath.abs(p.value(x) - fitted.value(x)) /
-                              (1.0 + FastMath.abs(p.value(x)));
-                maxError = FastMath.max(maxError, error);
-                Assert.assertTrue(FastMath.abs(error) < 0.01);
-            }
-        }
-        Assert.assertTrue(maxError > 0.001);
-    }
-
-    private void checkUnsolvableProblem(DifferentiableMultivariateVectorOptimizer optimizer,
-                                        boolean solvable) {
-        Random randomizer = new Random(1248788532l);
-        for (int degree = 0; degree < 10; ++degree) {
-            PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
-
-            PolynomialFitter fitter = new PolynomialFitter(optimizer);
-
-            // Reusing the same point over and over again brings no new
-            // information, so the problem cannot be solved in this case for
-            // degrees greater than 0 (a single point is sufficient only for
-            // degree 0).
-            for (double x = -1.0; x < 1.0; x += 0.01) {
-                fitter.addObservedPoint(1.0, 0.0, p.value(0.0));
-            }
-
-            try {
-                final double[] init = new double[degree + 1];
-                fitter.fit(init);
-                Assert.assertTrue(solvable || (degree == 0));
-            } catch(ConvergenceException e) {
-                Assert.assertTrue((! solvable) && (degree > 0));
-            }
-        }
-    }
-
-    private PolynomialFunction buildRandomPolynomial(int degree, Random randomizer) {
-        final double[] coefficients = new double[degree + 1];
-        for (int i = 0; i <= degree; ++i) {
-            coefficients[i] = randomizer.nextGaussian();
-        }
-        return new PolynomialFunction(coefficients);
-    }
-}
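
The polynomial tests above fitted with an explicit initial guess whose length fixed the degree. With the replacement PolynomialCurveFitter (assuming the math4 class mirrors math3.3), the degree is given at creation time and no starting point is required; the coefficients below are the ones used in the removed testFit() and serve only as an example:

    import java.util.Arrays;

    import org.apache.commons.math4.fitting.PolynomialCurveFitter;
    import org.apache.commons.math4.fitting.WeightedObservedPoints;

    public class PolynomialFitMigrationSketch {
        public static void main(String[] args) {
            // 12.9 - 3.4 x + 2.1 x^2
            final double[] coeff = { 12.9, -3.4, 2.1 };

            WeightedObservedPoints obs = new WeightedObservedPoints();
            for (int i = 0; i < 100; i++) {
                double x = -1.0 + i / 50.0;
                obs.add(x, coeff[0] + coeff[1] * x + coeff[2] * x * x);
            }

            // Fit a degree-2 polynomial; coefficients come back in increasing degree order.
            double[] best = PolynomialCurveFitter.create(2).fit(obs.toList());
            System.out.println(Arrays.toString(best));
        }
    }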

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerAbstractTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerAbstractTest.java b/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerAbstractTest.java
deleted file mode 100644
index 3f0f5c1..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerAbstractTest.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.Arrays;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.linear.BlockRealMatrix;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>; for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public abstract class AbstractLeastSquaresOptimizerAbstractTest {
-
-    public abstract AbstractLeastSquaresOptimizer createOptimizer();
-
-    @Test
-    public void testTrivial() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1 }, new double[] { 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getValue()[0], 1.0e-10);
-        try {
-            optimizer.guessParametersErrors();
-            Assert.fail("an exception should have been thrown");
-        } catch (NumberIsTooSmallException ee) {
-            // expected behavior
-        }
-    }
-
-    @Test
-    public void testQRColumnsPermutation() {
-
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 1.0, -1.0 }, { 0.0, 2.0 }, { 1.0, -2.0 } },
-                              new double[] { 4.0, 6.0, 1.0 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(7.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(4.0, optimum.getValue()[0], 1.0e-10);
-        Assert.assertEquals(6.0, optimum.getValue()[1], 1.0e-10);
-        Assert.assertEquals(1.0, optimum.getValue()[2], 1.0e-10);
-    }
-
-    @Test
-    public void testNoDependency() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 2, 0, 0, 0, 0, 0 },
-                { 0, 2, 0, 0, 0, 0 },
-                { 0, 0, 2, 0, 0, 0 },
-                { 0, 0, 0, 2, 0, 0 },
-                { 0, 0, 0, 0, 2, 0 },
-                { 0, 0, 0, 0, 0, 2 }
-        }, new double[] { 0.0, 1.1, 2.2, 3.3, 4.4, 5.5 });
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
-                               new double[] { 0, 0, 0, 0, 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        for (int i = 0; i < problem.target.length; ++i) {
-            Assert.assertEquals(0.55 * i, optimum.getPoint()[i], 1.0e-10);
-        }
-    }
-
-    @Test
-    public void testOneSet() {
-
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  1,  0, 0 },
-                { -1,  1, 0 },
-                {  0, -1, 1 }
-        }, new double[] { 1, 1, 1});
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(1.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(2.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getPoint()[2], 1.0e-10);
-    }
-
-    @Test
-    public void testTwoSets() {
-        double epsilon = 1.0e-7;
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  2,  1,   0,  4,       0, 0 },
-                { -4, -2,   3, -7,       0, 0 },
-                {  4,  1,  -2,  8,       0, 0 },
-                {  0, -3, -12, -1,       0, 0 },
-                {  0,  0,   0,  0, epsilon, 1 },
-                {  0,  0,   0,  0,       1, 1 }
-        }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
-                               new double[] { 0, 0, 0, 0, 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals( 3.0, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals( 4.0, optimum.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(-1.0, optimum.getPoint()[2], 1.0e-10);
-        Assert.assertEquals(-2.0, optimum.getPoint()[3], 1.0e-10);
-        Assert.assertEquals( 1.0 + epsilon, optimum.getPoint()[4], 1.0e-10);
-        Assert.assertEquals( 1.0 - epsilon, optimum.getPoint()[5], 1.0e-10);
-    }
-
-    @Test(expected=ConvergenceException.class)
-    public void testNonInvertible() throws Exception {
-
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  1, 2, -3 },
-                {  2, 1,  3 },
-                { -3, 0, -9 }
-        }, new double[] { 1, 1, 1 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-
-        optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
-    }
-
-    @Test
-    public void testIllConditioned() {
-        LinearProblem problem1 = new LinearProblem(new double[][] {
-                { 10.0, 7.0,  8.0,  7.0 },
-                {  7.0, 5.0,  6.0,  5.0 },
-                {  8.0, 6.0, 10.0,  9.0 },
-                {  7.0, 5.0,  9.0, 10.0 }
-        }, new double[] { 32, 23, 33, 31 });
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum1 =
-            optimizer.optimize(100, problem1, problem1.target, new double[] { 1, 1, 1, 1 },
-                               new double[] { 0, 1, 2, 3 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(1.0, optimum1.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(1.0, optimum1.getPoint()[1], 1.0e-10);
-        Assert.assertEquals(1.0, optimum1.getPoint()[2], 1.0e-10);
-        Assert.assertEquals(1.0, optimum1.getPoint()[3], 1.0e-10);
-
-        LinearProblem problem2 = new LinearProblem(new double[][] {
-                { 10.00, 7.00, 8.10, 7.20 },
-                {  7.08, 5.04, 6.00, 5.00 },
-                {  8.00, 5.98, 9.89, 9.00 },
-                {  6.99, 4.99, 9.00, 9.98 }
-        }, new double[] { 32, 23, 33, 31 });
-        PointVectorValuePair optimum2 =
-            optimizer.optimize(100, problem2, problem2.target, new double[] { 1, 1, 1, 1 },
-                               new double[] { 0, 1, 2, 3 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(-81.0, optimum2.getPoint()[0], 1.0e-8);
-        Assert.assertEquals(137.0, optimum2.getPoint()[1], 1.0e-8);
-        Assert.assertEquals(-34.0, optimum2.getPoint()[2], 1.0e-8);
-        Assert.assertEquals( 22.0, optimum2.getPoint()[3], 1.0e-8);
-    }
-
-    @Test
-    public void testMoreEstimatedParametersSimple() {
-
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 3.0, 2.0,  0.0, 0.0 },
-                { 0.0, 1.0, -1.0, 1.0 },
-                { 2.0, 0.0,  1.0, 0.0 }
-        }, new double[] { 7.0, 3.0, 5.0 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 },
-                new double[] { 7, 6, 5, 4 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-    }
-
-    @Test
-    public void testMoreEstimatedParametersUnsorted() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 1.0, 1.0,  0.0,  0.0, 0.0,  0.0 },
-                { 0.0, 0.0,  1.0,  1.0, 1.0,  0.0 },
-                { 0.0, 0.0,  0.0,  0.0, 1.0, -1.0 },
-                { 0.0, 0.0, -1.0,  1.0, 0.0,  1.0 },
-                { 0.0, 0.0,  0.0, -1.0, 1.0,  0.0 }
-       }, new double[] { 3.0, 12.0, -1.0, 7.0, 1.0 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1, 1, 1 },
-                               new double[] { 2, 2, 2, 2, 2, 2 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(3.0, optimum.getPointRef()[2], 1.0e-10);
-        Assert.assertEquals(4.0, optimum.getPointRef()[3], 1.0e-10);
-        Assert.assertEquals(5.0, optimum.getPointRef()[4], 1.0e-10);
-        Assert.assertEquals(6.0, optimum.getPointRef()[5], 1.0e-10);
-    }
-
-    @Test
-    public void testRedundantEquations() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 1.0,  1.0 },
-                { 1.0, -1.0 },
-                { 1.0,  3.0 }
-        }, new double[] { 3.0, 1.0, 5.0 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 },
-                               new double[] { 1, 1 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(2.0, optimum.getPointRef()[0], 1.0e-10);
-        Assert.assertEquals(1.0, optimum.getPointRef()[1], 1.0e-10);
-    }
-
-    @Test
-    public void testInconsistentEquations() {
-        LinearProblem problem = new LinearProblem(new double[][] {
-                { 1.0,  1.0 },
-                { 1.0, -1.0 },
-                { 1.0,  3.0 }
-        }, new double[] { 3.0, 1.0, 4.0 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 1, 1 });
-        Assert.assertTrue(optimizer.getRMS() > 0.1);
-    }
-
-    @Test(expected=DimensionMismatchException.class)
-    public void testInconsistentSizes1() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } }, new double[] { -1, 1 });
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1 }, new double[] { 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(-1, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(+1, optimum.getPoint()[1], 1.0e-10);
-
-        optimizer.optimize(100, problem, problem.target,
-                           new double[] { 1 },
-                           new double[] { 0, 0 });
-    }
-
-    @Test(expected=DimensionMismatchException.class)
-    public void testInconsistentSizes2() {
-        LinearProblem problem =
-            new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } }, new double[] { -1, 1 });
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, problem, problem.target, new double[] { 1, 1 }, new double[] { 0, 0 });
-        Assert.assertEquals(0, optimizer.getRMS(), 1.0e-10);
-        Assert.assertEquals(-1, optimum.getPoint()[0], 1.0e-10);
-        Assert.assertEquals(+1, optimum.getPoint()[1], 1.0e-10);
-
-        optimizer.optimize(100, problem, new double[] { 1 },
-                           new double[] { 1 },
-                           new double[] { 0, 0 });
-    }
-
-    @Test
-    public void testCircleFitting() {
-        CircleVectorial circle = new CircleVectorial();
-        circle.addPoint( 30.0,  68.0);
-        circle.addPoint( 50.0,  -6.0);
-        circle.addPoint(110.0, -20.0);
-        circle.addPoint( 35.0,  15.0);
-        circle.addPoint( 45.0,  97.0);
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum
-            = optimizer.optimize(100, circle, new double[] { 0, 0, 0, 0, 0 }, new double[] { 1, 1, 1, 1, 1 },
-                                 new double[] { 98.680, 47.345 });
-        Assert.assertTrue(optimizer.getEvaluations() < 10);
-        Assert.assertTrue(optimizer.getJacobianEvaluations() < 10);
-        double rms = optimizer.getRMS();
-        Assert.assertEquals(1.768262623567235,  FastMath.sqrt(circle.getN()) * rms,  1.0e-10);
-        Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
-        Assert.assertEquals(69.96016176931406, circle.getRadius(center), 1.0e-6);
-        Assert.assertEquals(96.07590211815305, center.getX(),            1.0e-6);
-        Assert.assertEquals(48.13516790438953, center.getY(),            1.0e-6);
-        double[][] cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
-        Assert.assertEquals(1.839, cov[0][0], 0.001);
-        Assert.assertEquals(0.731, cov[0][1], 0.001);
-        Assert.assertEquals(cov[0][1], cov[1][0], 1.0e-14);
-        Assert.assertEquals(0.786, cov[1][1], 0.001);
-
-        // add perfect measurements and check errors are reduced
-        double  r = circle.getRadius(center);
-        for (double d= 0; d < 2 * FastMath.PI; d += 0.01) {
-            circle.addPoint(center.getX() + r * FastMath.cos(d), center.getY() + r * FastMath.sin(d));
-        }
-        double[] target = new double[circle.getN()];
-        Arrays.fill(target, 0.0);
-        double[] weights = new double[circle.getN()];
-        Arrays.fill(weights, 2.0);
-        optimum = optimizer.optimize(100, circle, target, weights, new double[] { 98.680, 47.345 });
-        cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
-        Assert.assertEquals(0.0016, cov[0][0], 0.001);
-        Assert.assertEquals(3.2e-7, cov[0][1], 1.0e-9);
-        Assert.assertEquals(cov[0][1], cov[1][0], 1.0e-14);
-        Assert.assertEquals(0.0016, cov[1][1], 0.001);
-    }
-
-    @Test
-    public void testCircleFittingBadInit() {
-        CircleVectorial circle = new CircleVectorial();
-        double[][] points = circlePoints;
-        double[] target = new double[points.length];
-        Arrays.fill(target, 0.0);
-        double[] weights = new double[points.length];
-        Arrays.fill(weights, 2.0);
-        for (int i = 0; i < points.length; ++i) {
-            circle.addPoint(points[i][0], points[i][1]);
-        }
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum
-            = optimizer.optimize(100, circle, target, weights, new double[] { -12, -12 });
-        Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
-        Assert.assertTrue(optimizer.getEvaluations() < 25);
-        Assert.assertTrue(optimizer.getJacobianEvaluations() < 20);
-        Assert.assertEquals( 0.043, optimizer.getRMS(), 1.0e-3);
-        Assert.assertEquals( 0.292235,  circle.getRadius(center), 1.0e-6);
-        Assert.assertEquals(-0.151738,  center.getX(),            1.0e-6);
-        Assert.assertEquals( 0.2075001, center.getY(),            1.0e-6);
-    }
-
-    @Test
-    public void testCircleFittingGoodInit() {
-        CircleVectorial circle = new CircleVectorial();
-        double[][] points = circlePoints;
-        double[] target = new double[points.length];
-        Arrays.fill(target, 0.0);
-        double[] weights = new double[points.length];
-        Arrays.fill(weights, 2.0);
-        for (int i = 0; i < points.length; ++i) {
-            circle.addPoint(points[i][0], points[i][1]);
-        }
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum =
-            optimizer.optimize(100, circle, target, weights, new double[] { 0, 0 });
-        Assert.assertEquals(-0.1517383071957963, optimum.getPointRef()[0], 1.0e-6);
-        Assert.assertEquals(0.2074999736353867,  optimum.getPointRef()[1], 1.0e-6);
-        Assert.assertEquals(0.04268731682389561, optimizer.getRMS(),       1.0e-8);
-    }
-
-    private final double[][] circlePoints = new double[][] {
-        {-0.312967,  0.072366}, {-0.339248,  0.132965}, {-0.379780,  0.202724},
-        {-0.390426,  0.260487}, {-0.361212,  0.328325}, {-0.346039,  0.392619},
-        {-0.280579,  0.444306}, {-0.216035,  0.470009}, {-0.149127,  0.493832},
-        {-0.075133,  0.483271}, {-0.007759,  0.452680}, { 0.060071,  0.410235},
-        { 0.103037,  0.341076}, { 0.118438,  0.273884}, { 0.131293,  0.192201},
-        { 0.115869,  0.129797}, { 0.072223,  0.058396}, { 0.022884,  0.000718},
-        {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
-        {-0.278592, -0.005008}, {-0.337655,  0.056658}, {-0.385899,  0.112526},
-        {-0.405517,  0.186957}, {-0.415374,  0.262071}, {-0.387482,  0.343398},
-        {-0.347322,  0.397943}, {-0.287623,  0.458425}, {-0.223502,  0.475513},
-        {-0.135352,  0.478186}, {-0.061221,  0.483371}, { 0.003711,  0.422737},
-        { 0.065054,  0.375830}, { 0.108108,  0.297099}, { 0.123882,  0.222850},
-        { 0.117729,  0.134382}, { 0.085195,  0.056820}, { 0.029800, -0.019138},
-        {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
-        {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561,  0.014926},
-        {-0.471036,  0.074716}, {-0.488638,  0.182508}, {-0.485990,  0.254068},
-        {-0.463943,  0.338438}, {-0.406453,  0.404704}, {-0.334287,  0.466119},
-        {-0.254244,  0.503188}, {-0.161548,  0.495769}, {-0.075733,  0.495560},
-        { 0.001375,  0.434937}, { 0.082787,  0.385806}, { 0.115490,  0.323807},
-        { 0.141089,  0.223450}, { 0.138693,  0.131703}, { 0.126415,  0.049174},
-        { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
-        {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
-        {-0.405195, -0.000895}, {-0.444937,  0.085456}, {-0.484357,  0.175597},
-        {-0.472453,  0.248681}, {-0.438580,  0.347463}, {-0.402304,  0.422428},
-        {-0.326777,  0.479438}, {-0.247797,  0.505581}, {-0.152676,  0.519380},
-        {-0.071754,  0.516264}, { 0.015942,  0.472802}, { 0.076608,  0.419077},
-        { 0.127673,  0.330264}, { 0.159951,  0.262150}, { 0.153530,  0.172681},
-        { 0.140653,  0.089229}, { 0.078666,  0.024981}, { 0.023807, -0.037022},
-        {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
-    };
-
-    public void doTestStRD(final StatisticalReferenceDataset dataset,
-        final double errParams, final double errParamsSd) {
-        final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        final double[] w = new double[dataset.getNumObservations()];
-        Arrays.fill(w, 1.0);
-
-        final double[][] data = dataset.getData();
-        final double[] initial = dataset.getStartingPoint(0);
-        final MultivariateDifferentiableVectorFunction problem;
-        problem = dataset.getLeastSquaresProblem();
-        final PointVectorValuePair optimum;
-        optimum = optimizer.optimize(100, problem, data[1], w, initial);
-
-        final double[] actual = optimum.getPoint();
-        for (int i = 0; i < actual.length; i++) {
-            double expected = dataset.getParameter(i);
-            double delta = FastMath.abs(errParams * expected);
-            Assert.assertEquals(dataset.getName() + ", param #" + i,
-                                expected, actual[i], delta);
-        }
-    }
-
-    @Test
-    public void testKirby2() throws IOException {
-        doTestStRD(StatisticalReferenceDatasetFactory.createKirby2(), 1E-7, 1E-7);
-    }
-
-    @Test
-    public void testHahn1() throws IOException {
-        doTestStRD(StatisticalReferenceDatasetFactory.createHahn1(), 1E-7, 1E-4);
-    }
-
-    static class LinearProblem implements MultivariateDifferentiableVectorFunction, Serializable {
-
-        private static final long serialVersionUID = 703247177355019415L;
-        final RealMatrix factors;
-        final double[] target;
-        public LinearProblem(double[][] factors, double[] target) {
-            this.factors = new BlockRealMatrix(factors);
-            this.target  = target;
-        }
-
-        public double[] value(double[] variables) {
-            return factors.operate(variables);
-        }
-
-        public DerivativeStructure[] value(DerivativeStructure[] variables) {
-            DerivativeStructure[] value = new DerivativeStructure[factors.getRowDimension()];
-            for (int i = 0; i < value.length; ++i) {
-                value[i] = variables[0].getField().getZero();
-                for (int j = 0; j < factors.getColumnDimension(); ++j) {
-                    value[i] = value[i].add(variables[j].multiply(factors.getEntry(i, j)));
-                }
-                
-            }
-            return value;
-        }
-
-    }
-}
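
The removed abstract test drove the deprecated optimizers through optimize(maxEval, model, target, weights, start). A rough equivalent of its testTrivial() case (minimize the residual of 2x = 3) with the replacement fitting.leastsquares API is sketched below, assuming the math4 classes mirror the math3.3 ones (LeastSquaresBuilder, LevenbergMarquardtOptimizer, Optimum):

    import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math4.fitting.leastsquares.LevenbergMarquardtOptimizer;

    public class LeastSquaresMigrationSketch {
        public static void main(String[] args) {
            // Model value 2*x and its 1x1 Jacobian.
            MultivariateVectorFunction value = point -> new double[] { 2 * point[0] };
            MultivariateMatrixFunction jacobian = point -> new double[][] { { 2 } };

            LeastSquaresProblem problem = new LeastSquaresBuilder()
                    .start(new double[] { 1 })
                    .model(value, jacobian)
                    .target(new double[] { 3 })
                    .maxEvaluations(100)
                    .maxIterations(100)
                    .build();

            Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
            System.out.println("x   = " + optimum.getPoint().getEntry(0)); // expected ~1.5
            System.out.println("RMS = " + optimum.getRMS());               // expected ~0
        }
    }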

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTest.java
deleted file mode 100644
index e965ac3..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
- * or agreed to in writing, software distributed under the License is
- * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.junit.Assert;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Test;
-
-@Deprecated
-public class AbstractLeastSquaresOptimizerTest {
-
-    public static AbstractLeastSquaresOptimizer createOptimizer() {
-        return new AbstractLeastSquaresOptimizer(null) {
-
-            @Override
-            protected PointVectorValuePair doOptimize() {
-                final double[] params = getStartPoint();
-                final double[] res = computeResiduals(computeObjectiveValue(params));
-                setCost(computeCost(res));
-                return new PointVectorValuePair(params, null);
-            }
-        };
-    }
-
-    @Test
-    public void testGetChiSquare() throws IOException {
-        final StatisticalReferenceDataset dataset;
-        dataset = StatisticalReferenceDatasetFactory.createKirby2();
-        final AbstractLeastSquaresOptimizer optimizer;
-        optimizer = createOptimizer();
-        final double[] a = dataset.getParameters();
-        final double[] y = dataset.getData()[1];
-        final double[] w = new double[y.length];
-        Arrays.fill(w, 1.0);
-
-        optimizer.optimize(1, dataset.getLeastSquaresProblem(), y, w, a);
-        final double expected = dataset.getResidualSumOfSquares();
-        final double actual = optimizer.getChiSquare();
-        Assert.assertEquals(dataset.getName(), expected, actual,
-                            1E-11 * expected);
-    }
-
-    @Test
-    public void testGetRMS() throws IOException {
-        final StatisticalReferenceDataset dataset;
-        dataset = StatisticalReferenceDatasetFactory.createKirby2();
-        final AbstractLeastSquaresOptimizer optimizer;
-        optimizer = createOptimizer();
-        final double[] a = dataset.getParameters();
-        final double[] y = dataset.getData()[1];
-        final double[] w = new double[y.length];
-        Arrays.fill(w, 1.0);
-
-        optimizer.optimize(1, dataset.getLeastSquaresProblem(), y, w, a);
-        final double expected = FastMath
-            .sqrt(dataset.getResidualSumOfSquares() /
-                  dataset.getNumObservations());
-        final double actual = optimizer.getRMS();
-        Assert.assertEquals(dataset.getName(), expected, actual,
-                            1E-11 * expected);
-    }
-
-    @Test
-    public void testComputeSigma() throws IOException {
-        final StatisticalReferenceDataset dataset;
-        dataset = StatisticalReferenceDatasetFactory.createKirby2();
-        final AbstractLeastSquaresOptimizer optimizer;
-        optimizer = createOptimizer();
-        final double[] a = dataset.getParameters();
-        final double[] y = dataset.getData()[1];
-        final double[] w = new double[y.length];
-        Arrays.fill(w, 1.0);
-
-        final int dof = y.length - a.length;
-        final PointVectorValuePair optimum = optimizer.optimize(1, dataset.getLeastSquaresProblem(), y, w, a);
-        final double[] sig = optimizer.computeSigma(optimum.getPoint(), 1e-14);
-        final double[] expected = dataset.getParametersStandardDeviations();
-        for (int i = 0; i < sig.length; i++) {
-            final double actual = FastMath.sqrt(optimizer.getChiSquare() / dof) * sig[i];
-            Assert.assertEquals(dataset.getName() + ", parameter #" + i,
-                                expected[i], actual, 1e-7 * expected[i]);
-        }
-    }
-}
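
The chi-square, RMS and sigma checks in the test above boil down to a few relations: with unit weights the optimizer's chi-square equals the residual sum of squares, the RMS is sqrt(chi2 / n), and the sigma check scales computeSigma() by sqrt(chi2 / dof). A self-contained sketch of the first two quantities, with hypothetical residual values:

    public final class LeastSquaresStatsSketch {
        /** Weighted sum of squared residuals (the "chi-square" asserted above). */
        static double chiSquare(double[] residuals, double[] weights) {
            double sum = 0;
            for (int i = 0; i < residuals.length; i++) {
                sum += weights[i] * residuals[i] * residuals[i];
            }
            return sum;
        }

        /** Root-mean-square error, sqrt(chi2 / n), as asserted in testGetRMS(). */
        static double rms(double[] residuals, double[] weights) {
            return Math.sqrt(chiSquare(residuals, weights) / residuals.length);
        }

        public static void main(String[] args) {
            double[] r = { 0.5, -0.25, 0.1 }; // hypothetical residuals
            double[] w = { 1.0, 1.0, 1.0 };   // unit weights, as in the tests
            System.out.println("chi2 = " + chiSquare(r, w));
            System.out.println("rms  = " + rms(r, w));
        }
    }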

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTestValidation.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTestValidation.java b/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTestValidation.java
deleted file mode 100644
index da39013..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/AbstractLeastSquaresOptimizerTestValidation.java
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
- * or agreed to in writing, software distributed under the License is
- * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
- */
-package org.apache.commons.math4.optimization.general;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.ArrayList;
-import java.awt.geom.Point2D;
-
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer;
-import org.apache.commons.math4.stat.descriptive.StatisticalSummary;
-import org.apache.commons.math4.stat.descriptive.SummaryStatistics;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * This class demonstrates the main functionality of the
- * {@link AbstractLeastSquaresOptimizer}, common to the
- * optimizer implementations in package
- * {@link org.apache.commons.math4.optimization.general}.
- * <br/>
- * Not enabled by default, as the class name does not end with "Test".
- * <br/>
- * Invoke by running
- * <pre><code>
- *  mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation
- * </code></pre>
- * or by running
- * <pre><code>
- *  mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation -DargLine="-DmcRuns=1234 -server"
- * </code></pre>
- */
-@Deprecated
-public class AbstractLeastSquaresOptimizerTestValidation {
-    private static final int MONTE_CARLO_RUNS = Integer.parseInt(System.getProperty("mcRuns",
-                                                                                    "100"));
-
-    /**
-     * Using a Monte-Carlo procedure, this test checks the error estimates
-     * provided by the square roots of the diagonal elements of the
-     * covariance matrix.
-     * <br/>
-     * The test generates sets of observations, each sampled from
-     * a Gaussian distribution.
-     * <br/>
-     * The optimization problem solved is defined in class
-     * {@link StraightLineProblem}.
-     * <br/>
-     * The output (on stdout) will be a table summarizing the distribution
-     * of parameters generated by the Monte-Carlo process and by the direct
-     * estimation provided by the diagonal elements of the covariance matrix.
-     */
-    @Test
-    public void testParametersErrorMonteCarloObservations() {
-        // Error on the observations.
-        final double yError = 15;
-
-        // True values of the parameters.
-        final double slope = 123.456;
-        final double offset = -98.765;
-
-        // Samples generator.
-        final RandomStraightLinePointGenerator lineGenerator
-            = new RandomStraightLinePointGenerator(slope, offset,
-                                                   yError,
-                                                   -1e3, 1e4,
-                                                   138577L);
-
-        // Number of observations.
-        final int numObs = 100; // XXX Should be a command-line option.
-        // number of parameters.
-        final int numParams = 2;
-
-        // Parameters found for each Monte-Carlo run.
-        final SummaryStatistics[] paramsFoundByDirectSolution = new SummaryStatistics[numParams];
-        // Sigma estimations (square-root of the diagonal elements of the
-        // covariance matrix), for each Monte-Carlo run.
-        final SummaryStatistics[] sigmaEstimate = new SummaryStatistics[numParams];
-
-        // Initialize statistics accumulators.
-        for (int i = 0; i < numParams; i++) {
-            paramsFoundByDirectSolution[i] = new SummaryStatistics();
-            sigmaEstimate[i] = new SummaryStatistics();
-        }
-
-        // Dummy optimizer (to compute the covariance matrix).
-        final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
-        final double[] init = { slope, offset };
-
-        // Monte-Carlo (generates many sets of observations).
-        final int mcRepeat = MONTE_CARLO_RUNS;
-        int mcCount = 0;
-        while (mcCount < mcRepeat) {
-            // Observations.
-            final Point2D.Double[] obs = lineGenerator.generate(numObs);
-
-            final StraightLineProblem problem = new StraightLineProblem(yError);
-            for (int i = 0; i < numObs; i++) {
-                final Point2D.Double p = obs[i];
-                problem.addPoint(p.x, p.y);
-            }
-
-            // Direct solution (using simple regression).
-            final double[] regress = problem.solve();
-
-            // Estimation of the standard deviation (diagonal elements of the
-            // covariance matrix).
-            final PointVectorValuePair optimum = optim.optimize(Integer.MAX_VALUE,
-                           problem, problem.target(), problem.weight(), init);
-            final double[] sigma = optim.computeSigma(optimum.getPoint(), 1e-14);
-
-            // Accumulate statistics.
-            for (int i = 0; i < numParams; i++) {
-                paramsFoundByDirectSolution[i].addValue(regress[i]);
-                sigmaEstimate[i].addValue(sigma[i]);
-            }
-
-            // Next Monte-Carlo.
-            ++mcCount;
-        }
-
-        // Print statistics.
-        final String line = "--------------------------------------------------------------";
-        System.out.println("                 True value       Mean        Std deviation");
-        for (int i = 0; i < numParams; i++) {
-            System.out.println(line);
-            System.out.println("Parameter #" + i);
-
-            StatisticalSummary s = paramsFoundByDirectSolution[i].getSummary();
-            System.out.printf("              %+.6e   %+.6e   %+.6e\n",
-                              init[i],
-                              s.getMean(),
-                              s.getStandardDeviation());
-
-            s = sigmaEstimate[i].getSummary();
-            System.out.printf("sigma: %+.6e (%+.6e)\n",
-                              s.getMean(),
-                              s.getStandardDeviation());
-        }
-        System.out.println(line);
-
-        // Check the error estimation.
-        for (int i = 0; i < numParams; i++) {
-            Assert.assertEquals(paramsFoundByDirectSolution[i].getSummary().getStandardDeviation(),
-                                sigmaEstimate[i].getSummary().getMean(),
-                                8e-2);
-        }
-    }
-
-    /**
-     * In this test, the set of observations is fixed.
-     * Using a Monte-Carlo procedure, it generates sets of parameters,
- * and determines the parameter change that will result in the
-     * normalized chi-square becoming larger by one than the value from
-     * the best fit solution.
-     * <br/>
-     * The optimization problem solved is defined in class
-     * {@link StraightLineProblem}.
-     * <br/>
-     * The output (on stdout) will be a list of lines containing:
-     * <ul>
-     *  <li>slope of the straight line,</li>
-     *  <li>intercept of the straight line,</li>
-     *  <li>chi-square of the solution defined by the above two values.</li>
-     * </ul>
-     * The output is separated into two blocks (with a blank line between
-     * them); the first block will contain all parameter sets for which
-     * {@code chi2 < chi2_b + 1}
-     * and the second block, all sets for which
-     * {@code chi2 >= chi2_b + 1}
-     * where {@code chi2_b} is the lowest chi-square (corresponding to the
-     * best solution).
-     */
-    @Test
-    public void testParametersErrorMonteCarloParameters() {
-        // Error on the observations.
-        final double yError = 15;
-
-        // True values of the parameters.
-        final double slope = 123.456;
-        final double offset = -98.765;
-
-        // Samples generator.
-        final RandomStraightLinePointGenerator lineGenerator
-            = new RandomStraightLinePointGenerator(slope, offset,
-                                                   yError,
-                                                   -1e3, 1e4,
-                                                   13839013L);
-
-        // Number of observations.
-        final int numObs = 10;
-        // number of parameters.
-
-        // Create a single set of observations.
-        final Point2D.Double[] obs = lineGenerator.generate(numObs);
-
-        final StraightLineProblem problem = new StraightLineProblem(yError);
-        for (int i = 0; i < numObs; i++) {
-            final Point2D.Double p = obs[i];
-            problem.addPoint(p.x, p.y);
-        }
-
-        // Direct solution (using simple regression).
-        final double[] regress = problem.solve();
-
-        // Dummy optimizer (to compute the chi-square).
-        final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
-        // Get chi-square of the best parameters set for the given set of
-        // observations.
-        final double bestChi2N = getChi2N(optim, problem, regress);
-        final double[] sigma = optim.computeSigma(regress, 1e-14);
-
-        // Monte-Carlo (generates a grid of parameters).
-        final int mcRepeat = MONTE_CARLO_RUNS;
-        final int gridSize = (int) FastMath.sqrt(mcRepeat);
-
-        // Parameters found for each Monte-Carlo run.
-        // Index 0 = slope
-        // Index 1 = offset
-        // Index 2 = normalized chi2
-        final List<double[]> paramsAndChi2 = new ArrayList<double[]>(gridSize * gridSize);
-
-        final double slopeRange = 10 * sigma[0];
-        final double offsetRange = 10 * sigma[1];
-        final double minSlope = slope - 0.5 * slopeRange;
-        final double minOffset = offset - 0.5 * offsetRange;
-        final double deltaSlope = slopeRange / gridSize;
-        final double deltaOffset = offsetRange / gridSize;
-        for (int i = 0; i < gridSize; i++) {
-            final double s = minSlope + i * deltaSlope;
-            for (int j = 0; j < gridSize; j++) {
-                final double o = minOffset + j * deltaOffset;
-                final double chi2N = getChi2N(optim, problem, new double[] {s, o});
-
-                paramsAndChi2.add(new double[] {s, o, chi2N});
-            }
-        }
-
-        // Output (for use with "gnuplot").
-
-        // Some info.
-
-        // For plotting separately sets of parameters that have a large chi2.
-        final double chi2NPlusOne = bestChi2N + 1;
-        int numLarger = 0;
-
-        final String lineFmt = "%+.10e %+.10e   %.8e\n";
-
-        // Point with smallest chi-square.
-        System.out.printf(lineFmt, regress[0], regress[1], bestChi2N);
-        System.out.println(); // Empty line.
-
-        // Points within the confidence interval.
-        for (double[] d : paramsAndChi2) {
-            if (d[2] <= chi2NPlusOne) {
-                System.out.printf(lineFmt, d[0], d[1], d[2]);
-            }
-        }
-        System.out.println(); // Empty line.
-
-        // Points outside the confidence interval.
-        for (double[] d : paramsAndChi2) {
-            if (d[2] > chi2NPlusOne) {
-                ++numLarger;
-                System.out.printf(lineFmt, d[0], d[1], d[2]);
-            }
-        }
-        System.out.println(); // Empty line.
-
-        System.out.println("# sigma=" + Arrays.toString(sigma));
-        System.out.println("# " + numLarger + " sets filtered out");
-    }
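A note on the quantity driving the two output blocks above: the normalized (reduced) chi-square returned by the getChi2N helper below is, writing n for the number of observations, p for the number of parameters, w_i for the weights and t_i for the target values (symbols chosen here only for illustration),

\[
\chi^2_N(\theta) = \frac{1}{n - p} \sum_{i=1}^{n} w_i \bigl(t_i - f_\theta(x_i)\bigr)^2 ,
\]

and the first block keeps the parameter sets with \(\chi^2_N \le \chi^2_{N,b} + 1\), where \(\chi^2_{N,b}\) is the value at the best-fit parameters.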
-
-    /**
-     * @return the normalized chi-square.
-     */
-    private double getChi2N(AbstractLeastSquaresOptimizer optim,
-                            StraightLineProblem problem,
-                            double[] params) {
-        final double[] t = problem.target();
-        final double[] w = problem.weight();
-
-        optim.optimize(Integer.MAX_VALUE, problem, t, w, params);
-
-        return optim.getChiSquare() / (t.length - params.length);
-    }
-}
-
-/**
- * A dummy optimizer.
- * Used for computing the covariance matrix.
- */
-@Deprecated
-class DummyOptimizer extends AbstractLeastSquaresOptimizer {
-    public DummyOptimizer() {
-        super(null);
-    }
-
-    /**
-     * This method does nothing and returns a dummy value.
-     */
-    @Override
-    public PointVectorValuePair doOptimize() {
-        final double[] params = getStartPoint();
-        final double[] res = computeResiduals(computeObjectiveValue(params));
-        setCost(computeCost(res));
-        return new PointVectorValuePair(params, null);
-    }
-}

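The Monte-Carlo check in the removed test above compares the spread of directly fitted parameters with the sigma estimate read off the square root of the covariance-matrix diagonal. A minimal plain-Java sketch of that direct estimate for the straight-line model y = a*x + b with a common observation error (the class name, the sample abscissas and the explicit normal-equation shortcut are illustrative, not part of the removed API):

    import java.util.Locale;

    /** Sketch: parameter sigmas from the diagonal of (J^T W J)^{-1} for y = a*x + b. */
    public class StraightLineSigmaSketch {
        public static void main(String[] args) {
            double[] x = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
            double yError = 15;                 // common standard deviation of the observations
            double w = 1 / (yError * yError);   // identical weight for every point

            // Normal matrix J^T W J for Jacobian rows [x_i, 1].
            double sxx = 0, sx = 0, s1 = 0;
            for (double xi : x) {
                sxx += w * xi * xi;
                sx  += w * xi;
                s1  += w;
            }

            // Covariance matrix is the inverse of the 2x2 normal matrix.
            double det = sxx * s1 - sx * sx;
            double varSlope  = s1  / det;
            double varOffset = sxx / det;

            System.out.printf(Locale.US, "sigma(slope)=%.6e  sigma(offset)=%.6e%n",
                              Math.sqrt(varSlope), Math.sqrt(varOffset));
        }
    }

With many Monte-Carlo repetitions, the standard deviation of the directly fitted slopes and offsets should approach these square roots, which is exactly the assertion made at the end of the removed test.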
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/CircleProblem.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/CircleProblem.java b/src/test/java/org/apache/commons/math4/optimization/general/CircleProblem.java
deleted file mode 100644
index f4bb05a..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/CircleProblem.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.util.ArrayList;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Class that models a circle.
- * The parameters of problem are:
- * <ul>
- *  <li>the x-coordinate of the circle center,</li>
- *  <li>the y-coordinate of the circle center,</li>
- *  <li>the radius of the circle.</li>
- * </ul>
- * The model functions are:
- * <ul>
- *  <li>for each triplet (cx, cy, r), the (x, y) coordinates of a point on the
- *   corresponding circle.</li>
- * </ul>
- */
-@Deprecated
-class CircleProblem implements MultivariateDifferentiableVectorFunction {
-    /** Cloud of points assumed to be fitted by a circle. */
-    private final ArrayList<Vector2D> points;
-    /** Error on the x-coordinate of the points. */
-    private final double xSigma;
-    /** Error on the y-coordinate of the points. */
-    private final double ySigma;
-
-    /**
-     * @param xError Assumed error for the x-coordinate of the circle points.
-     * @param yError Assumed error for the y-coordinate of the circle points.
-     */
-    public CircleProblem(double xError,
-                         double yError) {
-        points = new ArrayList<Vector2D>();
-        xSigma = xError;
-        ySigma = yError;
-    }
-
-    public void addPoint(Vector2D p) {
-        points.add(p);
-    }
-
-    public double[] target() {
-        final double[] t = new double[points.size() * 2];
-        for (int i = 0; i < points.size(); i++) {
-            final Vector2D p = points.get(i);
-            final int index = i * 2;
-            t[index]     = p.getX();
-            t[index + 1] = p.getY();
-        }
-
-        return t;
-    }
-
-    public double[] weight() {
-        final double wX = 1 / (xSigma * xSigma);
-        final double wY = 1 / (ySigma * ySigma);
-        final double[] w = new double[points.size() * 2];
-        for (int i = 0; i < points.size(); i++) {
-            final int index = i * 2;
-            w[index] = wX;
-            w[index + 1] = wY;
-        }
-
-        return w;
-    }
-
-    public double[] value(double[] params) {
-        final double cx = params[0];
-        final double cy = params[1];
-        final double r = params[2];
-
-        final double[] model = new double[points.size() * 2];
-
-        for (int i = 0; i < points.size(); i++) {
-            final Vector2D p = points.get(i);
-
-            // Find the circle point closest to the observed point
-            // (observed points are points added through the addPoint method above)
-            final double dX = cx - p.getX();
-            final double dY = cy - p.getY();
-            final double scaling = r / FastMath.hypot(dX, dY);
-            final int index  = i * 2;
-            model[index]     = cx - scaling * dX;
-            model[index + 1] = cy - scaling * dY;
-
-        }
-
-        return model;
-    }
-
-    public DerivativeStructure[] value(DerivativeStructure[] params) {
-        final DerivativeStructure cx = params[0];
-        final DerivativeStructure cy = params[1];
-        final DerivativeStructure r = params[2];
-
-        final DerivativeStructure[] model = new DerivativeStructure[points.size() * 2];
-
-        for (int i = 0; i < points.size(); i++) {
-            final Vector2D p = points.get(i);
-
-            // Find the circle point closest to the observed point
-            // (observed points are points added through the addPoint method above)
-            final DerivativeStructure dX = cx.subtract(p.getX());
-            final DerivativeStructure dY = cy.subtract(p.getY());
-            final DerivativeStructure scaling = r.divide(dX.multiply(dX).add(dY.multiply(dY)).sqrt());
-            final int index  = i * 2;
-            model[index]     = cx.subtract(scaling.multiply(dX));
-            model[index + 1] = cy.subtract(scaling.multiply(dY));
-
-        }
-
-        return model;
-
-    }
-
-}

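CircleProblem.value models each observation by the circle point closest to it: for center c and radius r, that point is c + r*(p - c)/||p - c||, which is what the dX/dY/scaling arithmetic above computes. A standalone sketch of the same projection (names and test values are illustrative):

    /** Sketch: projection of an observed point onto the model circle, as in CircleProblem.value. */
    public class CirclePointSketch {
        static double[] closestCirclePoint(double cx, double cy, double r, double px, double py) {
            double dx = px - cx;
            double dy = py - cy;
            double scale = r / Math.hypot(dx, dy);   // places the observation on the circle
            return new double[] { cx + scale * dx, cy + scale * dy };
        }

        public static void main(String[] args) {
            double[] q = closestCirclePoint(0, 0, 2, 3, 4);  // expect (1.2, 1.6)
            System.out.println(q[0] + ", " + q[1]);
        }
    }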
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/CircleScalar.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/CircleScalar.java b/src/test/java/org/apache/commons/math4/optimization/general/CircleScalar.java
deleted file mode 100644
index 2727218..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/CircleScalar.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.util.ArrayList;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-
-/**
- * Class used in the tests.
- */
-@Deprecated
-public class CircleScalar implements MultivariateDifferentiableFunction {
-    private ArrayList<Vector2D> points;
-
-    public CircleScalar() {
-        points  = new ArrayList<Vector2D>();
-    }
-
-    public void addPoint(double px, double py) {
-        points.add(new Vector2D(px, py));
-    }
-
-    public double getRadius(Vector2D center) {
-        double r = 0;
-        for (Vector2D point : points) {
-            r += point.distance(center);
-        }
-        return r / points.size();
-    }
-
-    private DerivativeStructure distance(Vector2D point,
-                                         DerivativeStructure cx, DerivativeStructure cy) {
-        DerivativeStructure dx = cx.subtract(point.getX());
-        DerivativeStructure dy = cy.subtract(point.getY());
-        return dx.multiply(dx).add(dy.multiply(dy)).sqrt();
-    }
-
-    public DerivativeStructure getRadius(DerivativeStructure cx, DerivativeStructure cy) {
-        DerivativeStructure r = cx.getField().getZero();
-        for (Vector2D point : points) {
-            r = r.add(distance(point, cx, cy));
-        }
-        return r.divide(points.size());
-    }
-
-    public double value(double[] variables)  {
-        Vector2D center = new Vector2D(variables[0], variables[1]);
-        double radius = getRadius(center);
-
-        double sum = 0;
-        for (Vector2D point : points) {
-            double di = point.distance(center) - radius;
-            sum += di * di;
-        }
-
-        return sum;
-    }
-
-    public DerivativeStructure value(DerivativeStructure[] variables)  {
-        DerivativeStructure radius = getRadius(variables[0], variables[1]);
-
-        DerivativeStructure sum = variables[0].getField().getZero();
-        for (Vector2D point : points) {
-            DerivativeStructure di = distance(point, variables[0], variables[1]).subtract(radius);
-            sum = sum.add(di.multiply(di));
-        }
-
-        return sum;
-    }
-
-}

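CircleScalar turns circle fitting into a scalar least-squares problem: the radius is taken as the mean distance from the candidate center to all points, and the cost is the sum of squared deviations from that mean. A method-level plain-double sketch of the same cost (illustrative, without DerivativeStructure):

    /** Sketch: the scalar circle-fit cost used by CircleScalar.value, on plain doubles. */
    static double circleCost(double[][] points, double cx, double cy) {
        double radius = 0;
        for (double[] p : points) {
            radius += Math.hypot(p[0] - cx, p[1] - cy);
        }
        radius /= points.length;                      // mean distance = fitted radius

        double sum = 0;
        for (double[] p : points) {
            double di = Math.hypot(p[0] - cx, p[1] - cy) - radius;
            sum += di * di;                           // squared radial residual
        }
        return sum;
    }

Minimizing this cost over (cx, cy) recovers the center; the radius then follows as the mean distance.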

[13/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/CMAESOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/CMAESOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/CMAESOptimizer.java
deleted file mode 100644
index 17d84af..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/CMAESOptimizer.java
+++ /dev/null
@@ -1,1441 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NotPositiveException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.OutOfRangeException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.linear.Array2DRowRealMatrix;
-import org.apache.commons.math4.linear.EigenDecomposition;
-import org.apache.commons.math4.linear.MatrixUtils;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateOptimizer;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.random.MersenneTwister;
-import org.apache.commons.math4.random.RandomGenerator;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathArrays;
-
-/**
- * <p>An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
- * for non-linear, non-convex, non-smooth, global function minimization.
- * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
- * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
- * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
- * optima, outlier, etc.) of the objective function. Like a
- * quasi-Newton method, the CMA-ES learns and applies a variable metric
- * on the underlying search space. Unlike a quasi-Newton method, the
- * CMA-ES neither estimates nor uses gradients, making it considerably more
- * reliable in terms of finding a good, or even close to optimal, solution.</p>
- *
- * <p>In general, on smooth objective functions the CMA-ES is roughly ten times
- * slower than BFGS (counting objective function evaluations, no gradients provided).
- * For up to <math>N=10</math> variables, the derivative-free simplex
- * direct search method (Nelder and Mead) can also be faster, but it is
- * far less reliable than CMA-ES.</p>
- *
- * <p>The CMA-ES is particularly well suited for non-separable
- * and/or badly conditioned problems. To observe the advantage of CMA compared
- * to a conventional evolution strategy, it will usually take about
- * <math>30 N</math> function evaluations. On difficult problems the complete
- * optimization (a single run) is expected to take <em>roughly</em> between
- * <math>30 N</math> and <math>300 N<sup>2</sup></math>
- * function evaluations.</p>
- *
- * <p>This implementation is translated and adapted from the Matlab version
- * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.</p>
- *
- * For more information, please refer to the following links:
- * <ul>
- *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
- *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
- *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
- * </ul>
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class CMAESOptimizer
-    extends BaseAbstractMultivariateSimpleBoundsOptimizer<MultivariateFunction>
-    implements MultivariateOptimizer {
-    /** Default value for {@link #checkFeasableCount}: {@value}. */
-    public static final int DEFAULT_CHECKFEASABLECOUNT = 0;
-    /** Default value for {@link #stopFitness}: {@value}. */
-    public static final double DEFAULT_STOPFITNESS = 0;
-    /** Default value for {@link #isActiveCMA}: {@value}. */
-    public static final boolean DEFAULT_ISACTIVECMA = true;
-    /** Default value for {@link #maxIterations}: {@value}. */
-    public static final int DEFAULT_MAXITERATIONS = 30000;
-    /** Default value for {@link #diagonalOnly}: {@value}. */
-    public static final int DEFAULT_DIAGONALONLY = 0;
-    /** Default value for {@link #random}. */
-    public static final RandomGenerator DEFAULT_RANDOMGENERATOR = new MersenneTwister();
-
-    // global search parameters
-    /**
-     * Population size, offspring number. The primary strategy parameter to play
-     * with, which can be increased from its default value. Increasing the
- * population size improves global search properties at the expense of speed.
-     * Speed decreases, as a rule, at most linearly with increasing population
-     * size. It is advisable to begin with the default small population size.
-     */
-    private int lambda; // population size
-    /**
-     * Covariance update mechanism, default is active CMA. isActiveCMA = true
-     * turns on "active CMA" with a negative update of the covariance matrix and
-     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
-     * pos. def. and is numerically faster. Active CMA usually speeds up the
-     * adaptation.
-     */
-    private boolean isActiveCMA;
-    /**
-     * Determines how often a new random offspring is generated in case it is
-     * not feasible / beyond the defined limits, default is 0.
-     */
-    private int checkFeasableCount;
-    /**
-     * @see Sigma
-     */
-    private double[] inputSigma;
-    /** Number of objective variables/problem dimension */
-    private int dimension;
-    /**
-     * Defines the number of initial iterations, where the covariance matrix
-     * remains diagonal and the algorithm has internally linear time complexity.
-     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
-     * this setting also exhibits linear space complexity. This can be
-     * particularly useful for dimension > 100.
-     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
-     */
-    private int diagonalOnly = 0;
-    /** Whether the goal is to minimize ({@code true}) or maximize the objective function. */
-    private boolean isMinimize = true;
-    /** Indicates whether statistic data is collected. */
-    private boolean generateStatistics = false;
-
-    // termination criteria
-    /** Maximal number of iterations allowed. */
-    private int maxIterations;
-    /** Limit for fitness value. */
-    private double stopFitness;
-    /** Stop if the x-change is larger than stopTolUpX. */
-    private double stopTolUpX;
-    /** Stop if the x-change is smaller than stopTolX. */
-    private double stopTolX;
-    /** Stop if the change in function value is smaller than stopTolFun. */
-    private double stopTolFun;
-    /** Stop if the change over the stored fitness history is smaller than stopTolHistFun. */
-    private double stopTolHistFun;
-
-    // selection strategy parameters
-    /** Number of parents/points for recombination. */
-    private int mu; //
-    /** log(mu + 0.5), stored for efficiency. */
-    private double logMu2;
-    /** Array for weighted recombination. */
-    private RealMatrix weights;
-    /** Variance-effectiveness of sum w_i x_i. */
-    private double mueff; //
-
-    // dynamic strategy parameters and constants
-    /** Overall standard deviation - search volume. */
-    private double sigma;
-    /** Cumulation constant. */
-    private double cc;
-    /** Cumulation constant for step-size. */
-    private double cs;
-    /** Damping for step-size. */
-    private double damps;
-    /** Learning rate for rank-one update. */
-    private double ccov1;
-    /** Learning rate for rank-mu update. */
-    private double ccovmu;
-    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
-    private double chiN;
-    /** Learning rate for rank-one update - diagonalOnly */
-    private double ccov1Sep;
-    /** Learning rate for rank-mu update - diagonalOnly */
-    private double ccovmuSep;
-
-    // CMA internal values - updated each generation
-    /** Objective variables. */
-    private RealMatrix xmean;
-    /** Evolution path. */
-    private RealMatrix pc;
-    /** Evolution path for sigma. */
-    private RealMatrix ps;
-    /** Norm of ps, stored for efficiency. */
-    private double normps;
-    /** Coordinate system. */
-    private RealMatrix B;
-    /** Scaling. */
-    private RealMatrix D;
-    /** B*D, stored for efficiency. */
-    private RealMatrix BD;
-    /** Diagonal of sqrt(D), stored for efficiency. */
-    private RealMatrix diagD;
-    /** Covariance matrix. */
-    private RealMatrix C;
-    /** Diagonal of C, used for diagonalOnly. */
-    private RealMatrix diagC;
-    /** Number of iterations already performed. */
-    private int iterations;
-
-    /** History queue of best values. */
-    private double[] fitnessHistory;
-    /** Size of history queue of best values. */
-    private int historySize;
-
-    /** Random generator. */
-    private RandomGenerator random;
-
-    /** History of sigma values. */
-    private List<Double> statisticsSigmaHistory = new ArrayList<Double>();
-    /** History of mean matrix. */
-    private List<RealMatrix> statisticsMeanHistory = new ArrayList<RealMatrix>();
-    /** History of fitness values. */
-    private List<Double> statisticsFitnessHistory = new ArrayList<Double>();
-    /** History of D matrix. */
-    private List<RealMatrix> statisticsDHistory = new ArrayList<RealMatrix>();
-
-    /**
-     * Default constructor, uses default parameters
-     *
-     * @deprecated As of version 3.1: Parameter {@code lambda} must be
-     * passed with the call to {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])
-     * optimize} (whereas in the current code it is set to an undocumented value).
-     */
-    @Deprecated
-    public CMAESOptimizer() {
-        this(0);
-    }
-
-    /**
-     * @param lambda Population size.
-     * @deprecated As of version 3.1: Parameter {@code lambda} must be
-     * passed with the call to {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])
- * optimize} (whereas in the current code it is set to an undocumented value).
-     */
-    @Deprecated
-    public CMAESOptimizer(int lambda) {
-        this(lambda, null, DEFAULT_MAXITERATIONS, DEFAULT_STOPFITNESS,
-             DEFAULT_ISACTIVECMA, DEFAULT_DIAGONALONLY,
-             DEFAULT_CHECKFEASABLECOUNT, DEFAULT_RANDOMGENERATOR,
-             false, null);
-    }
-
-    /**
-     * @param lambda Population size.
-     * @param inputSigma Initial standard deviations to sample new points
-     * around the initial guess.
-     * @deprecated As of version 3.1: Parameters {@code lambda} and {@code inputSigma} must be
-     * passed with the call to {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])
-     * optimize}.
-     */
-    @Deprecated
-    public CMAESOptimizer(int lambda, double[] inputSigma) {
-        this(lambda, inputSigma, DEFAULT_MAXITERATIONS, DEFAULT_STOPFITNESS,
-             DEFAULT_ISACTIVECMA, DEFAULT_DIAGONALONLY,
-             DEFAULT_CHECKFEASABLECOUNT, DEFAULT_RANDOMGENERATOR, false);
-    }
-
-    /**
-     * @param lambda Population size.
-     * @param inputSigma Initial standard deviations to sample new points
-     * around the initial guess.
-     * @param maxIterations Maximal number of iterations.
- * @param stopFitness Stop the optimization if the objective function value is smaller than
-     * {@code stopFitness}.
-     * @param isActiveCMA Chooses the covariance matrix update method.
-     * @param diagonalOnly Number of initial iterations, where the covariance matrix
-     * remains diagonal.
-     * @param checkFeasableCount Determines how often new random objective variables are
-     * generated in case they are out of bounds.
-     * @param random Random generator.
-     * @param generateStatistics Whether statistic data is collected.
-     * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    public CMAESOptimizer(int lambda, double[] inputSigma,
-                          int maxIterations, double stopFitness,
-                          boolean isActiveCMA, int diagonalOnly, int checkFeasableCount,
-                          RandomGenerator random, boolean generateStatistics) {
-        this(lambda, inputSigma, maxIterations, stopFitness, isActiveCMA,
-             diagonalOnly, checkFeasableCount, random, generateStatistics,
-             new SimpleValueChecker());
-    }
-
-    /**
-     * @param lambda Population size.
-     * @param inputSigma Initial standard deviations to sample new points
-     * around the initial guess.
-     * @param maxIterations Maximal number of iterations.
- * @param stopFitness Stop the optimization if the objective function value is smaller than
-     * {@code stopFitness}.
-     * @param isActiveCMA Chooses the covariance matrix update method.
-     * @param diagonalOnly Number of initial iterations, where the covariance matrix
-     * remains diagonal.
-     * @param checkFeasableCount Determines how often new random objective variables are
-     * generated in case they are out of bounds.
-     * @param random Random generator.
-     * @param generateStatistics Whether statistic data is collected.
-     * @param checker Convergence checker.
-     * @deprecated As of version 3.1: Parameters {@code lambda} and {@code inputSigma} must be
-     * passed with the call to {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])
-     * optimize}.
-     */
-    @Deprecated
-    public CMAESOptimizer(int lambda, double[] inputSigma,
-                          int maxIterations, double stopFitness,
-                          boolean isActiveCMA, int diagonalOnly, int checkFeasableCount,
-                          RandomGenerator random, boolean generateStatistics,
-                          ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-        this.lambda = lambda;
-        this.inputSigma = inputSigma == null ? null : (double[]) inputSigma.clone();
-        this.maxIterations = maxIterations;
-        this.stopFitness = stopFitness;
-        this.isActiveCMA = isActiveCMA;
-        this.diagonalOnly = diagonalOnly;
-        this.checkFeasableCount = checkFeasableCount;
-        this.random = random;
-        this.generateStatistics = generateStatistics;
-    }
-
-    /**
-     * @param maxIterations Maximal number of iterations.
- * @param stopFitness Stop the optimization if the objective function value is smaller than
-     * {@code stopFitness}.
-     * @param isActiveCMA Chooses the covariance matrix update method.
-     * @param diagonalOnly Number of initial iterations, where the covariance matrix
-     * remains diagonal.
-     * @param checkFeasableCount Determines how often new random objective variables are
-     * generated in case they are out of bounds.
-     * @param random Random generator.
-     * @param generateStatistics Whether statistic data is collected.
-     * @param checker Convergence checker.
-     *
-     * @since 3.1
-     */
-    public CMAESOptimizer(int maxIterations,
-                          double stopFitness,
-                          boolean isActiveCMA,
-                          int diagonalOnly,
-                          int checkFeasableCount,
-                          RandomGenerator random,
-                          boolean generateStatistics,
-                          ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-        this.maxIterations = maxIterations;
-        this.stopFitness = stopFitness;
-        this.isActiveCMA = isActiveCMA;
-        this.diagonalOnly = diagonalOnly;
-        this.checkFeasableCount = checkFeasableCount;
-        this.random = random;
-        this.generateStatistics = generateStatistics;
-    }
-
-    /**
-     * @return History of sigma values.
-     */
-    public List<Double> getStatisticsSigmaHistory() {
-        return statisticsSigmaHistory;
-    }
-
-    /**
-     * @return History of mean matrix.
-     */
-    public List<RealMatrix> getStatisticsMeanHistory() {
-        return statisticsMeanHistory;
-    }
-
-    /**
-     * @return History of fitness values.
-     */
-    public List<Double> getStatisticsFitnessHistory() {
-        return statisticsFitnessHistory;
-    }
-
-    /**
-     * @return History of D matrix.
-     */
-    public List<RealMatrix> getStatisticsDHistory() {
-        return statisticsDHistory;
-    }
-
-    /**
-     * Input sigma values.
-     * They define the initial coordinate-wise standard deviations for
-     * sampling new search points around the initial guess.
-     * It is suggested to set them to the estimated distance from the
-     * initial to the desired optimum.
-     * Small values induce the search to be more local (and very small
-     * values are more likely to find a local optimum close to the initial
-     * guess).
-     * Too small values might however lead to early termination.
-     * @since 3.1
-     */
-    public static class Sigma implements OptimizationData {
-        /** Sigma values. */
-        private final double[] sigma;
-
-        /**
-         * @param s Sigma values.
-         * @throws NotPositiveException if any of the array entries is smaller
-         * than zero.
-         */
-        public Sigma(double[] s)
-            throws NotPositiveException {
-            for (int i = 0; i < s.length; i++) {
-                if (s[i] < 0) {
-                    throw new NotPositiveException(s[i]);
-                }
-            }
-
-            sigma = s.clone();
-        }
-
-        /**
-         * @return the sigma values.
-         */
-        public double[] getSigma() {
-            return sigma.clone();
-        }
-    }
-
-    /**
-     * Population size.
-     * The number of offspring is the primary strategy parameter.
-     * In the absence of better clues, a good default could be an
-     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
-     * number of optimized parameters.
-     * Increasing the population size improves global search properties
-     * at the expense of speed (which in general decreases at most
-     * linearly with increasing population size).
-     * @since 3.1
-     */
-    public static class PopulationSize implements OptimizationData {
-        /** Population size. */
-        private final int lambda;
-
-        /**
-         * @param size Population size.
-         * @throws NotStrictlyPositiveException if {@code size <= 0}.
-         */
-        public PopulationSize(int size)
-            throws NotStrictlyPositiveException {
-            if (size <= 0) {
-                throw new NotStrictlyPositiveException(size);
-            }
-            lambda = size;
-        }
-
-        /**
-         * @return the population size.
-         */
-        public int getPopulationSize() {
-            return lambda;
-        }
-    }
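For context, Sigma and PopulationSize above are the optimizer-specific OptimizationData of this deprecated API. A usage sketch, assuming the OptimizationData handling of the deprecated base optimizer together with the InitialGuess and SimpleBounds classes from the same org.apache.commons.math4.optimization package (the objective function, bounds, tolerances and sizes are illustrative):

    MultivariateFunction sphere = new MultivariateFunction() {
        public double value(double[] point) {
            double sum = 0;
            for (double v : point) {
                sum += v * v;       // simple convex test function
            }
            return sum;
        }
    };

    CMAESOptimizer optimizer =
        new CMAESOptimizer(30000, 0, true, 0, 0,
                           new MersenneTwister(), false,
                           new SimpleValueChecker(1e-9, 1e-9));

    PointValuePair result =
        optimizer.optimize(10000, sphere, GoalType.MINIMIZE,
                           new InitialGuess(new double[] { 1, 2, 3 }),
                           new SimpleBounds(new double[] { -5, -5, -5 },
                                            new double[] {  5,  5,  5 }),
                           new CMAESOptimizer.Sigma(new double[] { 0.3, 0.3, 0.3 }),
                           new CMAESOptimizer.PopulationSize(8));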
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param goalType Optimization type.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link org.apache.commons.math4.optimization.InitialGuess InitialGuess}</li>
-     *  <li>{@link Sigma}</li>
-     *  <li>{@link PopulationSize}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     */
-    @Override
-    protected PointValuePair optimizeInternal(int maxEval, MultivariateFunction f,
-                                              GoalType goalType,
-                                              OptimizationData... optData) {
-        // Scan "optData" for the input specific to this optimizer.
-        parseOptimizationData(optData);
-
-        // The parent's method will retrieve the common parameters from
-        // "optData" and call "doOptimize".
-        return super.optimizeInternal(maxEval, f, goalType, optData);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair doOptimize() {
-        checkParameters();
-         // -------------------- Initialization --------------------------------
-        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
-        final FitnessFunction fitfun = new FitnessFunction();
-        final double[] guess = getStartPoint();
-        // number of objective variables/problem dimension
-        dimension = guess.length;
-        initializeCMA(guess);
-        iterations = 0;
-        double bestValue = fitfun.value(guess);
-        push(fitnessHistory, bestValue);
-        PointValuePair optimum = new PointValuePair(getStartPoint(),
-                isMinimize ? bestValue : -bestValue);
-        PointValuePair lastResult = null;
-
-        // -------------------- Generation Loop --------------------------------
-
-        generationLoop:
-        for (iterations = 1; iterations <= maxIterations; iterations++) {
-            // Generate and evaluate lambda offspring
-            final RealMatrix arz = randn1(dimension, lambda);
-            final RealMatrix arx = zeros(dimension, lambda);
-            final double[] fitness = new double[lambda];
-            // generate random offspring
-            for (int k = 0; k < lambda; k++) {
-                RealMatrix arxk = null;
-                for (int i = 0; i < checkFeasableCount + 1; i++) {
-                    if (diagonalOnly <= 0) {
-                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
-                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
-                    } else {
-                        arxk = xmean.add(times(diagD,arz.getColumnMatrix(k))
-                                         .scalarMultiply(sigma));
-                    }
-                    if (i >= checkFeasableCount ||
-                        fitfun.isFeasible(arxk.getColumn(0))) {
-                        break;
-                    }
-                    // regenerate random arguments for row
-                    arz.setColumn(k, randn(dimension));
-                }
-                copyColumn(arxk, 0, arx, k);
-                try {
-                    fitness[k] = fitfun.value(arx.getColumn(k)); // compute fitness
-                } catch (TooManyEvaluationsException e) {
-                    break generationLoop;
-                }
-            }
-            // Sort by fitness and compute weighted mean into xmean
-            final int[] arindex = sortedIndices(fitness);
-            // Calculate new xmean, this is selection and recombination
-            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
-            final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
-            xmean = bestArx.multiply(weights);
-            final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
-            final RealMatrix zmean = bestArz.multiply(weights);
-            final boolean hsig = updateEvolutionPaths(zmean, xold);
-            if (diagonalOnly <= 0) {
-                updateCovariance(hsig, bestArx, arz, arindex, xold);
-            } else {
-                updateCovarianceDiagonalOnly(hsig, bestArz);
-            }
-            // Adapt step size sigma - Eq. (5)
-            sigma *= FastMath.exp(FastMath.min(1, (normps/chiN - 1) * cs / damps));
-            final double bestFitness = fitness[arindex[0]];
-            final double worstFitness = fitness[arindex[arindex.length - 1]];
-            if (bestValue > bestFitness) {
-                bestValue = bestFitness;
-                lastResult = optimum;
-                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
-                                             isMinimize ? bestFitness : -bestFitness);
-                if (getConvergenceChecker() != null && lastResult != null &&
-                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
-                    break generationLoop;
-                }
-            }
-            // handle termination criteria
-            // Break, if fitness is good enough
-            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
-                break generationLoop;
-            }
-            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
-            final double[] pcCol = pc.getColumn(0);
-            for (int i = 0; i < dimension; i++) {
-                if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
-                    break;
-                }
-                if (i >= dimension - 1) {
-                    break generationLoop;
-                }
-            }
-            for (int i = 0; i < dimension; i++) {
-                if (sigma * sqrtDiagC[i] > stopTolUpX) {
-                    break generationLoop;
-                }
-            }
-            final double historyBest = min(fitnessHistory);
-            final double historyWorst = max(fitnessHistory);
-            if (iterations > 2 &&
-                FastMath.max(historyWorst, worstFitness) -
-                FastMath.min(historyBest, bestFitness) < stopTolFun) {
-                break generationLoop;
-            }
-            if (iterations > fitnessHistory.length &&
-                historyWorst-historyBest < stopTolHistFun) {
-                break generationLoop;
-            }
-            // condition number of the covariance matrix exceeds 1e14
-            if (max(diagD)/min(diagD) > 1e7) {
-                break generationLoop;
-            }
-            // user defined termination
-            if (getConvergenceChecker() != null) {
-                final PointValuePair current
-                    = new PointValuePair(bestArx.getColumn(0),
-                                         isMinimize ? bestFitness : -bestFitness);
-                if (lastResult != null &&
-                    getConvergenceChecker().converged(iterations, current, lastResult)) {
-                    break generationLoop;
-                    }
-                lastResult = current;
-            }
-            // Adjust step size in case of equal function values (flat fitness)
-            if (bestValue == fitness[arindex[(int)(0.1+lambda/4.)]]) {
-                sigma *= FastMath.exp(0.2 + cs / damps);
-            }
-            if (iterations > 2 && FastMath.max(historyWorst, bestFitness) -
-                FastMath.min(historyBest, bestFitness) == 0) {
-                sigma *= FastMath.exp(0.2 + cs / damps);
-            }
-            // store best in history
-            push(fitnessHistory,bestFitness);
-            fitfun.setValueRange(worstFitness-bestFitness);
-            if (generateStatistics) {
-                statisticsSigmaHistory.add(sigma);
-                statisticsFitnessHistory.add(bestFitness);
-                statisticsMeanHistory.add(xmean.transpose());
-                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
-            }
-        }
-        return optimum;
-    }
-
-    /**
-     * Scans the list of (required and optional) optimization data that
-     * characterize the problem.
-     *
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link Sigma}</li>
-     *  <li>{@link PopulationSize}</li>
-     * </ul>
-     */
-    private void parseOptimizationData(OptimizationData... optData) {
-        // The existing values (as set by the previous call) are reused if
-        // not provided in the argument list.
-        for (OptimizationData data : optData) {
-            if (data instanceof Sigma) {
-                inputSigma = ((Sigma) data).getSigma();
-                continue;
-            }
-            if (data instanceof PopulationSize) {
-                lambda = ((PopulationSize) data).getPopulationSize();
-                continue;
-            }
-        }
-    }
-
-    /**
-     * Checks dimensions and values of boundaries and inputSigma if defined.
-     */
-    private void checkParameters() {
-        final double[] init = getStartPoint();
-        final double[] lB = getLowerBound();
-        final double[] uB = getUpperBound();
-
-        if (inputSigma != null) {
-            if (inputSigma.length != init.length) {
-                throw new DimensionMismatchException(inputSigma.length, init.length);
-            }
-            for (int i = 0; i < init.length; i++) {
-                if (inputSigma[i] < 0) {
-                    // XXX Remove this block in 4.0 (check performed in "Sigma" class).
-                    throw new NotPositiveException(inputSigma[i]);
-                }
-                if (inputSigma[i] > uB[i] - lB[i]) {
-                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
-                }
-            }
-        }
-    }
-
-    /**
-     * Initialization of the dynamic search parameters
-     *
-     * @param guess Initial guess for the arguments of the fitness function.
-     */
-    private void initializeCMA(double[] guess) {
-        if (lambda <= 0) {
-            // XXX Line below to replace the current one in 4.0 (MATH-879).
-            // throw new NotStrictlyPositiveException(lambda);
-            lambda = 4 + (int) (3 * FastMath.log(dimension));
-        }
-        // initialize sigma
-        final double[][] sigmaArray = new double[guess.length][1];
-        for (int i = 0; i < guess.length; i++) {
-            // XXX Line below to replace the current one in 4.0 (MATH-868).
-            // sigmaArray[i][0] = inputSigma[i];
-            sigmaArray[i][0] = inputSigma == null ? 0.3 : inputSigma[i];
-        }
-        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
-        sigma = max(insigma); // overall standard deviation
-
-        // initialize termination criteria
-        stopTolUpX = 1e3 * max(insigma);
-        stopTolX = 1e-11 * max(insigma);
-        stopTolFun = 1e-12;
-        stopTolHistFun = 1e-13;
-
-        // initialize selection strategy parameters
-        mu = lambda / 2; // number of parents/points for recombination
-        logMu2 = FastMath.log(mu + 0.5);
-        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
-        double sumw = 0;
-        double sumwq = 0;
-        for (int i = 0; i < mu; i++) {
-            double w = weights.getEntry(i, 0);
-            sumw += w;
-            sumwq += w * w;
-        }
-        weights = weights.scalarMultiply(1 / sumw);
-        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i
-
-        // initialize dynamic strategy parameters and constants
-        cc = (4 + mueff / dimension) /
-                (dimension + 4 + 2 * mueff / dimension);
-        cs = (mueff + 2) / (dimension + mueff + 3.);
-        damps = (1 + 2 * FastMath.max(0, FastMath.sqrt((mueff - 1) /
-                                                       (dimension + 1)) - 1)) *
-            FastMath.max(0.3,
-                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
-        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
-        ccovmu = FastMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
-                              ((dimension + 2) * (dimension + 2) + mueff));
-        ccov1Sep = FastMath.min(1, ccov1 * (dimension + 1.5) / 3);
-        ccovmuSep = FastMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
-        chiN = FastMath.sqrt(dimension) *
-            (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
-        // initialize CMA internal values - updated each generation
-        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
-        diagD = insigma.scalarMultiply(1 / sigma);
-        diagC = square(diagD);
-        pc = zeros(dimension, 1); // evolution paths for C and sigma
-        ps = zeros(dimension, 1); // B defines the coordinate system
-        normps = ps.getFrobeniusNorm();
-
-        B = eye(dimension, dimension);
-        D = ones(dimension, 1); // diagonal D defines the scaling
-        BD = times(B, repmat(diagD.transpose(), dimension, 1));
-        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
-        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
-        fitnessHistory = new double[historySize]; // history of fitness values
-        for (int i = 0; i < historySize; i++) {
-            fitnessHistory[i] = Double.MAX_VALUE;
-        }
-    }
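Restating the selection-related quantities computed above in the usual CMA-ES notation (mu parents; the weights below are the pre-normalization values, and mueff is invariant under the subsequent division by their sum):

\[
w_i \propto \ln\!\bigl(\mu + \tfrac{1}{2}\bigr) - \ln i \quad (i = 1,\dots,\mu),
\qquad
\mu_{\mathrm{eff}} = \frac{\bigl(\sum_{i=1}^{\mu} w_i\bigr)^2}{\sum_{i=1}^{\mu} w_i^2} .
\]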
-
-    /**
-     * Update of the evolution paths ps and pc.
-     *
-     * @param zmean Weighted row matrix of the gaussian random numbers generating
-     * the current offspring.
-     * @param xold xmean matrix of the previous generation.
-     * @return hsig flag indicating a small correction.
-     */
-    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
-        ps = ps.scalarMultiply(1 - cs).add(
-                B.multiply(zmean).scalarMultiply(FastMath.sqrt(cs * (2 - cs) * mueff)));
-        normps = ps.getFrobeniusNorm();
-        final boolean hsig = normps /
-            FastMath.sqrt(1 - FastMath.pow(1 - cs, 2 * iterations)) /
-            chiN < 1.4 + 2 / ((double) dimension + 1);
-        pc = pc.scalarMultiply(1 - cc);
-        if (hsig) {
-            pc = pc.add(xmean.subtract(xold).scalarMultiply(FastMath.sqrt(cc * (2 - cc) * mueff) / sigma));
-        }
-        return hsig;
-    }
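In standard CMA-ES notation (cs = c_sigma, cc = c_c, damps = d_sigma, chiN = E||N(0,I)||, t = iterations, n = dimension), the path updates coded above, together with the step-size adaptation applied in doOptimize, read:

\[
p_\sigma \leftarrow (1 - c_\sigma)\,p_\sigma + \sqrt{c_\sigma(2 - c_\sigma)\,\mu_{\mathrm{eff}}}\; B\,\langle z\rangle_w ,
\qquad
p_c \leftarrow (1 - c_c)\,p_c + h_\sigma \sqrt{c_c(2 - c_c)\,\mu_{\mathrm{eff}}}\;\frac{m - m_{\mathrm{old}}}{\sigma} ,
\]
\[
h_\sigma = \mathbf{1}\!\left[\frac{\lVert p_\sigma\rVert}{\chi_N\,\sqrt{1 - (1 - c_\sigma)^{2t}}} < 1.4 + \frac{2}{n + 1}\right],
\qquad
\sigma \leftarrow \sigma\,\exp\!\Bigl(\min\bigl(1,\ \tfrac{c_\sigma}{d_\sigma}\bigl(\tfrac{\lVert p_\sigma\rVert}{\chi_N} - 1\bigr)\bigr)\Bigr).
\]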
-
-    /**
-     * Update of the covariance matrix C for diagonalOnly > 0
-     *
-     * @param hsig Flag indicating a small correction.
-     * @param bestArz Fitness-sorted matrix of the gaussian random values of the
-     * current offspring.
-     */
-    private void updateCovarianceDiagonalOnly(boolean hsig,
-                                              final RealMatrix bestArz) {
-        // minor correction if hsig==false
-        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
-        oldFac += 1 - ccov1Sep - ccovmuSep;
-        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
-            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
-            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
-                 .scalarMultiply(ccovmuSep));
-        diagD = sqrt(diagC); // replaces eig(C)
-        if (diagonalOnly > 1 &&
-            iterations > diagonalOnly) {
-            // full covariance matrix from now on
-            diagonalOnly = 0;
-            B = eye(dimension, dimension);
-            BD = diag(diagD);
-            C = diag(diagC);
-        }
-    }
-
-    /**
-     * Update of the covariance matrix C.
-     *
-     * @param hsig Flag indicating a small correction.
-     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
-     * current offspring.
-     * @param arz Unsorted matrix containing the gaussian random values of the
-     * current offspring.
-     * @param arindex Indices indicating the fitness-order of the current offspring.
-     * @param xold xmean matrix of the previous generation.
-     */
-    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
-                                  final RealMatrix arz, final int[] arindex,
-                                  final RealMatrix xold) {
-        double negccov = 0;
-        if (ccov1 + ccovmu > 0) {
-            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
-                .scalarMultiply(1 / sigma); // mu difference vectors
-            final RealMatrix roneu = pc.multiply(pc.transpose())
-                .scalarMultiply(ccov1); // rank one update
-            // minor correction if hsig==false
-            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
-            oldFac += 1 - ccov1 - ccovmu;
-            if (isActiveCMA) {
-                // Adapt covariance matrix C active CMA
-                negccov = (1 - ccovmu) * 0.25 * mueff / (FastMath.pow(dimension + 2, 1.5) + 2 * mueff);
-                // keep at least 0.66 in all directions, small popsize are most
-                // critical
-                final double negminresidualvariance = 0.66;
-                // where to make up for the variance loss
-                final double negalphaold = 0.5;
-                // prepare vectors, compute negative updating matrix Cneg
-                final int[] arReverseIndex = reverse(arindex);
-                RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
-                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
-                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
-                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
-                final int[] idxReverse = reverse(idxnorms);
-                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
-                arnorms = divide(arnormsReverse, arnormsSorted);
-                final int[] idxInv = inverse(idxnorms);
-                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
-                // check and set learning rate negccov
-                final double negcovMax = (1 - negminresidualvariance) /
-                    square(arnormsInv).multiply(weights).getEntry(0, 0);
-                if (negccov > negcovMax) {
-                    negccov = negcovMax;
-                }
-                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
-                final RealMatrix artmp = BD.multiply(arzneg);
-                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
-                oldFac += negalphaold * negccov;
-                C = C.scalarMultiply(oldFac)
-                    .add(roneu) // regard old matrix
-                    .add(arpos.scalarMultiply( // plus rank one update
-                                              ccovmu + (1 - negalphaold) * negccov) // plus rank mu update
-                         .multiply(times(repmat(weights, 1, dimension),
-                                         arpos.transpose())))
-                    .subtract(Cneg.scalarMultiply(negccov));
-            } else {
-                // Adapt covariance matrix C - nonactive
-                C = C.scalarMultiply(oldFac) // regard old matrix
-                    .add(roneu) // plus rank one update
-                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
-                         .multiply(times(repmat(weights, 1, dimension),
-                                         arpos.transpose())));
-            }
-        }
-        updateBD(negccov);
-    }
-
-    /**
-     * Update B and D from C.
-     *
-     * @param negccov Negative covariance factor.
-     */
-    private void updateBD(double negccov) {
-        if (ccov1 + ccovmu + negccov > 0 &&
-            (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
-            // to achieve O(N^2)
-            C = triu(C, 0).add(triu(C, 1).transpose());
-            // enforce symmetry to prevent complex numbers
-            final EigenDecomposition eig = new EigenDecomposition(C);
-            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
-            D = eig.getD();
-            diagD = diag(D);
-            if (min(diagD) <= 0) {
-                for (int i = 0; i < dimension; i++) {
-                    if (diagD.getEntry(i, 0) < 0) {
-                        diagD.setEntry(i, 0, 0);
-                    }
-                }
-                final double tfac = max(diagD) / 1e14;
-                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
-                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
-            }
-            if (max(diagD) > 1e14 * min(diagD)) {
-                final double tfac = max(diagD) / 1e14 - min(diagD);
-                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
-                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
-            }
-            diagC = diag(C);
-            diagD = sqrt(diagD); // D contains standard deviations now
-            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
-        }
-    }
-
-    /**
-     * Pushes the current best fitness value in a history queue.
-     *
-     * @param vals History queue.
-     * @param val Current best fitness value.
-     */
-    private static void push(double[] vals, double val) {
-        for (int i = vals.length-1; i > 0; i--) {
-            vals[i] = vals[i-1];
-        }
-        vals[0] = val;
-    }
-
-    /**
-     * Sorts fitness values.
-     *
-     * @param doubles Array of values to be sorted.
-     * @return a sorted array of indices pointing into doubles.
-     */
-    private int[] sortedIndices(final double[] doubles) {
-        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
-        for (int i = 0; i < doubles.length; i++) {
-            dis[i] = new DoubleIndex(doubles[i], i);
-        }
-        Arrays.sort(dis);
-        final int[] indices = new int[doubles.length];
-        for (int i = 0; i < doubles.length; i++) {
-            indices[i] = dis[i].index;
-        }
-        return indices;
-    }
-
-    /**
-     * Used to sort fitness values. Sorting is always in lower value first
-     * order.
-     */
-    private static class DoubleIndex implements Comparable<DoubleIndex> {
-        /** Value to compare. */
-        private final double value;
-        /** Index into sorted array. */
-        private final int index;
-
-        /**
-         * @param value Value to compare.
-         * @param index Index into sorted array.
-         */
-        DoubleIndex(double value, int index) {
-            this.value = value;
-            this.index = index;
-        }
-
-        /** {@inheritDoc} */
-        public int compareTo(DoubleIndex o) {
-            return Double.compare(value, o.value);
-        }
-
-        /** {@inheritDoc} */
-        @Override
-        public boolean equals(Object other) {
-
-            if (this == other) {
-                return true;
-            }
-
-            if (other instanceof DoubleIndex) {
-                return Double.compare(value, ((DoubleIndex) other).value) == 0;
-            }
-
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override
-        public int hashCode() {
-            long bits = Double.doubleToLongBits(value);
-            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
-        }
-    }
-
-    /**
-     * Normalizes fitness values to the range [0,1]. Adds a penalty to the
-     * fitness value if out of range. The penalty is adjusted by calling
-     * setValueRange().
-     */
-    private class FitnessFunction {
-        /** Determines the penalty for boundary violations */
-        private double valueRange;
-        /**
-         * Flag indicating whether the objective variables are forced into their
-         * bounds if defined
-         */
-        private final boolean isRepairMode;
-
-        /** Simple constructor.
-         */
-        public FitnessFunction() {
-            valueRange = 1;
-            isRepairMode = true;
-        }
-
-        /**
-         * @param point Normalized objective variables.
-         * @return the objective value + penalty for violated bounds.
-         */
-        public double value(final double[] point) {
-            double value;
-            if (isRepairMode) {
-                double[] repaired = repair(point);
-                value = CMAESOptimizer.this.computeObjectiveValue(repaired) +
-                    penalty(point, repaired);
-            } else {
-                value = CMAESOptimizer.this.computeObjectiveValue(point);
-            }
-            return isMinimize ? value : -value;
-        }
-
-        /**
-         * @param x Normalized objective variables.
-         * @return {@code true} if in bounds.
-         */
-        public boolean isFeasible(final double[] x) {
-            final double[] lB = CMAESOptimizer.this.getLowerBound();
-            final double[] uB = CMAESOptimizer.this.getUpperBound();
-
-            for (int i = 0; i < x.length; i++) {
-                if (x[i] < lB[i]) {
-                    return false;
-                }
-                if (x[i] > uB[i]) {
-                    return false;
-                }
-            }
-            return true;
-        }
-
-        /**
-         * @param valueRange Adjusts the penalty computation.
-         */
-        public void setValueRange(double valueRange) {
-            this.valueRange = valueRange;
-        }
-
-        /**
-         * @param x Normalized objective variables.
-         * @return the repaired (i.e. all in bounds) objective variables.
-         */
-        private double[] repair(final double[] x) {
-            final double[] lB = CMAESOptimizer.this.getLowerBound();
-            final double[] uB = CMAESOptimizer.this.getUpperBound();
-
-            final double[] repaired = new double[x.length];
-            for (int i = 0; i < x.length; i++) {
-                if (x[i] < lB[i]) {
-                    repaired[i] = lB[i];
-                } else if (x[i] > uB[i]) {
-                    repaired[i] = uB[i];
-                } else {
-                    repaired[i] = x[i];
-                }
-            }
-            return repaired;
-        }
-
-        /**
-         * @param x Normalized objective variables.
-         * @param repaired Repaired objective variables.
-         * @return Penalty value according to the violation of the bounds.
-         */
-        private double penalty(final double[] x, final double[] repaired) {
-            double penalty = 0;
-            for (int i = 0; i < x.length; i++) {
-                double diff = FastMath.abs(x[i] - repaired[i]);
-                penalty += diff * valueRange;
-            }
-            return isMinimize ? penalty : -penalty;
-        }
-    }
-
-    // -----Matrix utility functions similar to the Matlab built-in functions------
-
-    /**
-     * @param m Input matrix
-     * @return Matrix representing the element-wise logarithm of m.
-     */
-    private static RealMatrix log(final RealMatrix m) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                d[r][c] = FastMath.log(m.getEntry(r, c));
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return Matrix representing the element-wise square root of m.
-     */
-    private static RealMatrix sqrt(final RealMatrix m) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                d[r][c] = FastMath.sqrt(m.getEntry(r, c));
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return Matrix representing the element-wise square of m.
-     */
-    private static RealMatrix square(final RealMatrix m) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                double e = m.getEntry(r, c);
-                d[r][c] = e * e;
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix 1.
-     * @param n Input matrix 2.
-     * @return the matrix where the elements of m and n are element-wise multiplied.
-     */
-    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix 1.
-     * @param n Input matrix 2.
-     * @return Matrix where the elements of m and n are element-wise divided.
-     */
-    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @param cols Columns to select.
-     * @return Matrix representing the selected columns.
-     */
-    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
-        final double[][] d = new double[m.getRowDimension()][cols.length];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < cols.length; c++) {
-                d[r][c] = m.getEntry(r, cols[c]);
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @param k Diagonal position.
-     * @return Upper triangular part of matrix.
-     */
-    private static RealMatrix triu(final RealMatrix m, int k) {
-        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return Row matrix representing the sums of the rows.
-     */
-    private static RealMatrix sumRows(final RealMatrix m) {
-        final double[][] d = new double[1][m.getColumnDimension()];
-        for (int c = 0; c < m.getColumnDimension(); c++) {
-            double sum = 0;
-            for (int r = 0; r < m.getRowDimension(); r++) {
-                sum += m.getEntry(r, c);
-            }
-            d[0][c] = sum;
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return the diagonal n-by-n matrix if m is a column matrix or the column
-     * matrix representing the diagonal if m is an n-by-n matrix.
-     */
-    private static RealMatrix diag(final RealMatrix m) {
-        if (m.getColumnDimension() == 1) {
-            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
-            for (int i = 0; i < m.getRowDimension(); i++) {
-                d[i][i] = m.getEntry(i, 0);
-            }
-            return new Array2DRowRealMatrix(d, false);
-        } else {
-            final double[][] d = new double[m.getRowDimension()][1];
-            for (int i = 0; i < m.getColumnDimension(); i++) {
-                d[i][0] = m.getEntry(i, i);
-            }
-            return new Array2DRowRealMatrix(d, false);
-        }
-    }
-
-    /**
-     * Copies a column from m1 to m2.
-     *
-     * @param m1 Source matrix.
-     * @param col1 Source column.
-     * @param m2 Target matrix.
-     * @param col2 Target column.
-     */
-    private static void copyColumn(final RealMatrix m1, int col1,
-                                   RealMatrix m2, int col2) {
-        for (int i = 0; i < m1.getRowDimension(); i++) {
-            m2.setEntry(i, col2, m1.getEntry(i, col1));
-        }
-    }
-
-    /**
-     * @param n Number of rows.
-     * @param m Number of columns.
-     * @return n-by-m matrix filled with 1.
-     */
-    private static RealMatrix ones(int n, int m) {
-        final double[][] d = new double[n][m];
-        for (int r = 0; r < n; r++) {
-            Arrays.fill(d[r], 1);
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param n Number of rows.
-     * @param m Number of columns.
-     * @return n-by-m matrix with ones on the diagonal and zeros
-     * elsewhere.
-     */
-    private static RealMatrix eye(int n, int m) {
-        final double[][] d = new double[n][m];
-        for (int r = 0; r < n; r++) {
-            if (r < m) {
-                d[r][r] = 1;
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param n Number of rows.
-     * @param m Number of columns.
-     * @return n-by-m matrix of zero values.
-     */
-    private static RealMatrix zeros(int n, int m) {
-        return new Array2DRowRealMatrix(n, m);
-    }
-
-    /**
-     * @param mat Input matrix.
-     * @param n Number of row replicates.
-     * @param m Number of column replicates.
-     * @return a matrix which replicates the input matrix in both directions.
-     */
-    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
-        final int rd = mat.getRowDimension();
-        final int cd = mat.getColumnDimension();
-        final double[][] d = new double[n * rd][m * cd];
-        for (int r = 0; r < n * rd; r++) {
-            for (int c = 0; c < m * cd; c++) {
-                d[r][c] = mat.getEntry(r % rd, c % cd);
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param start Start value.
-     * @param end End value.
-     * @param step Step size.
-     * @return a sequence as column matrix.
-     */
-    private static RealMatrix sequence(double start, double end, double step) {
-        final int size = (int) ((end - start) / step + 1);
-        final double[][] d = new double[size][1];
-        double value = start;
-        for (int r = 0; r < size; r++) {
-            d[r][0] = value;
-            value += step;
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return the maximum of the matrix element values.
-     */
-    private static double max(final RealMatrix m) {
-        double max = -Double.MAX_VALUE;
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                double e = m.getEntry(r, c);
-                if (max < e) {
-                    max = e;
-                }
-            }
-        }
-        return max;
-    }
-
-    /**
-     * @param m Input matrix.
-     * @return the minimum of the matrix element values.
-     */
-    private static double min(final RealMatrix m) {
-        double min = Double.MAX_VALUE;
-        for (int r = 0; r < m.getRowDimension(); r++) {
-            for (int c = 0; c < m.getColumnDimension(); c++) {
-                double e = m.getEntry(r, c);
-                if (min > e) {
-                    min = e;
-                }
-            }
-        }
-        return min;
-    }
-
-    /**
-     * @param m Input array.
-     * @return the maximum of the array values.
-     */
-    private static double max(final double[] m) {
-        double max = -Double.MAX_VALUE;
-        for (int r = 0; r < m.length; r++) {
-            if (max < m[r]) {
-                max = m[r];
-            }
-        }
-        return max;
-    }
-
-    /**
-     * @param m Input array.
-     * @return the minimum of the array values.
-     */
-    private static double min(final double[] m) {
-        double min = Double.MAX_VALUE;
-        for (int r = 0; r < m.length; r++) {
-            if (min > m[r]) {
-                min = m[r];
-            }
-        }
-        return min;
-    }
-
-    /**
-     * @param indices Input index array.
-     * @return the inverse of the mapping defined by indices.
-     */
-    private static int[] inverse(final int[] indices) {
-        final int[] inverse = new int[indices.length];
-        for (int i = 0; i < indices.length; i++) {
-            inverse[indices[i]] = i;
-        }
-        return inverse;
-    }
-
-    /**
-     * @param indices Input index array.
-     * @return the indices in reversed order (last is first).
-     */
-    private static int[] reverse(final int[] indices) {
-        final int[] reverse = new int[indices.length];
-        for (int i = 0; i < indices.length; i++) {
-            reverse[i] = indices[indices.length - i - 1];
-        }
-        return reverse;
-    }
-
-    /**
-     * @param size Length of random array.
-     * @return an array of Gaussian random numbers.
-     */
-    private double[] randn(int size) {
-        final double[] randn = new double[size];
-        for (int i = 0; i < size; i++) {
-            randn[i] = random.nextGaussian();
-        }
-        return randn;
-    }
-
-    /**
-     * @param size Number of rows.
-     * @param popSize Population size.
-     * @return a 2-dimensional matrix of Gaussian random numbers.
-     */
-    private RealMatrix randn1(int size, int popSize) {
-        final double[][] d = new double[size][popSize];
-        for (int r = 0; r < size; r++) {
-            for (int c = 0; c < popSize; c++) {
-                d[r][c] = random.nextGaussian();
-            }
-        }
-        return new Array2DRowRealMatrix(d, false);
-    }
-}
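
The FitnessFunction shown above handles simple bounds by clipping out-of-range
points back into the box and adding a penalty proportional to the amount of
clipping. The following is a minimal standalone sketch of that repair-plus-penalty
idea in plain Java; the class and method names are hypothetical and it is not part
of the replacement optim package.

    import java.util.function.ToDoubleFunction;

    // Hypothetical helper mirroring the repair-plus-penalty scheme of the removed
    // FitnessFunction: clip each coordinate into [lower, upper] and penalize the
    // distance between the raw and the repaired point.
    final class BoundedFitnessSketch {
        private final double[] lower;
        private final double[] upper;
        private final double valueRange; // scales the penalty, like setValueRange()

        BoundedFitnessSketch(double[] lower, double[] upper, double valueRange) {
            this.lower = lower.clone();
            this.upper = upper.clone();
            this.valueRange = valueRange;
        }

        double[] repair(double[] x) {
            final double[] r = x.clone();
            for (int i = 0; i < r.length; i++) {
                r[i] = Math.min(Math.max(r[i], lower[i]), upper[i]);
            }
            return r;
        }

        double value(double[] x, ToDoubleFunction<double[]> objective) {
            final double[] repaired = repair(x);
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                penalty += Math.abs(x[i] - repaired[i]) * valueRange;
            }
            return objective.applyAsDouble(repaired) + penalty;
        }
    }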

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/MultiDirectionalSimplex.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/MultiDirectionalSimplex.java b/src/main/java/org/apache/commons/math4/optimization/direct/MultiDirectionalSimplex.java
deleted file mode 100644
index cdc0bab..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/MultiDirectionalSimplex.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.PointValuePair;
-
-/**
- * This class implements the multi-directional direct search method.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class MultiDirectionalSimplex extends AbstractSimplex {
-    /** Default value for {@link #khi}: {@value}. */
-    private static final double DEFAULT_KHI = 2;
-    /** Default value for {@link #gamma}: {@value}. */
-    private static final double DEFAULT_GAMMA = 0.5;
-    /** Expansion coefficient. */
-    private final double khi;
-    /** Contraction coefficient. */
-    private final double gamma;
-
-    /**
-     * Build a multi-directional simplex with default coefficients.
-     * The default values are 2.0 for khi and 0.5 for gamma.
-     *
-     * @param n Dimension of the simplex.
-     */
-    public MultiDirectionalSimplex(final int n) {
-        this(n, 1d);
-    }
-
-    /**
-     * Build a multi-directional simplex with default coefficients.
-     * The default values are 2.0 for khi and 0.5 for gamma.
-     *
-     * @param n Dimension of the simplex.
-     * @param sideLength Length of the sides of the default (hypercube)
-     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     */
-    public MultiDirectionalSimplex(final int n, double sideLength) {
-        this(n, sideLength, DEFAULT_KHI, DEFAULT_GAMMA);
-    }
-
-    /**
-     * Build a multi-directional simplex with specified coefficients.
-     *
-     * @param n Dimension of the simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     */
-    public MultiDirectionalSimplex(final int n,
-                                   final double khi, final double gamma) {
-        this(n, 1d, khi, gamma);
-    }
-
-    /**
-     * Build a multi-directional simplex with specified coefficients.
-     *
-     * @param n Dimension of the simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     * @param sideLength Length of the sides of the default (hypercube)
-     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     */
-    public MultiDirectionalSimplex(final int n, double sideLength,
-                                   final double khi, final double gamma) {
-        super(n, sideLength);
-
-        this.khi   = khi;
-        this.gamma = gamma;
-    }
-
-    /**
-     * Build a multi-directional simplex with default coefficients.
-     * The default values are 2.0 for khi and 0.5 for gamma.
-     *
-     * @param steps Steps along the canonical axes representing box edges.
-     * They may be negative but not zero. See
-     */
-    public MultiDirectionalSimplex(final double[] steps) {
-        this(steps, DEFAULT_KHI, DEFAULT_GAMMA);
-    }
-
-    /**
-     * Build a multi-directional simplex with specified coefficients.
-     *
-     * @param steps Steps along the canonical axes representing box edges.
-     * They may be negative but not zero. See
-     * {@link AbstractSimplex#AbstractSimplex(double[])}.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     */
-    public MultiDirectionalSimplex(final double[] steps,
-                                   final double khi, final double gamma) {
-        super(steps);
-
-        this.khi   = khi;
-        this.gamma = gamma;
-    }
-
-    /**
-     * Build a multi-directional simplex with default coefficients.
-     * The default values are 2.0 for khi and 0.5 for gamma.
-     *
-     * @param referenceSimplex Reference simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
-     */
-    public MultiDirectionalSimplex(final double[][] referenceSimplex) {
-        this(referenceSimplex, DEFAULT_KHI, DEFAULT_GAMMA);
-    }
-
-    /**
-     * Build a multi-directional simplex with specified coefficients.
-     *
-     * @param referenceSimplex Reference simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     * @throws org.apache.commons.math4.exception.NotStrictlyPositiveException
-     * if the reference simplex does not contain at least one point.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if there is a dimension mismatch in the reference simplex.
-     */
-    public MultiDirectionalSimplex(final double[][] referenceSimplex,
-                                   final double khi, final double gamma) {
-        super(referenceSimplex);
-
-        this.khi   = khi;
-        this.gamma = gamma;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public void iterate(final MultivariateFunction evaluationFunction,
-                        final Comparator<PointValuePair> comparator) {
-        // Save the original simplex.
-        final PointValuePair[] original = getPoints();
-        final PointValuePair best = original[0];
-
-        // Perform a reflection step.
-        final PointValuePair reflected = evaluateNewSimplex(evaluationFunction,
-                                                                original, 1, comparator);
-        if (comparator.compare(reflected, best) < 0) {
-            // Compute the expanded simplex.
-            final PointValuePair[] reflectedSimplex = getPoints();
-            final PointValuePair expanded = evaluateNewSimplex(evaluationFunction,
-                                                                   original, khi, comparator);
-            if (comparator.compare(reflected, expanded) <= 0) {
-                // Keep the reflected simplex.
-                setPoints(reflectedSimplex);
-            }
-            // Otherwise, keep the expanded simplex (already stored by evaluateNewSimplex).
-            return;
-        }
-
-        // Compute the contracted simplex.
-        evaluateNewSimplex(evaluationFunction, original, gamma, comparator);
-
-    }
-
-    /**
-     * Compute and evaluate a new simplex.
-     *
-     * @param evaluationFunction Evaluation function.
-     * @param original Original simplex (to be preserved).
-     * @param coeff Linear coefficient.
-     * @param comparator Comparator to use to sort simplex vertices from best
-     * to poorest.
-     * @return the best point in the transformed simplex.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     */
-    private PointValuePair evaluateNewSimplex(final MultivariateFunction evaluationFunction,
-                                                  final PointValuePair[] original,
-                                                  final double coeff,
-                                                  final Comparator<PointValuePair> comparator) {
-        final double[] xSmallest = original[0].getPointRef();
-        // Perform a linear transformation on all the simplex points,
-        // except the first one.
-        setPoint(0, original[0]);
-        final int dim = getDimension();
-        for (int i = 1; i < getSize(); i++) {
-            final double[] xOriginal = original[i].getPointRef();
-            final double[] xTransformed = new double[dim];
-            for (int j = 0; j < dim; j++) {
-                xTransformed[j] = xSmallest[j] + coeff * (xSmallest[j] - xOriginal[j]);
-            }
-            setPoint(i, new PointValuePair(xTransformed, Double.NaN, false));
-        }
-
-        // Evaluate the simplex.
-        evaluate(evaluationFunction, comparator);
-
-        return getPoint(0);
-    }
-}
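
The core of the class removed above is the linear transformation applied in
evaluateNewSimplex: every vertex except the best one is moved through the best
vertex with a coefficient of 1 (reflection), khi (expansion, default 2) or gamma
(contraction, default 0.5). A minimal sketch of that single step on plain
double[] vertices, with a hypothetical method name:

    // x'[j] = best[j] + coeff * (best[j] - x[j]) for every vertex except the best one.
    // coeff = 1 reflects, coeff = khi expands, coeff = gamma contracts the simplex.
    static double[][] transformSimplex(double[][] simplex, double coeff) {
        final double[] best = simplex[0];               // vertices assumed sorted, best first
        final double[][] result = new double[simplex.length][];
        result[0] = best.clone();                       // the best vertex is kept unchanged
        for (int i = 1; i < simplex.length; i++) {
            final double[] transformed = new double[best.length];
            for (int j = 0; j < best.length; j++) {
                transformed[j] = best[j] + coeff * (best[j] - simplex[i][j]);
            }
            result[i] = transformed;
        }
        return result;
    }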

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapter.java b/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapter.java
deleted file mode 100644
index d246ed4..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionMappingAdapter.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.analysis.function.Logit;
-import org.apache.commons.math4.analysis.function.Sigmoid;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathUtils;
-
-/**
- * <p>Adapter for mapping bounded {@link MultivariateFunction} to unbounded ones.</p>
- *
- * <p>
- * This adapter can be used to wrap functions subject to simple bounds on
- * parameters so they can be used by optimizers that do <em>not</em> directly
- * support simple bounds.
- * </p>
- * <p>
- * The principle is that the user function that will be wrapped will see its
- * parameters bounded as required, i.e when its {@code value} method is called
- * with argument array {@code point}, the elements array will fulfill requirement
- * {@code lower[i] <= point[i] <= upper[i]} for all i. Some of the components
- * may be unbounded or bounded only on one side if the corresponding bound is
- * set to an infinite value. The optimizer will not manage the user function by
- * itself, but it will handle this adapter and it is this adapter that will take
- * care that the bounds are fulfilled. The adapter {@link #value(double[])} method will
- * be called by the optimizer with unbounded parameters, and the adapter will map
- * the unbounded values to the bounded range using appropriate functions such as
- * {@link Sigmoid} for doubly bounded elements.
- * </p>
- * <p>
- * As the optimizer sees only unbounded parameters, it should be noted that the
- * start point or simplex expected by the optimizer should be unbounded, so the
- * user is responsible for converting bounded points to unbounded ones by calling
- * {@link #boundedToUnbounded(double[])} before providing them to the optimizer.
- * For the same reason, the point returned by the {@link
- * org.apache.commons.math4.optimization.BaseMultivariateOptimizer#optimize(int,
- * MultivariateFunction, org.apache.commons.math4.optimization.GoalType, double[])}
- * method is unbounded. To convert this point back to bounded coordinates, users must
- * call {@link #unboundedToBounded(double[])} themselves.</p>
- * <p>
- * This adapter is only a poor man's solution to simple bounds optimization constraints
- * that can be used with simple optimizers like {@link SimplexOptimizer} with {@link
- * NelderMeadSimplex} or {@link MultiDirectionalSimplex}. A better solution is to use
- * an optimizer that directly supports simple bounds like {@link CMAESOptimizer} or
- * {@link BOBYQAOptimizer}. One caveat of this poor man's solution is that behavior near
- * the bounds may be numerically unstable as bounds are mapped from infinite values.
- * Another caveat is that convergence values are evaluated by the optimizer with respect
- * to unbounded variables, so there will be scale differences when converted to bounded
- * variables.
- * </p>
- *
- * @see MultivariateFunctionPenaltyAdapter
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-
-@Deprecated
-public class MultivariateFunctionMappingAdapter implements MultivariateFunction {
-
-    /** Underlying bounded function. */
-    private final MultivariateFunction bounded;
-
-    /** Mapping functions. */
-    private final Mapper[] mappers;
-
-    /** Simple constructor.
-     * @param bounded bounded function
-     * @param lower lower bounds for each element of the input parameters array
-     * (some elements may be set to {@code Double.NEGATIVE_INFINITY} for
-     * unbounded values)
-     * @param upper upper bounds for each element of the input parameters array
-     * (some elements may be set to {@code Double.POSITIVE_INFINITY} for
-     * unbounded values)
-     * @exception DimensionMismatchException if lower and upper bounds are not
-     * consistent, either according to dimension or to values
-     */
-    public MultivariateFunctionMappingAdapter(final MultivariateFunction bounded,
-                                                  final double[] lower, final double[] upper) {
-
-        // safety checks
-        MathUtils.checkNotNull(lower);
-        MathUtils.checkNotNull(upper);
-        if (lower.length != upper.length) {
-            throw new DimensionMismatchException(lower.length, upper.length);
-        }
-        for (int i = 0; i < lower.length; ++i) {
-            // note the following test is written in such a way it also fails for NaN
-            if (!(upper[i] >= lower[i])) {
-                throw new NumberIsTooSmallException(upper[i], lower[i], true);
-            }
-        }
-
-        this.bounded = bounded;
-        this.mappers = new Mapper[lower.length];
-        for (int i = 0; i < mappers.length; ++i) {
-            if (Double.isInfinite(lower[i])) {
-                if (Double.isInfinite(upper[i])) {
-                    // element is unbounded, no transformation is needed
-                    mappers[i] = new NoBoundsMapper();
-                } else {
-                    // element is simple-bounded on the upper side
-                    mappers[i] = new UpperBoundMapper(upper[i]);
-                }
-            } else {
-                if (Double.isInfinite(upper[i])) {
-                    // element is simple-bounded on the lower side
-                    mappers[i] = new LowerBoundMapper(lower[i]);
-                } else {
-                    // element is double-bounded
-                    mappers[i] = new LowerUpperBoundMapper(lower[i], upper[i]);
-                }
-            }
-        }
-
-    }
-
-    /** Map an array from unbounded to bounded.
-     * @param point unbounded value
-     * @return bounded value
-     */
-    public double[] unboundedToBounded(double[] point) {
-
-        // map unbounded input point to bounded point
-        final double[] mapped = new double[mappers.length];
-        for (int i = 0; i < mappers.length; ++i) {
-            mapped[i] = mappers[i].unboundedToBounded(point[i]);
-        }
-
-        return mapped;
-
-    }
-
-    /** Map an array from bounded to unbounded.
-     * @param point bounded value
-     * @return unbounded value
-     */
-    public double[] boundedToUnbounded(double[] point) {
-
-        // map bounded input point to unbounded point
-        final double[] mapped = new double[mappers.length];
-        for (int i = 0; i < mappers.length; ++i) {
-            mapped[i] = mappers[i].boundedToUnbounded(point[i]);
-        }
-
-        return mapped;
-
-    }
-
-    /** Compute the underlying function value from an unbounded point.
-     * <p>
-     * This method simply bounds the unbounded point using the mappings
-     * set up at construction and calls the underlying function using
-     * the bounded point.
-     * </p>
-     * @param point unbounded value
-     * @return underlying function value
-     * @see #unboundedToBounded(double[])
-     */
-    public double value(double[] point) {
-        return bounded.value(unboundedToBounded(point));
-    }
-
-    /** Mapping interface. */
-    private interface Mapper {
-
-        /** Map a value from unbounded to bounded.
-         * @param y unbounded value
-         * @return bounded value
-         */
-        double unboundedToBounded(double y);
-
-        /** Map a value from bounded to unbounded.
-         * @param x bounded value
-         * @return unbounded value
-         */
-        double boundedToUnbounded(double x);
-
-    }
-
-    /** Local class for no bounds mapping. */
-    private static class NoBoundsMapper implements Mapper {
-
-        /** Simple constructor.
-         */
-        public NoBoundsMapper() {
-        }
-
-        /** {@inheritDoc} */
-        public double unboundedToBounded(final double y) {
-            return y;
-        }
-
-        /** {@inheritDoc} */
-        public double boundedToUnbounded(final double x) {
-            return x;
-        }
-
-    }
-
-    /** Local class for lower bounds mapping. */
-    private static class LowerBoundMapper implements Mapper {
-
-        /** Lower bound. */
-        private final double lower;
-
-        /** Simple constructor.
-         * @param lower lower bound
-         */
-        public LowerBoundMapper(final double lower) {
-            this.lower = lower;
-        }
-
-        /** {@inheritDoc} */
-        public double unboundedToBounded(final double y) {
-            return lower + FastMath.exp(y);
-        }
-
-        /** {@inheritDoc} */
-        public double boundedToUnbounded(final double x) {
-            return FastMath.log(x - lower);
-        }
-
-    }
-
-    /** Local class for upper bounds mapping. */
-    private static class UpperBoundMapper implements Mapper {
-
-        /** Upper bound. */
-        private final double upper;
-
-        /** Simple constructor.
-         * @param upper upper bound
-         */
-        public UpperBoundMapper(final double upper) {
-            this.upper = upper;
-        }
-
-        /** {@inheritDoc} */
-        public double unboundedToBounded(final double y) {
-            return upper - FastMath.exp(-y);
-        }
-
-        /** {@inheritDoc} */
-        public double boundedToUnbounded(final double x) {
-            return -FastMath.log(upper - x);
-        }
-
-    }
-
-    /** Local class for lower and upper bounds mapping. */
-    private static class LowerUpperBoundMapper implements Mapper {
-
-        /** Function from unbounded to bounded. */
-        private final UnivariateFunction boundingFunction;
-
-        /** Function from bounded to unbounded. */
-        private final UnivariateFunction unboundingFunction;
-
-        /** Simple constructor.
-         * @param lower lower bound
-         * @param upper upper bound
-         */
-        public LowerUpperBoundMapper(final double lower, final double upper) {
-            boundingFunction   = new Sigmoid(lower, upper);
-            unboundingFunction = new Logit(lower, upper);
-        }
-
-        /** {@inheritDoc} */
-        public double unboundedToBounded(final double y) {
-            return boundingFunction.value(y);
-        }
-
-        /** {@inheritDoc} */
-        public double boundedToUnbounded(final double x) {
-            return unboundingFunction.value(x);
-        }
-
-    }
-
-}
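
The mapping used by the adapter deleted above is: identity for unbounded
components, lower + exp(y) or upper - exp(-y) for components bounded on one side,
and a Sigmoid/Logit pair for doubly bounded components. A minimal sketch of the
doubly bounded case in plain Java (hypothetical method names, same formulas as the
Sigmoid and Logit functions used above):

    // Sigmoid scaled to (lower, upper): maps any unbounded y into the open interval.
    static double unboundedToBounded(double y, double lower, double upper) {
        return lower + (upper - lower) / (1 + Math.exp(-y));
    }

    // Logit: inverse of the scaled sigmoid, maps a bounded x back to the real line.
    static double boundedToUnbounded(double x, double lower, double upper) {
        return Math.log((x - lower) / (upper - x));
    }

As the class Javadoc notes, the intended workflow was to convert the bounded start
point with boundedToUnbounded, run the unconstrained optimizer on the adapter, and
convert the returned point back with unboundedToBounded.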

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapter.java b/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapter.java
deleted file mode 100644
index 113ebc8..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/MultivariateFunctionPenaltyAdapter.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathUtils;
-
-/**
- * <p>Adapter extending bounded {@link MultivariateFunction} to an unbounded
- * domain using a penalty function.</p>
- *
- * <p>
- * This adapter can be used to wrap functions subject to simple bounds on
- * parameters so they can be used by optimizers that do <em>not</em> directly
- * support simple bounds.
- * </p>
- * <p>
- * The principle is that the user function that will be wrapped will see its
- * parameters bounded as required, i.e. when its {@code value} method is called
- * with argument array {@code point}, the elements of the array will fulfill the requirement
- * {@code lower[i] <= point[i] <= upper[i]} for all i. Some of the components
- * may be unbounded or bounded only on one side if the corresponding bound is
- * set to an infinite value. The optimizer will not manage the user function by
- * itself, but it will handle this adapter and it is this adapter that will take
- * care that the bounds are fulfilled. The adapter {@link #value(double[])} method will
- * be called by the optimizer with unbounded parameters, and the adapter will check
- * whether the parameters are within range. If they are, the underlying
- * user function will be called; if they are not, the value of a penalty function
- * will be returned instead.
- * </p>
- * <p>
- * This adapter is only a poor man's solution to simple bounds optimization constraints
- * that can be used with simple optimizers like {@link SimplexOptimizer} with {@link
- * NelderMeadSimplex} or {@link MultiDirectionalSimplex}. A better solution is to use
- * an optimizer that directly supports simple bounds like {@link CMAESOptimizer} or
- * {@link BOBYQAOptimizer}. One caveat of this poor man's solution is that if the start point
- * or start simplex is completely outside of the allowed range, only the penalty function
- * is used, and the optimizer may converge without ever entering the range.
- * </p>
- *
- * @see MultivariateFunctionMappingAdapter
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-
-@Deprecated
-public class MultivariateFunctionPenaltyAdapter implements MultivariateFunction {
-
-    /** Underlying bounded function. */
-    private final MultivariateFunction bounded;
-
-    /** Lower bounds. */
-    private final double[] lower;
-
-    /** Upper bounds. */
-    private final double[] upper;
-
-    /** Penalty offset. */
-    private final double offset;
-
-    /** Penalty scales. */
-    private final double[] scale;
-
-    /** Simple constructor.
-     * <p>
-     * When the points provided by the optimizer are out of range, the value of the
-     * penalty function will be used instead of the value of the underlying
-     * function. In order for this penalty to be effective in rejecting this
-     * point during the optimization process, the penalty function value should
-     * be defined with care. This value is computed as:
-     * <pre>
-     *   penalty(point) = offset + &sum;<sub>i</sub>[scale[i] * &radic;|point[i]-boundary[i]|]
-     * </pre>
-     * where indices i correspond to all the components that violate their boundaries.
-     * </p>
-     * <p>
-     * So when attempting a function minimization, the offset should be larger than
-     * the maximum expected value of the underlying function and the scale components
-     * should all be positive. When attempting a function maximization, the offset
-     * should be smaller than the minimum expected value of the underlying function
-     * and the scale components should all be negative.
-     * </p>
-     * <p>
-     * These choices for the penalty function have two properties. First, all
-     * out-of-range points will return a function value that is worse than the value
-     * returned by any in-range point. Second, the penalty is worse for large
-     * boundary violations than for small ones, so the optimizer has a hint
-     * about the direction in which it should search for acceptable points.
-     * </p>
-     * @param bounded bounded function
-     * @param lower lower bounds for each element of the input parameters array
-     * (some elements may be set to {@code Double.NEGATIVE_INFINITY} for
-     * unbounded values)
-     * @param upper upper bounds for each element of the input parameters array
-     * (some elements may be set to {@code Double.POSITIVE_INFINITY} for
-     * unbounded values)
-     * @param offset base offset of the penalty function
-     * @param scale scale of the penalty function
-     * @exception DimensionMismatchException if lower bounds, upper bounds and
-     * scales are not consistent, either according to dimension or to boundary
-     * values
-     */
-    public MultivariateFunctionPenaltyAdapter(final MultivariateFunction bounded,
-                                                  final double[] lower, final double[] upper,
-                                                  final double offset, final double[] scale) {
-
-        // safety checks
-        MathUtils.checkNotNull(lower);
-        MathUtils.checkNotNull(upper);
-        MathUtils.checkNotNull(scale);
-        if (lower.length != upper.length) {
-            throw new DimensionMismatchException(lower.length, upper.length);
-        }
-        if (lower.length != scale.length) {
-            throw new DimensionMismatchException(lower.length, scale.length);
-        }
-        for (int i = 0; i < lower.length; ++i) {
-            // note the following test is written in such a way it also fails for NaN
-            if (!(upper[i] >= lower[i])) {
-                throw new NumberIsTooSmallException(upper[i], lower[i], true);
-            }
-        }
-
-        this.bounded = bounded;
-        this.lower   = lower.clone();
-        this.upper   = upper.clone();
-        this.offset  = offset;
-        this.scale   = scale.clone();
-
-    }
-
-    /** Compute the underlying function value from an unbounded point.
-     * <p>
-     * This method simply returns the value of the underlying function
-     * if the unbounded point already fulfills the bounds, and compute
-     * a replacement value using the offset and scale if bounds are
-     * violated, without calling the function at all.
-     * </p>
-     * @param point unbounded point
-     * @return either underlying function value or penalty function value
-     */
-    public double value(double[] point) {
-
-        for (int i = 0; i < scale.length; ++i) {
-            if ((point[i] < lower[i]) || (point[i] > upper[i])) {
-                // bound violation starting at this component
-                double sum = 0;
-                for (int j = i; j < scale.length; ++j) {
-                    final double overshoot;
-                    if (point[j] < lower[j]) {
-                        overshoot = scale[j] * (lower[j] - point[j]);
-                    } else if (point[j] > upper[j]) {
-                        overshoot = scale[j] * (point[j] - upper[j]);
-                    } else {
-                        overshoot = 0;
-                    }
-                    sum += FastMath.sqrt(overshoot);
-                }
-                return offset + sum;
-            }
-        }
-
-        // all boundaries are fulfilled, we are in the expected
-        // domain of the underlying function
-        return bounded.value(point);
-
-    }
-
-}
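
The value() method above switches to a penalty as soon as one component violates
its bounds: offset plus, for every component from the first violating one onwards,
the square root of scale[j] times the overshoot. A minimal standalone sketch that
mirrors the deleted logic on plain arrays (hypothetical method name):

    // Returns the underlying objective when all bounds hold, otherwise the penalty.
    static double penalizedValue(double[] point, double[] lower, double[] upper,
                                 double offset, double[] scale,
                                 java.util.function.ToDoubleFunction<double[]> bounded) {
        for (int i = 0; i < point.length; i++) {
            if (point[i] < lower[i] || point[i] > upper[i]) {
                double sum = 0;                          // bound violation: build the penalty
                for (int j = i; j < point.length; j++) {
                    final double overshoot;
                    if (point[j] < lower[j]) {
                        overshoot = scale[j] * (lower[j] - point[j]);
                    } else if (point[j] > upper[j]) {
                        overshoot = scale[j] * (point[j] - upper[j]);
                    } else {
                        overshoot = 0;
                    }
                    sum += Math.sqrt(overshoot);
                }
                return offset + sum;
            }
        }
        return bounded.applyAsDouble(point);             // all bounds fulfilled
    }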


[16/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/OptimizationData.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/OptimizationData.java b/src/main/java/org/apache/commons/math4/optimization/OptimizationData.java
deleted file mode 100644
index 1ddf3c7..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/OptimizationData.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization;
-
-/**
- * Marker interface.
- * Implementations will provide functionality (optional or required) needed
- * by the optimizers, and those will need to check the actual type of the
- * arguments and perform the appropriate cast in order to access the data
- * they need.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public interface OptimizationData {}
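
Since OptimizationData is only a marker, the pattern its Javadoc describes is an
instanceof scan over the arguments followed by a cast. A self-contained sketch of
that dispatch with hypothetical carrier classes (not the actual optimizer code):

    interface OptData {}                                  // stand-in for the marker interface

    final class StartPoint implements OptData {           // hypothetical data carrier
        final double[] point;
        StartPoint(double[] point) { this.point = point.clone(); }
    }

    final class EvalBudget implements OptData {           // hypothetical data carrier
        final int maxEval;
        EvalBudget(int maxEval) { this.maxEval = maxEval; }
    }

    static void parse(OptData... optData) {
        for (OptData data : optData) {
            if (data instanceof StartPoint) {
                double[] start = ((StartPoint) data).point;   // cast to reach the payload
                // ... store the start point
            } else if (data instanceof EvalBudget) {
                int budget = ((EvalBudget) data).maxEval;
                // ... store the evaluation limit
            }
            // unrecognized implementations are simply ignored
        }
    }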

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/PointValuePair.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/PointValuePair.java b/src/main/java/org/apache/commons/math4/optimization/PointValuePair.java
deleted file mode 100644
index d3831e9..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/PointValuePair.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import java.io.Serializable;
-
-import org.apache.commons.math4.util.Pair;
-
-/**
- * This class holds a point and the value of an objective function at
- * that point.
- *
- * @see PointVectorValuePair
- * @see org.apache.commons.math4.analysis.MultivariateFunction
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class PointValuePair extends Pair<double[], Double> implements Serializable {
-
-    /** Serializable UID. */
-    private static final long serialVersionUID = 20120513L;
-
-    /**
-     * Builds a point/objective function value pair.
-     *
-     * @param point Point coordinates. This instance will store
-     * a copy of the array, not the array passed as argument.
-     * @param value Value of the objective function at the point.
-     */
-    public PointValuePair(final double[] point,
-                          final double value) {
-        this(point, value, true);
-    }
-
-    /**
-     * Builds a point/objective function value pair.
-     *
-     * @param point Point coordinates.
-     * @param value Value of the objective function at the point.
-     * @param copyArray if {@code true}, the input array will be copied,
-     * otherwise it will be referenced.
-     */
-    public PointValuePair(final double[] point,
-                          final double value,
-                          final boolean copyArray) {
-        super(copyArray ? ((point == null) ? null :
-                           point.clone()) :
-              point,
-              value);
-    }
-
-    /**
-     * Gets the point.
-     *
-     * @return a copy of the stored point.
-     */
-    public double[] getPoint() {
-        final double[] p = getKey();
-        return p == null ? null : p.clone();
-    }
-
-    /**
-     * Gets a reference to the point.
-     *
-     * @return a reference to the internal array storing the point.
-     */
-    public double[] getPointRef() {
-        return getKey();
-    }
-
-    /**
-     * Replace the instance with a data transfer object for serialization.
-     * @return data transfer object that will be serialized
-     */
-    private Object writeReplace() {
-        return new DataTransferObject(getKey(), getValue());
-    }
-
-    /** Internal class used only for serialization. */
-    private static class DataTransferObject implements Serializable {
-        /** Serializable UID. */
-        private static final long serialVersionUID = 20120513L;
-        /**
-         * Point coordinates.
-         * @Serial
-         */
-        private final double[] point;
-        /**
-         * Value of the objective function at the point.
-         * @Serial
-         */
-        private final double value;
-
-        /** Simple constructor.
-         * @param point Point coordinates.
-         * @param value Value of the objective function at the point.
-         */
-        public DataTransferObject(final double[] point, final double value) {
-            this.point = point.clone();
-            this.value = value;
-        }
-
-        /** Replace the deserialized data transfer object with a {@link PointValuePair}.
-         * @return replacement {@link PointValuePair}
-         */
-        private Object readResolve() {
-            return new PointValuePair(point, value, false);
-        }
-
-    }
-
-}
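
For readers skimming this removal: the Javadoc above describes the copy-versus-reference behaviour of the deleted PointValuePair. A minimal usage sketch against the removed org.apache.commons.math4.optimization API (the surrounding driver class is hypothetical, not part of this commit):

    import org.apache.commons.math4.optimization.PointValuePair;

    public class PointValuePairSketch {
        public static void main(String[] args) {
            double[] point = {1.0, 2.0};
            // The two-argument constructor stores a defensive copy of the array.
            PointValuePair pair = new PointValuePair(point, 3.5);
            point[0] = 99.0;
            System.out.println(pair.getPoint()[0]);     // still 1.0
            // getPointRef() exposes the internal array, so writes are visible.
            pair.getPointRef()[1] = -7.0;
            System.out.println(pair.getPoint()[1]);     // -7.0
        }
    }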

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/PointVectorValuePair.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/PointVectorValuePair.java b/src/main/java/org/apache/commons/math4/optimization/PointVectorValuePair.java
deleted file mode 100644
index 410ba67..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/PointVectorValuePair.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import java.io.Serializable;
-
-import org.apache.commons.math4.util.Pair;
-
-/**
- * This class holds a point and the vectorial value of an objective function at
- * that point.
- *
- * @see PointValuePair
- * @see org.apache.commons.math4.analysis.MultivariateVectorFunction
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class PointVectorValuePair extends Pair<double[], double[]> implements Serializable {
-
-    /** Serializable UID. */
-    private static final long serialVersionUID = 20120513L;
-
-    /**
-     * Builds a point/objective function value pair.
-     *
-     * @param point Point coordinates. This instance will store
-     * a copy of the array, not the array passed as argument.
-     * @param value Value of the objective function at the point.
-     */
-    public PointVectorValuePair(final double[] point,
-                                final double[] value) {
-        this(point, value, true);
-    }
-
-    /**
-     * Build a point/objective function value pair.
-     *
-     * @param point Point coordinates.
-     * @param value Value of the objective function at the point.
-     * @param copyArray if {@code true}, the input arrays will be copied,
-     * otherwise they will be referenced.
-     */
-    public PointVectorValuePair(final double[] point,
-                                final double[] value,
-                                final boolean copyArray) {
-        super(copyArray ?
-              ((point == null) ? null :
-               point.clone()) :
-              point,
-              copyArray ?
-              ((value == null) ? null :
-               value.clone()) :
-              value);
-    }
-
-    /**
-     * Gets the point.
-     *
-     * @return a copy of the stored point.
-     */
-    public double[] getPoint() {
-        final double[] p = getKey();
-        return p == null ? null : p.clone();
-    }
-
-    /**
-     * Gets a reference to the point.
-     *
-     * @return a reference to the internal array storing the point.
-     */
-    public double[] getPointRef() {
-        return getKey();
-    }
-
-    /**
-     * Gets the value of the objective function.
-     *
-     * @return a copy of the stored value of the objective function.
-     */
-    @Override
-    public double[] getValue() {
-        final double[] v = super.getValue();
-        return v == null ? null : v.clone();
-    }
-
-    /**
-     * Gets a reference to the value of the objective function.
-     *
-     * @return a reference to the internal array storing the value of
-     * the objective function.
-     */
-    public double[] getValueRef() {
-        return super.getValue();
-    }
-
-    /**
-     * Replace the instance with a data transfer object for serialization.
-     * @return data transfer object that will be serialized
-     */
-    private Object writeReplace() {
-        return new DataTransferObject(getKey(), getValue());
-    }
-
-    /** Internal class used only for serialization. */
-    private static class DataTransferObject implements Serializable {
-        /** Serializable UID. */
-        private static final long serialVersionUID = 20120513L;
-        /**
-         * Point coordinates.
-         * @Serial
-         */
-        private final double[] point;
-        /**
-         * Value of the objective function at the point.
-         * @Serial
-         */
-        private final double[] value;
-
-        /** Simple constructor.
-         * @param point Point coordinates.
-         * @param value Value of the objective function at the point.
-         */
-        public DataTransferObject(final double[] point, final double[] value) {
-            this.point = point.clone();
-            this.value = value.clone();
-        }
-
-        /** Replace the deserialized data transfer object with a {@link PointVectorValuePair}.
-         * @return replacement {@link PointVectorValuePair}
-         */
-        private Object readResolve() {
-            return new PointVectorValuePair(point, value, false);
-        }
-    }
-}
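
The same copy-versus-reference distinction applies to the vector-valued variant just removed; a short sketch (assumed usage, not from this commit):

    import org.apache.commons.math4.optimization.PointVectorValuePair;

    public class PointVectorValuePairSketch {
        public static void main(String[] args) {
            PointVectorValuePair pair = new PointVectorValuePair(
                    new double[] {1.0, 2.0}, new double[] {0.5, 0.5});
            double[] copy = pair.getValue();     // defensive copy of the model values
            pair.getValueRef()[0] = 9.0;         // writes through the internal array
            System.out.println(copy[0]);             // 0.5
            System.out.println(pair.getValue()[0]);  // 9.0
        }
    }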

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/SimpleBounds.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/SimpleBounds.java b/src/main/java/org/apache/commons/math4/optimization/SimpleBounds.java
deleted file mode 100644
index 097ba8a..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/SimpleBounds.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-/**
- * Simple optimization constraints: lower and upper bounds.
- * The valid range of the parameters is an interval that can be infinite
- * (in one or both directions).
- * <br/>
- * Immutable class.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class SimpleBounds implements OptimizationData {
-    /** Lower bounds. */
-    private final double[] lower;
-    /** Upper bounds. */
-    private final double[] upper;
-
-    /**
-     * @param lB Lower bounds.
-     * @param uB Upper bounds.
-     */
-    public SimpleBounds(double[] lB,
-                        double[] uB) {
-        lower = lB.clone();
-        upper = uB.clone();
-    }
-
-    /**
-     * Gets the lower bounds.
-     *
-     * @return the lower bounds.
-     */
-    public double[] getLower() {
-        return lower.clone();
-    }
-    /**
-     * Gets the upper bounds.
-     *
-     * @return the upper bounds.
-     */
-    public double[] getUpper() {
-        return upper.clone();
-    }
-}
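
As the removed class comment notes, the bounds may be infinite in one or both directions. A hedged sketch of constructing such bounds (usage assumed, not shown in this commit):

    import org.apache.commons.math4.optimization.SimpleBounds;

    public class SimpleBoundsSketch {
        public static void main(String[] args) {
            // Second parameter is unbounded below and above.
            double[] lower = {0.0, Double.NEGATIVE_INFINITY};
            double[] upper = {1.0, Double.POSITIVE_INFINITY};
            SimpleBounds bounds = new SimpleBounds(lower, upper);
            // The class clones its inputs and its outputs.
            System.out.println(bounds.getLower()[1]);   // -Infinity
        }
    }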

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/SimplePointChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/SimplePointChecker.java b/src/main/java/org/apache/commons/math4/optimization/SimplePointChecker.java
deleted file mode 100644
index 0651725..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/SimplePointChecker.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Pair;
-
-/**
- * Simple implementation of the {@link ConvergenceChecker} interface using
- * only point coordinates.
- *
- * Convergence is considered to have been reached if either the relative
- * difference between each point coordinate is smaller than a threshold,
- * or the absolute difference between the point coordinates is smaller
- * than another threshold.
- * <br/>
- * The {@link #converged(int,Pair,Pair) converged} method will also return
- * {@code true} if the number of iterations has been set (see
- * {@link #SimplePointChecker(double,double,int) this constructor}).
- *
- * @param <PAIR> Type of the (point, value) pair.
- * The type of the "value" part of the pair (not used by this class).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class SimplePointChecker<PAIR extends Pair<double[], ? extends Object>>
-    extends AbstractConvergenceChecker<PAIR> {
-    /**
-     * If {@link #maxIterationCount} is set to this value, the number of
-     * iterations will never cause {@link #converged(int, Pair, Pair)}
-     * to return {@code true}.
-     */
-    private static final int ITERATION_CHECK_DISABLED = -1;
-    /**
-     * Number of iterations after which the
-     * {@link #converged(int, Pair, Pair)} method
-     * will return true (unless the check is disabled).
-     */
-    private final int maxIterationCount;
-
-    /**
-     * Build an instance with default thresholds.
-     * @deprecated See {@link AbstractConvergenceChecker#AbstractConvergenceChecker()}
-     */
-    @Deprecated
-    public SimplePointChecker() {
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Build an instance with specified thresholds.
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     */
-    public SimplePointChecker(final double relativeThreshold,
-                              final double absoluteThreshold) {
-        super(relativeThreshold, absoluteThreshold);
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Builds an instance with specified thresholds.
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold Relative tolerance threshold.
-     * @param absoluteThreshold Absolute tolerance threshold.
-     * @param maxIter Maximum iteration count.
-     * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
-     *
-     * @since 3.1
-     */
-    public SimplePointChecker(final double relativeThreshold,
-                              final double absoluteThreshold,
-                              final int maxIter) {
-        super(relativeThreshold, absoluteThreshold);
-
-        if (maxIter <= 0) {
-            throw new NotStrictlyPositiveException(maxIter);
-        }
-        maxIterationCount = maxIter;
-    }
-
-    /**
-     * Check if the optimization algorithm has converged considering the
-     * last two points.
-     * This method may be called several times from the same algorithm
-     * iteration with different points. This can be detected by checking the
-     * iteration number at each call if needed. Each time this method is
-     * called, the previous and current point correspond to points with the
-     * same role at each iteration, so they can be compared. As an example,
-     * simplex-based algorithms call this method for all points of the simplex,
-     * not only for the best or worst ones.
-     *
-     * @param iteration Index of current iteration
-     * @param previous Best point in the previous iteration.
-     * @param current Best point in the current iteration.
-     * @return {@code true} if the arguments satisfy the convergence criterion.
-     */
-    @Override
-    public boolean converged(final int iteration,
-                             final PAIR previous,
-                             final PAIR current) {
-        if (maxIterationCount != ITERATION_CHECK_DISABLED && iteration >= maxIterationCount) {
-            return true;
-        }
-
-        final double[] p = previous.getKey();
-        final double[] c = current.getKey();
-        for (int i = 0; i < p.length; ++i) {
-            final double pi = p[i];
-            final double ci = c[i];
-            final double difference = FastMath.abs(pi - ci);
-            final double size = FastMath.max(FastMath.abs(pi), FastMath.abs(ci));
-            if (difference > size * getRelativeThreshold() &&
-                difference > getAbsoluteThreshold()) {
-                return false;
-            }
-        }
-        return true;
-    }
-}
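
To make the per-coordinate criterion described above concrete, here is a small sketch using the removed checker (thresholds and points chosen purely for illustration):

    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.SimplePointChecker;

    public class SimplePointCheckerSketch {
        public static void main(String[] args) {
            SimplePointChecker<PointValuePair> checker =
                new SimplePointChecker<PointValuePair>(1e-6, 1e-10);
            PointValuePair previous = new PointValuePair(new double[] {1.0, 2.0}, 5.0);
            PointValuePair current  = new PointValuePair(new double[] {1.0 + 1e-12, 2.0}, 5.0);
            // Every coordinate difference is below both thresholds, so the
            // checker reports convergence regardless of the iteration index.
            System.out.println(checker.converged(42, previous, current));   // true
        }
    }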

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/SimpleValueChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/SimpleValueChecker.java b/src/main/java/org/apache/commons/math4/optimization/SimpleValueChecker.java
deleted file mode 100644
index 45f44ba..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/SimpleValueChecker.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Simple implementation of the {@link ConvergenceChecker} interface using
- * only objective function values.
- *
- * Convergence is considered to have been reached if either the relative
- * difference between the objective function values is smaller than a
- * threshold, or the absolute difference between the objective function
- * values is smaller than another threshold.
- * <br/>
- * The {@link #converged(int,PointValuePair,PointValuePair) converged}
- * method will also return {@code true} if the number of iterations has been set
- * (see {@link #SimpleValueChecker(double,double,int) this constructor}).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class SimpleValueChecker
-    extends AbstractConvergenceChecker<PointValuePair> {
-    /**
-     * If {@link #maxIterationCount} is set to this value, the number of
-     * iterations will never cause
-     * {@link #converged(int,PointValuePair,PointValuePair)}
-     * to return {@code true}.
-     */
-    private static final int ITERATION_CHECK_DISABLED = -1;
-    /**
-     * Number of iterations after which the
-     * {@link #converged(int,PointValuePair,PointValuePair)} method
-     * will return true (unless the check is disabled).
-     */
-    private final int maxIterationCount;
-
-    /**
-     * Build an instance with default thresholds.
-     * @deprecated See {@link AbstractConvergenceChecker#AbstractConvergenceChecker()}
-     */
-    @Deprecated
-    public SimpleValueChecker() {
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /** Build an instance with specified thresholds.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     */
-    public SimpleValueChecker(final double relativeThreshold,
-                              final double absoluteThreshold) {
-        super(relativeThreshold, absoluteThreshold);
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Builds an instance with specified thresholds.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     * @param maxIter Maximum iteration count.
-     * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
-     *
-     * @since 3.1
-     */
-    public SimpleValueChecker(final double relativeThreshold,
-                              final double absoluteThreshold,
-                              final int maxIter) {
-        super(relativeThreshold, absoluteThreshold);
-
-        if (maxIter <= 0) {
-            throw new NotStrictlyPositiveException(maxIter);
-        }
-        maxIterationCount = maxIter;
-    }
-
-    /**
-     * Check if the optimization algorithm has converged considering the
-     * last two points.
-     * This method may be called several times from the same algorithm
-     * iteration with different points. This can be detected by checking the
-     * iteration number at each call if needed. Each time this method is
-     * called, the previous and current point correspond to points with the
-     * same role at each iteration, so they can be compared. As an example,
-     * simplex-based algorithms call this method for all points of the simplex,
-     * not only for the best or worst ones.
-     *
-     * @param iteration Index of current iteration
-     * @param previous Best point in the previous iteration.
-     * @param current Best point in the current iteration.
-     * @return {@code true} if the algorithm has converged.
-     */
-    @Override
-    public boolean converged(final int iteration,
-                             final PointValuePair previous,
-                             final PointValuePair current) {
-        if (maxIterationCount != ITERATION_CHECK_DISABLED && iteration >= maxIterationCount) {
-            return true;
-        }
-
-        final double p = previous.getValue();
-        final double c = current.getValue();
-        final double difference = FastMath.abs(p - c);
-        final double size = FastMath.max(FastMath.abs(p), FastMath.abs(c));
-        return difference <= size * getRelativeThreshold() ||
-            difference <= getAbsoluteThreshold();
-    }
-}
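
A sketch of the iteration-count short-circuit mentioned in the removed comment above (values are illustrative only):

    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.SimpleValueChecker;

    public class SimpleValueCheckerSketch {
        public static void main(String[] args) {
            SimpleValueChecker checker = new SimpleValueChecker(1e-6, 1e-10, 100);
            PointValuePair previous = new PointValuePair(new double[] {0.0}, 10.0);
            PointValuePair current  = new PointValuePair(new double[] {0.1},  4.0);
            // The objective values still differ widely...
            System.out.println(checker.converged(10, previous, current));    // false
            // ...but once the iteration index reaches maxIter the check passes.
            System.out.println(checker.converged(100, previous, current));   // true
        }
    }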

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/SimpleVectorValueChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/SimpleVectorValueChecker.java b/src/main/java/org/apache/commons/math4/optimization/SimpleVectorValueChecker.java
deleted file mode 100644
index 8105988..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/SimpleVectorValueChecker.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Simple implementation of the {@link ConvergenceChecker} interface using
- * only objective function values.
- *
- * Convergence is considered to have been reached if, for all vector elements,
- * either the relative difference between the objective function values is
- * smaller than a threshold, or the absolute difference between the objective
- * function values is smaller than another threshold.
- * <br/>
- * The {@link #converged(int,PointVectorValuePair,PointVectorValuePair) converged}
- * method will also return {@code true} if the number of iterations has been set
- * (see {@link #SimpleVectorValueChecker(double,double,int) this constructor}).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class SimpleVectorValueChecker
-    extends AbstractConvergenceChecker<PointVectorValuePair> {
-    /**
-     * If {@link #maxIterationCount} is set to this value, the number of
-     * iterations will never cause
-     * {@link #converged(int,PointVectorValuePair,PointVectorValuePair)}
-     * to return {@code true}.
-     */
-    private static final int ITERATION_CHECK_DISABLED = -1;
-    /**
-     * Number of iterations after which the
-     * {@link #converged(int,PointVectorValuePair,PointVectorValuePair)} method
-     * will return true (unless the check is disabled).
-     */
-    private final int maxIterationCount;
-
-    /**
-     * Build an instance with default thresholds.
-     * @deprecated See {@link AbstractConvergenceChecker#AbstractConvergenceChecker()}
-     */
-    @Deprecated
-    public SimpleVectorValueChecker() {
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Build an instance with specified thresholds.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     */
-    public SimpleVectorValueChecker(final double relativeThreshold,
-                                    final double absoluteThreshold) {
-        super(relativeThreshold, absoluteThreshold);
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Builds an instance with specified tolerance thresholds and
-     * iteration count.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold Relative tolerance threshold.
-     * @param absoluteThreshold Absolute tolerance threshold.
-     * @param maxIter Maximum iteration count.
-     * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
-     *
-     * @since 3.1
-     */
-    public SimpleVectorValueChecker(final double relativeThreshold,
-                                    final double absoluteThreshold,
-                                    final int maxIter) {
-        super(relativeThreshold, absoluteThreshold);
-
-        if (maxIter <= 0) {
-            throw new NotStrictlyPositiveException(maxIter);
-        }
-        maxIterationCount = maxIter;
-    }
-
-    /**
-     * Check if the optimization algorithm has converged considering the
-     * last two points.
-     * This method may be called several times from the same algorithm
-     * iteration with different points. This can be detected by checking the
-     * iteration number at each call if needed. Each time this method is
-     * called, the previous and current point correspond to points with the
-     * same role at each iteration, so they can be compared. As an example,
-     * simplex-based algorithms call this method for all points of the simplex,
-     * not only for the best or worst ones.
-     *
-     * @param iteration Index of current iteration
-     * @param previous Best point in the previous iteration.
-     * @param current Best point in the current iteration.
-     * @return {@code true} if the arguments satisfy the convergence criterion.
-     */
-    @Override
-    public boolean converged(final int iteration,
-                             final PointVectorValuePair previous,
-                             final PointVectorValuePair current) {
-        if (maxIterationCount != ITERATION_CHECK_DISABLED && iteration >= maxIterationCount) {
-            return true;
-        }
-
-        final double[] p = previous.getValueRef();
-        final double[] c = current.getValueRef();
-        for (int i = 0; i < p.length; ++i) {
-            final double pi         = p[i];
-            final double ci         = c[i];
-            final double difference = FastMath.abs(pi - ci);
-            final double size       = FastMath.max(FastMath.abs(pi), FastMath.abs(ci));
-            if (difference > size * getRelativeThreshold() &&
-                difference > getAbsoluteThreshold()) {
-                return false;
-            }
-        }
-        return true;
-    }
-}
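
For the vector-valued checker, a single element outside both tolerances is enough to deny convergence; a short sketch (illustrative values):

    import org.apache.commons.math4.optimization.PointVectorValuePair;
    import org.apache.commons.math4.optimization.SimpleVectorValueChecker;

    public class SimpleVectorValueCheckerSketch {
        public static void main(String[] args) {
            SimpleVectorValueChecker checker = new SimpleVectorValueChecker(1e-6, 1e-9);
            PointVectorValuePair previous =
                new PointVectorValuePair(new double[] {0.0}, new double[] {1.0, 2.0});
            PointVectorValuePair current =
                new PointVectorValuePair(new double[] {0.0}, new double[] {1.0, 2.5});
            // The second element differs by 0.5, which exceeds both thresholds.
            System.out.println(checker.converged(3, previous, current));   // false
        }
    }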

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/Target.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/Target.java b/src/main/java/org/apache/commons/math4/optimization/Target.java
deleted file mode 100644
index 380d841..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/Target.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-/**
- * Target of the optimization procedure.
- * These are the values which the objective vector function must reproduce
- * when the parameters of the model have been optimized.
- * <br/>
- * Immutable class.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class Target implements OptimizationData {
-    /** Target values (of the objective vector function). */
-    private final double[] target;
-
-    /**
-     * @param observations Target values.
-     */
-    public Target(double[] observations) {
-        target = observations.clone();
-    }
-
-    /**
-     * Gets the target values.
-     *
-     * @return the target values.
-     */
-    public double[] getTarget() {
-        return target.clone();
-    }
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/Weight.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/Weight.java b/src/main/java/org/apache/commons/math4/optimization/Weight.java
deleted file mode 100644
index e5a3a9e..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/Weight.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization;
-
-import org.apache.commons.math4.linear.DiagonalMatrix;
-import org.apache.commons.math4.linear.NonSquareMatrixException;
-import org.apache.commons.math4.linear.RealMatrix;
-
-/**
- * Weight matrix of the residuals between model and observations.
- * <br/>
- * Immutable class.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class Weight implements OptimizationData {
-    /** Weight matrix. */
-    private final RealMatrix weightMatrix;
-
-    /**
-     * Creates a diagonal weight matrix.
-     *
-     * @param weight List of the values of the diagonal.
-     */
-    public Weight(double[] weight) {
-        weightMatrix = new DiagonalMatrix(weight);
-    }
-
-    /**
-     * @param weight Weight matrix.
-     * @throws NonSquareMatrixException if the argument is not
-     * a square matrix.
-     */
-    public Weight(RealMatrix weight) {
-        if (weight.getColumnDimension() != weight.getRowDimension()) {
-            throw new NonSquareMatrixException(weight.getColumnDimension(),
-                                               weight.getRowDimension());
-        }
-
-        weightMatrix = weight.copy();
-    }
-
-    /**
-     * Gets the weight matrix.
-     *
-     * @return the weight matrix.
-     */
-    public RealMatrix getWeight() {
-        return weightMatrix.copy();
-    }
-}
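
Target and Weight were typically supplied together as OptimizationData to the removed vector optimizers; a hedged sketch of building them (observation values are made up):

    import org.apache.commons.math4.linear.RealMatrix;
    import org.apache.commons.math4.optimization.Target;
    import org.apache.commons.math4.optimization.Weight;

    public class TargetWeightSketch {
        public static void main(String[] args) {
            Target target = new Target(new double[] {1.0, 2.0, 3.0});
            // Diagonal weight matrix built from per-observation weights;
            // a non-square RealMatrix argument would throw NonSquareMatrixException.
            Weight weight = new Weight(new double[] {1.0, 1.0, 0.5});
            RealMatrix w = weight.getWeight();   // 3x3 diagonal copy
            System.out.println(w.getEntry(2, 2));            // 0.5
            System.out.println(target.getTarget().length);   // 3
        }
    }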

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/AbstractSimplex.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/AbstractSimplex.java b/src/main/java/org/apache/commons/math4/optimization/direct/AbstractSimplex.java
deleted file mode 100644
index d30a0c6..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/AbstractSimplex.java
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Arrays;
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.MathIllegalArgumentException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.OutOfRangeException;
-import org.apache.commons.math4.exception.ZeroException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointValuePair;
-
-/**
- * This class implements the simplex concept.
- * It is intended to be used in conjunction with {@link SimplexOptimizer}.
- * <br/>
- * The initial configuration of the simplex is set by the constructors
- * {@link #AbstractSimplex(double[])} or {@link #AbstractSimplex(double[][])}.
- * The other {@link #AbstractSimplex(int) constructor} will set all steps
- * to 1, thus building a default configuration from a unit hypercube.
- * <br/>
- * Users <em>must</em> call the {@link #build(double[]) build} method in order
- * to create the data structure that will be acted on by the other methods of
- * this class.
- *
- * @see SimplexOptimizer
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public abstract class AbstractSimplex implements OptimizationData {
-    /** Simplex. */
-    private PointValuePair[] simplex;
-    /** Start simplex configuration. */
-    private double[][] startConfiguration;
-    /** Simplex dimension (must be equal to {@code simplex.length - 1}). */
-    private final int dimension;
-
-    /**
-     * Build a unit hypercube simplex.
-     *
-     * @param n Dimension of the simplex.
-     */
-    protected AbstractSimplex(int n) {
-        this(n, 1d);
-    }
-
-    /**
-     * Build a hypercube simplex with the given side length.
-     *
-     * @param n Dimension of the simplex.
-     * @param sideLength Length of the sides of the hypercube.
-     */
-    protected AbstractSimplex(int n,
-                              double sideLength) {
-        this(createHypercubeSteps(n, sideLength));
-    }
-
-    /**
-     * The start configuration for simplex is built from a box parallel to
-     * the canonical axes of the space. The simplex is the subset of vertices
-     * of a box parallel to the canonical axes. It is built as the path followed
-     * while traveling from one vertex of the box to the diagonally opposite
-     * vertex moving only along the box edges. The first vertex of the box will
-     * be located at the start point of the optimization.
-     * As an example, in dimension 3 a simplex has 4 vertices. Setting the
-     * steps to (1, 10, 2) and the start point to (1, 1, 1) would imply the
-     * start simplex would be: { (1, 1, 1), (2, 1, 1), (2, 11, 1), (2, 11, 3) }.
-     * The first vertex would be set to the start point at (1, 1, 1) and the
-     * last vertex would be set to the diagonally opposite vertex at (2, 11, 3).
-     *
-     * @param steps Steps along the canonical axes representing box edges. They
-     * may be negative but not zero.
-     * @throws NullArgumentException if {@code steps} is {@code null}.
-     * @throws ZeroException if one of the steps is zero.
-     */
-    protected AbstractSimplex(final double[] steps) {
-        if (steps == null) {
-            throw new NullArgumentException();
-        }
-        if (steps.length == 0) {
-            throw new ZeroException();
-        }
-        dimension = steps.length;
-
-        // Only the relative position of the n final vertices with respect
-        // to the first one are stored.
-        startConfiguration = new double[dimension][dimension];
-        for (int i = 0; i < dimension; i++) {
-            final double[] vertexI = startConfiguration[i];
-            for (int j = 0; j < i + 1; j++) {
-                if (steps[j] == 0) {
-                    throw new ZeroException(LocalizedFormats.EQUAL_VERTICES_IN_SIMPLEX);
-                }
-                System.arraycopy(steps, 0, vertexI, 0, j + 1);
-            }
-        }
-    }
-
-    /**
-     * The real initial simplex will be set up by moving the reference
-     * simplex such that its first point is located at the start point of the
-     * optimization.
-     *
-     * @param referenceSimplex Reference simplex.
-     * @throws NotStrictlyPositiveException if the reference simplex does not
-     * contain at least one point.
-     * @throws DimensionMismatchException if there is a dimension mismatch
-     * in the reference simplex.
-     * @throws IllegalArgumentException if one of its vertices is duplicated.
-     */
-    protected AbstractSimplex(final double[][] referenceSimplex) {
-        if (referenceSimplex.length <= 0) {
-            throw new NotStrictlyPositiveException(LocalizedFormats.SIMPLEX_NEED_ONE_POINT,
-                                                   referenceSimplex.length);
-        }
-        dimension = referenceSimplex.length - 1;
-
-        // Only the relative position of the n final vertices with respect
-        // to the first one are stored.
-        startConfiguration = new double[dimension][dimension];
-        final double[] ref0 = referenceSimplex[0];
-
-        // Loop over vertices.
-        for (int i = 0; i < referenceSimplex.length; i++) {
-            final double[] refI = referenceSimplex[i];
-
-            // Safety checks.
-            if (refI.length != dimension) {
-                throw new DimensionMismatchException(refI.length, dimension);
-            }
-            for (int j = 0; j < i; j++) {
-                final double[] refJ = referenceSimplex[j];
-                boolean allEquals = true;
-                for (int k = 0; k < dimension; k++) {
-                    if (refI[k] != refJ[k]) {
-                        allEquals = false;
-                        break;
-                    }
-                }
-                if (allEquals) {
-                    throw new MathIllegalArgumentException(LocalizedFormats.EQUAL_VERTICES_IN_SIMPLEX,
-                                                           i, j);
-                }
-            }
-
-            // Store vertex i position relative to vertex 0 position.
-            if (i > 0) {
-                final double[] confI = startConfiguration[i - 1];
-                for (int k = 0; k < dimension; k++) {
-                    confI[k] = refI[k] - ref0[k];
-                }
-            }
-        }
-    }
-
-    /**
-     * Get simplex dimension.
-     *
-     * @return the dimension of the simplex.
-     */
-    public int getDimension() {
-        return dimension;
-    }
-
-    /**
-     * Get simplex size.
-     * After calling the {@link #build(double[]) build} method, this method will
-     * return {@code getDimension() + 1}.
-     *
-     * @return the size of the simplex.
-     */
-    public int getSize() {
-        return simplex.length;
-    }
-
-    /**
-     * Compute the next simplex of the algorithm.
-     *
-     * @param evaluationFunction Evaluation function.
-     * @param comparator Comparator to use to sort simplex vertices from best
-     * to worst.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the algorithm fails to converge.
-     */
-    public abstract void iterate(final MultivariateFunction evaluationFunction,
-                                 final Comparator<PointValuePair> comparator);
-
-    /**
-     * Build an initial simplex.
-     *
-     * @param startPoint First point of the simplex.
-     * @throws DimensionMismatchException if the start point does not match
-     * simplex dimension.
-     */
-    public void build(final double[] startPoint) {
-        if (dimension != startPoint.length) {
-            throw new DimensionMismatchException(dimension, startPoint.length);
-        }
-
-        // Set first vertex.
-        simplex = new PointValuePair[dimension + 1];
-        simplex[0] = new PointValuePair(startPoint, Double.NaN);
-
-        // Set remaining vertices.
-        for (int i = 0; i < dimension; i++) {
-            final double[] confI = startConfiguration[i];
-            final double[] vertexI = new double[dimension];
-            for (int k = 0; k < dimension; k++) {
-                vertexI[k] = startPoint[k] + confI[k];
-            }
-            simplex[i + 1] = new PointValuePair(vertexI, Double.NaN);
-        }
-    }
-
-    /**
-     * Evaluate all the non-evaluated points of the simplex.
-     *
-     * @param evaluationFunction Evaluation function.
-     * @param comparator Comparator to use to sort simplex vertices from best to worst.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     */
-    public void evaluate(final MultivariateFunction evaluationFunction,
-                         final Comparator<PointValuePair> comparator) {
-        // Evaluate the objective function at all non-evaluated simplex points.
-        for (int i = 0; i < simplex.length; i++) {
-            final PointValuePair vertex = simplex[i];
-            final double[] point = vertex.getPointRef();
-            if (Double.isNaN(vertex.getValue())) {
-                simplex[i] = new PointValuePair(point, evaluationFunction.value(point), false);
-            }
-        }
-
-        // Sort the simplex from best to worst.
-        Arrays.sort(simplex, comparator);
-    }
-
-    /**
-     * Replace the worst point of the simplex by a new point.
-     *
-     * @param pointValuePair Point to insert.
-     * @param comparator Comparator to use for sorting the simplex vertices
-     * from best to worst.
-     */
-    protected void replaceWorstPoint(PointValuePair pointValuePair,
-                                     final Comparator<PointValuePair> comparator) {
-        for (int i = 0; i < dimension; i++) {
-            if (comparator.compare(simplex[i], pointValuePair) > 0) {
-                PointValuePair tmp = simplex[i];
-                simplex[i] = pointValuePair;
-                pointValuePair = tmp;
-            }
-        }
-        simplex[dimension] = pointValuePair;
-    }
-
-    /**
-     * Get the points of the simplex.
-     *
-     * @return all the simplex points.
-     */
-    public PointValuePair[] getPoints() {
-        final PointValuePair[] copy = new PointValuePair[simplex.length];
-        System.arraycopy(simplex, 0, copy, 0, simplex.length);
-        return copy;
-    }
-
-    /**
-     * Get the simplex point stored at the requested {@code index}.
-     *
-     * @param index Location.
-     * @return the point at location {@code index}.
-     */
-    public PointValuePair getPoint(int index) {
-        if (index < 0 ||
-            index >= simplex.length) {
-            throw new OutOfRangeException(index, 0, simplex.length - 1);
-        }
-        return simplex[index];
-    }
-
-    /**
-     * Store a new point at location {@code index}.
-     * Note that no deep-copy of {@code point} is performed.
-     *
-     * @param index Location.
-     * @param point New value.
-     */
-    protected void setPoint(int index, PointValuePair point) {
-        if (index < 0 ||
-            index >= simplex.length) {
-            throw new OutOfRangeException(index, 0, simplex.length - 1);
-        }
-        simplex[index] = point;
-    }
-
-    /**
-     * Replace all points.
-     * Note that no deep-copy of {@code points} is performed.
-     *
-     * @param points New Points.
-     */
-    protected void setPoints(PointValuePair[] points) {
-        if (points.length != simplex.length) {
-            throw new DimensionMismatchException(points.length, simplex.length);
-        }
-        simplex = points;
-    }
-
-    /**
-     * Create steps for a unit hypercube.
-     *
-     * @param n Dimension of the hypercube.
-     * @param sideLength Length of the sides of the hypercube.
-     * @return the steps.
-     */
-    private static double[] createHypercubeSteps(int n,
-                                                 double sideLength) {
-        final double[] steps = new double[n];
-        for (int i = 0; i < n; i++) {
-            steps[i] = sideLength;
-        }
-        return steps;
-    }
-}
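
The steps-based constructor above is easiest to see through the worked example given in its own Javadoc. The sketch below assumes the NelderMeadSimplex subclass from the same removed package (per the commons-math 3.x API) in order to instantiate the abstract class:

    import java.util.Arrays;

    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.direct.AbstractSimplex;
    import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;

    public class SimplexStartConfigSketch {
        public static void main(String[] args) {
            AbstractSimplex simplex = new NelderMeadSimplex(new double[] {1.0, 10.0, 2.0});
            simplex.build(new double[] {1.0, 1.0, 1.0});
            // Walking the box edges from the start point yields the vertices:
            // (1,1,1), (2,1,1), (2,11,1), (2,11,3)
            for (PointValuePair vertex : simplex.getPoints()) {
                System.out.println(Arrays.toString(vertex.getPoint()));
            }
        }
    }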


[05/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/CircleVectorial.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/CircleVectorial.java b/src/test/java/org/apache/commons/math4/optimization/general/CircleVectorial.java
deleted file mode 100644
index b0f4da5..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/CircleVectorial.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.util.ArrayList;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-
-/**
- * Class used in the tests.
- */
-@Deprecated
-class CircleVectorial implements MultivariateDifferentiableVectorFunction {
-    private ArrayList<Vector2D> points;
-
-    public CircleVectorial() {
-        points  = new ArrayList<Vector2D>();
-    }
-
-    public void addPoint(double px, double py) {
-        points.add(new Vector2D(px, py));
-    }
-
-    public int getN() {
-        return points.size();
-    }
-
-    public double getRadius(Vector2D center) {
-        double r = 0;
-        for (Vector2D point : points) {
-            r += point.distance(center);
-        }
-        return r / points.size();
-    }
-
-    private DerivativeStructure distance(Vector2D point,
-                                         DerivativeStructure cx, DerivativeStructure cy) {
-        DerivativeStructure dx = cx.subtract(point.getX());
-        DerivativeStructure dy = cy.subtract(point.getY());
-        return dx.multiply(dx).add(dy.multiply(dy)).sqrt();
-    }
-
-    public DerivativeStructure getRadius(DerivativeStructure cx, DerivativeStructure cy) {
-        DerivativeStructure r = cx.getField().getZero();
-        for (Vector2D point : points) {
-            r = r.add(distance(point, cx, cy));
-        }
-        return r.divide(points.size());
-    }
-
-    public double[] value(double[] variables) {
-        Vector2D center = new Vector2D(variables[0], variables[1]);
-        double radius = getRadius(center);
-
-        double[] residuals = new double[points.size()];
-        for (int i = 0; i < residuals.length; ++i) {
-            residuals[i] = points.get(i).distance(center) - radius;
-        }
-
-        return residuals;
-    }
-
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure radius = getRadius(variables[0], variables[1]);
-
-        DerivativeStructure[] residuals = new DerivativeStructure[points.size()];
-        for (int i = 0; i < residuals.length; ++i) {
-            residuals[i] = distance(points.get(i), variables[0], variables[1]).subtract(radius);
-        }
-
-        return residuals;
-    }
-
-}
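
Since CircleVectorial is the fitting model used by several of the removed optimizer tests, a brief sketch of what its residuals mean (points chosen for illustration; the class is package-private, so this only compiles alongside the tests):

    package org.apache.commons.math4.optimization.general;

    public class CircleVectorialSketch {
        public static void main(String[] args) {
            CircleVectorial circle = new CircleVectorial();
            // Four points lying exactly on the unit circle centred at the origin.
            circle.addPoint( 1,  0);
            circle.addPoint(-1,  0);
            circle.addPoint( 0,  1);
            circle.addPoint( 0, -1);
            // With the exact centre every point sits at the mean radius,
            // so each residual is (numerically) zero.
            double[] residuals = circle.value(new double[] {0.0, 0.0});
            System.out.println(residuals[0]);   // 0.0
        }
    }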

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizerTest.java
deleted file mode 100644
index 88e2f3a..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/GaussNewtonOptimizerTest.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.io.IOException;
-
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-import org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer;
-import org.apache.commons.math4.optimization.general.GaussNewtonOptimizer;
-import org.junit.Test;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public class GaussNewtonOptimizerTest
-    extends AbstractLeastSquaresOptimizerAbstractTest {
-
-    @Override
-    public AbstractLeastSquaresOptimizer createOptimizer() {
-        return new GaussNewtonOptimizer(new SimpleVectorValueChecker(1.0e-6, 1.0e-6));
-    }
-
-    @Override
-    @Test(expected = ConvergenceException.class)
-    public void testMoreEstimatedParametersSimple() {
-        /*
-         * Exception is expected with this optimizer
-         */
-        super.testMoreEstimatedParametersSimple();
-    }
-
-    @Override
-    @Test(expected=ConvergenceException.class)
-    public void testMoreEstimatedParametersUnsorted() {
-        /*
-         * Exception is expected with this optimizer
-         */
-        super.testMoreEstimatedParametersUnsorted();
-    }
-
-    @Test(expected=TooManyEvaluationsException.class)
-    public void testMaxEvaluations() throws Exception {
-        CircleVectorial circle = new CircleVectorial();
-        circle.addPoint( 30.0,  68.0);
-        circle.addPoint( 50.0,  -6.0);
-        circle.addPoint(110.0, -20.0);
-        circle.addPoint( 35.0,  15.0);
-        circle.addPoint( 45.0,  97.0);
-
-        GaussNewtonOptimizer optimizer
-            = new GaussNewtonOptimizer(new SimpleVectorValueChecker(1.0e-30, 1.0e-30));
-
-        optimizer.optimize(100, circle, new double[] { 0, 0, 0, 0, 0 },
-                           new double[] { 1, 1, 1, 1, 1 },
-                           new double[] { 98.680, 47.345 });
-    }
-
-    @Override
-    @Test(expected=ConvergenceException.class)
-    public void testCircleFittingBadInit() {
-        /*
-         * This test does not converge with this optimizer.
-         */
-        super.testCircleFittingBadInit();
-    }
-
-    @Override
-    @Test(expected = ConvergenceException.class)
-    public void testHahn1()
-        throws IOException {
-        /*
-         * TODO This test leads to a singular problem with the Gauss-Newton
-         * optimizer. This should be investigated.
-         */
-        super.testHahn1();
-    }
-}

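For anyone still depending on the removed classes: the kind of problem exercised by the deleted GaussNewtonOptimizerTest above is written against the replacement org.apache.commons.math4.fitting.leastsquares API roughly as in the following sketch. This is a minimal sketch only; it assumes the math4 package mirrors the math3 fitting.leastsquares classes (LeastSquaresBuilder, LeastSquaresFactory, GaussNewtonOptimizer, MultivariateJacobianFunction), none of which appear in this commit, and the data points are invented.

    import org.apache.commons.math4.fitting.leastsquares.GaussNewtonOptimizer;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresFactory;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math4.fitting.leastsquares.MultivariateJacobianFunction;
    import org.apache.commons.math4.linear.Array2DRowRealMatrix;
    import org.apache.commons.math4.linear.ArrayRealVector;
    import org.apache.commons.math4.linear.RealMatrix;
    import org.apache.commons.math4.linear.RealVector;
    import org.apache.commons.math4.optim.SimpleVectorValueChecker;
    import org.apache.commons.math4.util.Pair;

    public class LineFitSketch {
        public static void main(String[] args) {
            // Observations of y ~ a * x + b (made-up data, for illustration only).
            final double[] x = { 0, 1, 2, 3 };
            final double[] y = { 1.1, 2.9, 5.2, 6.8 };

            // Model value and Jacobian with respect to the parameters (a, b).
            final MultivariateJacobianFunction model = new MultivariateJacobianFunction() {
                public Pair<RealVector, RealMatrix> value(RealVector point) {
                    final double a = point.getEntry(0);
                    final double b = point.getEntry(1);
                    final RealVector value = new ArrayRealVector(x.length);
                    final RealMatrix jacobian = new Array2DRowRealMatrix(x.length, 2);
                    for (int i = 0; i < x.length; i++) {
                        value.setEntry(i, a * x[i] + b);
                        jacobian.setEntry(i, 0, x[i]); // d/da
                        jacobian.setEntry(i, 1, 1);    // d/db
                    }
                    return new Pair<RealVector, RealMatrix>(value, jacobian);
                }
            };

            // Convergence checker, evaluation limits and weights now live on the problem.
            final LeastSquaresProblem problem = new LeastSquaresBuilder()
                .start(new double[] { 1, 1 })
                .model(model)
                .target(y)
                .checker(LeastSquaresFactory.evaluationChecker(
                             new SimpleVectorValueChecker(1.0e-6, 1.0e-6)))
                .maxEvaluations(1000)
                .maxIterations(100)
                .build();

            final Optimum optimum = new GaussNewtonOptimizer().optimize(problem);
            System.out.println("a = " + optimum.getPoint().getEntry(0)
                               + ", b = " + optimum.getPoint().getEntry(1)
                               + ", RMS = " + optimum.getRMS());
        }
    }

Note that in this builder-based style the convergence checker, evaluation/iteration limits and weights are attached to the LeastSquaresProblem rather than passed to optimize(...), which is why the deleted optimize(100, problem, target, weights, start) calls have no one-to-one counterpart.
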
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizerTest.java b/src/test/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizerTest.java
deleted file mode 100644
index da77546..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/LevenbergMarquardtOptimizerTest.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.exception.ConvergenceException;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
-import org.apache.commons.math4.linear.SingularMatrixException;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.general.AbstractLeastSquaresOptimizer;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Precision;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.Ignore;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public class LevenbergMarquardtOptimizerTest extends AbstractLeastSquaresOptimizerAbstractTest {
-
-    @Override
-    public AbstractLeastSquaresOptimizer createOptimizer() {
-        return new LevenbergMarquardtOptimizer();
-    }
-
-    @Override
-    @Test(expected=SingularMatrixException.class)
-    public void testNonInvertible() {
-        /*
-         * Overrides the method from the parent class, since the default singularity
-         * threshold (1e-14) does not trigger the expected exception.
-         */
-        LinearProblem problem = new LinearProblem(new double[][] {
-                {  1, 2, -3 },
-                {  2, 1,  3 },
-                { -3, 0, -9 }
-        }, new double[] { 1, 1, 1 });
-
-        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
-        PointVectorValuePair optimum = optimizer.optimize(100, problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
-        Assert.assertTrue(FastMath.sqrt(problem.target.length) * optimizer.getRMS() > 0.6);
-
-        optimizer.computeCovariances(optimum.getPoint(), 1.5e-14);
-    }
-
-    @Test
-    public void testControlParameters() {
-        CircleVectorial circle = new CircleVectorial();
-        circle.addPoint( 30.0,  68.0);
-        circle.addPoint( 50.0,  -6.0);
-        circle.addPoint(110.0, -20.0);
-        circle.addPoint( 35.0,  15.0);
-        circle.addPoint( 45.0,  97.0);
-        checkEstimate(circle, 0.1, 10, 1.0e-14, 1.0e-16, 1.0e-10, false);
-        checkEstimate(circle, 0.1, 10, 1.0e-15, 1.0e-17, 1.0e-10, true);
-        checkEstimate(circle, 0.1,  5, 1.0e-15, 1.0e-16, 1.0e-10, true);
-        circle.addPoint(300, -300);
-        checkEstimate(circle, 0.1, 20, 1.0e-18, 1.0e-16, 1.0e-10, true);
-    }
-
-    private void checkEstimate(MultivariateDifferentiableVectorFunction problem,
-                               double initialStepBoundFactor, int maxCostEval,
-                               double costRelativeTolerance, double parRelativeTolerance,
-                               double orthoTolerance, boolean shouldFail) {
-        try {
-            LevenbergMarquardtOptimizer optimizer
-                = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
-                                                  costRelativeTolerance,
-                                                  parRelativeTolerance,
-                                                  orthoTolerance,
-                                                  Precision.SAFE_MIN);
-            optimizer.optimize(maxCostEval, problem, new double[] { 0, 0, 0, 0, 0 },
-                               new double[] { 1, 1, 1, 1, 1 },
-                               new double[] { 98.680, 47.345 });
-            Assert.assertTrue(!shouldFail);
-        } catch (DimensionMismatchException ee) {
-            Assert.assertTrue(shouldFail);
-        } catch (TooManyEvaluationsException ee) {
-            Assert.assertTrue(shouldFail);
-        }
-    }
-
-    // Test is skipped because it fails with the latest code update.
-    @Ignore @Test
-    public void testMath199() {
-        try {
-            QuadraticProblem problem = new QuadraticProblem();
-            problem.addPoint (0, -3.182591015485607);
-            problem.addPoint (1, -2.5581184967730577);
-            problem.addPoint (2, -2.1488478161387325);
-            problem.addPoint (3, -1.9122489313410047);
-            problem.addPoint (4, 1.7785661310051026);
-            LevenbergMarquardtOptimizer optimizer
-                = new LevenbergMarquardtOptimizer(100, 1e-10, 1e-10, 1e-10, 0);
-            optimizer.optimize(100, problem,
-                               new double[] { 0, 0, 0, 0, 0 },
-                               new double[] { 0.0, 4.4e-323, 1.0, 4.4e-323, 0.0 },
-                               new double[] { 0, 0, 0 });
-            Assert.fail("an exception should have been thrown");
-        } catch (ConvergenceException ee) {
-            // expected behavior
-        }
-    }
-
-    /**
-     * Non-linear test case: fitting of a decay curve (from Chapter 8 of
-     * Bevington's textbook, "Data Reduction and Error Analysis for the Physical Sciences").
-     * XXX The expected ("reference") values may not be accurate, and the tolerance may be
-     * too relaxed for this test to be really useful at present (the issue is under
-     * investigation).
-     */
-    @Test
-    public void testBevington() {
-        final double[][] dataPoints = {
-            // column 1 = times
-            { 15, 30, 45, 60, 75, 90, 105, 120, 135, 150,
-              165, 180, 195, 210, 225, 240, 255, 270, 285, 300,
-              315, 330, 345, 360, 375, 390, 405, 420, 435, 450,
-              465, 480, 495, 510, 525, 540, 555, 570, 585, 600,
-              615, 630, 645, 660, 675, 690, 705, 720, 735, 750,
-              765, 780, 795, 810, 825, 840, 855, 870, 885, },
-            // column 2 = measured counts
-            { 775, 479, 380, 302, 185, 157, 137, 119, 110, 89,
-              74, 61, 66, 68, 48, 54, 51, 46, 55, 29,
-              28, 37, 49, 26, 35, 29, 31, 24, 25, 35,
-              24, 30, 26, 28, 21, 18, 20, 27, 17, 17,
-              14, 17, 24, 11, 22, 17, 12, 10, 13, 16,
-              9, 9, 14, 21, 17, 13, 12, 18, 10, },
-        };
-
-        final BevingtonProblem problem = new BevingtonProblem();
-
-        final int len = dataPoints[0].length;
-        final double[] weights = new double[len];
-        for (int i = 0; i < len; i++) {
-            problem.addPoint(dataPoints[0][i],
-                             dataPoints[1][i]);
-
-            weights[i] = 1 / dataPoints[1][i];
-        }
-
-        final LevenbergMarquardtOptimizer optimizer
-            = new LevenbergMarquardtOptimizer();
-
-        final PointVectorValuePair optimum
-            = optimizer.optimize(100, problem, dataPoints[1], weights,
-                               new double[] { 10, 900, 80, 27, 225 });
-
-        final double[] solution = optimum.getPoint();
-        final double[] expectedSolution = { 10.4, 958.3, 131.4, 33.9, 205.0 };
-
-        final double[][] covarMatrix = optimizer.computeCovariances(solution, 1e-14);
-        final double[][] expectedCovarMatrix = {
-            { 3.38, -3.69, 27.98, -2.34, -49.24 },
-            { -3.69, 2492.26, 81.89, -69.21, -8.9 },
-            { 27.98, 81.89, 468.99, -44.22, -615.44 },
-            { -2.34, -69.21, -44.22, 6.39, 53.80 },
-            { -49.24, -8.9, -615.44, 53.8, 929.45 }
-        };
-
-        final int numParams = expectedSolution.length;
-
-        // Check that the computed solution is within the reference error range.
-        for (int i = 0; i < numParams; i++) {
-            final double error = FastMath.sqrt(expectedCovarMatrix[i][i]);
-            Assert.assertEquals("Parameter " + i, expectedSolution[i], solution[i], error);
-        }
-
-        // Check that each entry of the computed covariance matrix is within 10%
-        // of the reference matrix entry.
-        for (int i = 0; i < numParams; i++) {
-            for (int j = 0; j < numParams; j++) {
-                Assert.assertEquals("Covariance matrix [" + i + "][" + j + "]",
-                                    expectedCovarMatrix[i][j],
-                                    covarMatrix[i][j],
-                                    FastMath.abs(0.1 * expectedCovarMatrix[i][j]));
-            }
-        }
-    }
-
-    @Test
-    public void testCircleFitting2() {
-        final double xCenter = 123.456;
-        final double yCenter = 654.321;
-        final double xSigma = 10;
-        final double ySigma = 15;
-        final double radius = 111.111;
-        // The test is extremely sensitive to the seed.
-        final long seed = 59421061L;
-        final RandomCirclePointGenerator factory
-            = new RandomCirclePointGenerator(xCenter, yCenter, radius,
-                                             xSigma, ySigma,
-                                             seed);
-        final CircleProblem circle = new CircleProblem(xSigma, ySigma);
-
-        final int numPoints = 10;
-        for (Vector2D p : factory.generate(numPoints)) {
-            circle.addPoint(p);
-            // System.out.println(p.x + " " + p.y);
-        }
-
-        // First guess for the center's coordinates and radius.
-        final double[] init = { 90, 659, 115 };
-
-        final LevenbergMarquardtOptimizer optimizer
-            = new LevenbergMarquardtOptimizer();
-        final PointVectorValuePair optimum = optimizer.optimize(100, circle,
-                                                                circle.target(), circle.weight(),
-                                                                init);
-
-        final double[] paramFound = optimum.getPoint();
-
-        // Retrieve errors estimation.
-        final double[][] covMatrix = optimizer.computeCovariances(paramFound, 1e-14);
-        final double[] asymptoticStandardErrorFound = optimizer.guessParametersErrors();
-        final double[] sigmaFound = new double[covMatrix.length];
-        for (int i = 0; i < covMatrix.length; i++) {
-            sigmaFound[i] = FastMath.sqrt(covMatrix[i][i]);
-//             System.out.println("i=" + i + " value=" + paramFound[i]
-//                                + " sigma=" + sigmaFound[i]
-//                                + " ase=" + asymptoticStandardErrorFound[i]);
-        }
-
-        // System.out.println("chi2=" + optimizer.getChiSquare());
-
-        // Check that the parameters are found within the assumed error bars.
-        Assert.assertEquals(xCenter, paramFound[0], asymptoticStandardErrorFound[0]);
-        Assert.assertEquals(yCenter, paramFound[1], asymptoticStandardErrorFound[1]);
-        Assert.assertEquals(radius, paramFound[2], asymptoticStandardErrorFound[2]);
-    }
-
-    private static class QuadraticProblem implements MultivariateDifferentiableVectorFunction, Serializable {
-
-        private static final long serialVersionUID = 7072187082052755854L;
-        private List<Double> x;
-        private List<Double> y;
-
-        public QuadraticProblem() {
-            x = new ArrayList<Double>();
-            y = new ArrayList<Double>();
-        }
-
-        public void addPoint(double x, double y) {
-            this.x.add(x);
-            this.y.add(y);
-        }
-
-        public double[] value(double[] variables) {
-            double[] values = new double[x.size()];
-            for (int i = 0; i < values.length; ++i) {
-                values[i] = (variables[0] * x.get(i) + variables[1]) * x.get(i) + variables[2];
-            }
-            return values;
-        }
-
-        public DerivativeStructure[] value(DerivativeStructure[] variables) {
-            DerivativeStructure[] values = new DerivativeStructure[x.size()];
-            for (int i = 0; i < values.length; ++i) {
-                values[i] = (variables[0].multiply(x.get(i)).add(variables[1])).multiply(x.get(i)).add(variables[2]);
-            }
-            return values;
-        }
-
-    }
-
-    private static class BevingtonProblem
-        implements MultivariateDifferentiableVectorFunction {
-        private List<Double> time;
-        private List<Double> count;
-
-        public BevingtonProblem() {
-            time = new ArrayList<Double>();
-            count = new ArrayList<Double>();
-        }
-
-        public void addPoint(double t, double c) {
-            time.add(t);
-            count.add(c);
-        }
-
-        public double[] value(double[] params) {
-            double[] values = new double[time.size()];
-            for (int i = 0; i < values.length; ++i) {
-                final double t = time.get(i);
-                values[i] = params[0]
-                    + params[1] * FastMath.exp(-t / params[3])
-                    + params[2] * FastMath.exp(-t / params[4]);
-            }
-            return values;
-        }
-
-        public DerivativeStructure[] value(DerivativeStructure[] params) {
-            DerivativeStructure[] values = new DerivativeStructure[time.size()];
-            for (int i = 0; i < values.length; ++i) {
-                final double t = time.get(i);
-                values[i] = params[0].add(
-                    params[1].multiply(params[3].reciprocal().multiply(-t).exp())).add(
-                    params[2].multiply(params[4].reciprocal().multiply(-t).exp()));
-            }
-            return values;
-        }
-
-    }
-}

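The deleted LevenbergMarquardtOptimizerTest above and the MinpackTest that follows hard-code the MINPACK tolerances (the square root of the machine epsilon for the cost and parameter criteria, epsilon itself for orthogonality). Below is a minimal sketch of how those settings map onto the replacement optimizer; it assumes the fluent with...() configuration methods of the math3 fitting.leastsquares LevenbergMarquardtOptimizer also exist in the math4 package, which is not shown in this diff.

    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math4.fitting.leastsquares.LevenbergMarquardtOptimizer;
    import org.apache.commons.math4.util.FastMath;

    public class MinpackToleranceSketch {
        /** Solves an already-built least-squares problem with MINPACK-like tolerances. */
        public static Optimum solve(LeastSquaresProblem problem) {
            final double epsilon = 2.22044604926e-16; // same constant as in the deleted tests
            final LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer()
                .withCostRelativeTolerance(FastMath.sqrt(epsilon))
                .withParameterRelativeTolerance(FastMath.sqrt(epsilon))
                .withOrthoTolerance(epsilon);
            return optimizer.optimize(problem);
        }
    }

The covariance and standard-error figures asserted by the deleted tests (computeCovariances, guessParametersErrors) are read from the returned Optimum in the new API, e.g. via getCovariances(threshold) and getSigma(threshold); this too is an assumption based on the math3 interface rather than something shown in this commit.
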
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/test/java/org/apache/commons/math4/optimization/general/MinpackTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/optimization/general/MinpackTest.java b/src/test/java/org/apache/commons/math4/optimization/general/MinpackTest.java
deleted file mode 100644
index 50440c3..0000000
--- a/src/test/java/org/apache/commons/math4/optimization/general/MinpackTest.java
+++ /dev/null
@@ -1,1212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.general;
-
-import java.io.Serializable;
-import java.util.Arrays;
-
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer;
-import org.apache.commons.math4.util.FastMath;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * <p>Some of the unit tests are re-implementations of the MINPACK <a
- * href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
- * href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
- * The redistribution policy for MINPACK is available <a
- * href="http://www.netlib.org/minpack/disclaimer">here</a>, for
- * convenience, it is reproduced below.</p>
-
- * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
- * <tr><td>
- *    Minpack Copyright Notice (1999) University of Chicago.
- *    All rights reserved
- * </td></tr>
- * <tr><td>
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * <ol>
- *  <li>Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.</li>
- * <li>Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials provided
- *     with the distribution.</li>
- * <li>The end-user documentation included with the redistribution, if any,
- *     must include the following acknowledgment:
- *     <code>This product includes software developed by the University of
- *           Chicago, as Operator of Argonne National Laboratory.</code>
- *     Alternately, this acknowledgment may appear in the software itself,
- *     if and wherever such third-party acknowledgments normally appear.</li>
- * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- *     BE CORRECTED.</strong></li>
- * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
- * </ol></td></tr>
- * </table>
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-@Deprecated
-public class MinpackTest {
-
-  @Test
-  public void testMinpackLinearFullRank() {
-    minpackTest(new LinearFullRankFunction(10, 5, 1.0,
-                                           5.0, 2.23606797749979), false);
-    minpackTest(new LinearFullRankFunction(50, 5, 1.0,
-                                           8.06225774829855, 6.70820393249937), false);
-  }
-
-  @Test
-  public void testMinpackLinearRank1() {
-    minpackTest(new LinearRank1Function(10, 5, 1.0,
-                                        291.521868819476, 1.4638501094228), false);
-    minpackTest(new LinearRank1Function(50, 5, 1.0,
-                                        3101.60039334535, 3.48263016573496), false);
-  }
-
-  @Test
-  public void testMinpackLinearRank1ZeroColsAndRows() {
-    minpackTest(new LinearRank1ZeroColsAndRowsFunction(10, 5, 1.0), false);
-    minpackTest(new LinearRank1ZeroColsAndRowsFunction(50, 5, 1.0), false);
-  }
-
-  @Test
-  public void testMinpackRosenbrok() {
-    minpackTest(new RosenbrockFunction(new double[] { -1.2, 1.0 },
-                                       FastMath.sqrt(24.2)), false);
-    minpackTest(new RosenbrockFunction(new double[] { -12.0, 10.0 },
-                                       FastMath.sqrt(1795769.0)), false);
-    minpackTest(new RosenbrockFunction(new double[] { -120.0, 100.0 },
-                                       11.0 * FastMath.sqrt(169000121.0)), false);
-  }
-
-  @Test
-  public void testMinpackHelicalValley() {
-    minpackTest(new HelicalValleyFunction(new double[] { -1.0, 0.0, 0.0 },
-                                          50.0), false);
-    minpackTest(new HelicalValleyFunction(new double[] { -10.0, 0.0, 0.0 },
-                                          102.95630140987), false);
-    minpackTest(new HelicalValleyFunction(new double[] { -100.0, 0.0, 0.0},
-                                          991.261822123701), false);
-  }
-
-  @Test
-  public void testMinpackPowellSingular() {
-    minpackTest(new PowellSingularFunction(new double[] { 3.0, -1.0, 0.0, 1.0 },
-                                           14.6628782986152), false);
-    minpackTest(new PowellSingularFunction(new double[] { 30.0, -10.0, 0.0, 10.0 },
-                                           1270.9838708654), false);
-    minpackTest(new PowellSingularFunction(new double[] { 300.0, -100.0, 0.0, 100.0 },
-                                           126887.903284750), false);
-  }
-
-  @Test
-  public void testMinpackFreudensteinRoth() {
-    minpackTest(new FreudensteinRothFunction(new double[] { 0.5, -2.0 },
-                                             20.0124960961895, 6.99887517584575,
-                                             new double[] {
-                                               11.4124844654993,
-                                               -0.896827913731509
-                                             }), false);
-    minpackTest(new FreudensteinRothFunction(new double[] { 5.0, -20.0 },
-                                             12432.833948863, 6.9988751744895,
-                                             new double[] {
-                                                11.41300466147456,
-                                                -0.896796038685959
-                                             }), false);
-    minpackTest(new FreudensteinRothFunction(new double[] { 50.0, -200.0 },
-                                             11426454.595762, 6.99887517242903,
-                                             new double[] {
-                                                 11.412781785788564,
-                                                 -0.8968051074920405
-                                             }), false);
-  }
-
-  @Test
-  public void testMinpackBard() {
-    minpackTest(new BardFunction(1.0, 6.45613629515967, 0.0906359603390466,
-                                 new double[] {
-                                   0.0824105765758334,
-                                   1.1330366534715,
-                                   2.34369463894115
-                                 }), false);
-    minpackTest(new BardFunction(10.0, 36.1418531596785, 4.17476870138539,
-                                 new double[] {
-                                   0.840666673818329,
-                                   -158848033.259565,
-                                   -164378671.653535
-                                 }), false);
-    minpackTest(new BardFunction(100.0, 384.114678637399, 4.17476870135969,
-                                 new double[] {
-                                   0.840666673867645,
-                                   -158946167.205518,
-                                   -164464906.857771
-                                 }), false);
-  }
-
-  @Test
-  public void testMinpackKowalikOsborne() {
-    minpackTest(new KowalikOsborneFunction(new double[] { 0.25, 0.39, 0.415, 0.39 },
-                                           0.0728915102882945,
-                                           0.017535837721129,
-                                           new double[] {
-                                             0.192807810476249,
-                                             0.191262653354071,
-                                             0.123052801046931,
-                                             0.136053221150517
-                                           }), false);
-    minpackTest(new KowalikOsborneFunction(new double[] { 2.5, 3.9, 4.15, 3.9 },
-                                           2.97937007555202,
-                                           0.032052192917937,
-                                           new double[] {
-                                             728675.473768287,
-                                             -14.0758803129393,
-                                             -32977797.7841797,
-                                             -20571594.1977912
-                                           }), false);
-    minpackTest(new KowalikOsborneFunction(new double[] { 25.0, 39.0, 41.5, 39.0 },
-                                           29.9590617016037,
-                                           0.0175364017658228,
-                                           new double[] {
-                                             0.192948328597594,
-                                             0.188053165007911,
-                                             0.122430604321144,
-                                             0.134575665392506
-                                           }), false);
-  }
-
-  @Test
-  public void testMinpackMeyer() {
-    minpackTest(new MeyerFunction(new double[] { 0.02, 4000.0, 250.0 },
-                                  41153.4665543031, 9.37794514651874,
-                                  new double[] {
-                                    0.00560963647102661,
-                                    6181.34634628659,
-                                    345.223634624144
-                                  }), false);
-    minpackTest(new MeyerFunction(new double[] { 0.2, 40000.0, 2500.0 },
-                                  4168216.89130846, 792.917871779501,
-                                  new double[] {
-                                    1.42367074157994e-11,
-                                    33695.7133432541,
-                                    901.268527953801
-                                  }), true);
-  }
-
-  @Test
-  public void testMinpackWatson() {
-
-    minpackTest(new WatsonFunction(6, 0.0,
-                                   5.47722557505166, 0.0478295939097601,
-                                   new double[] {
-                                     -0.0157249615083782, 1.01243488232965,
-                                     -0.232991722387673,  1.26043101102818,
-                                     -1.51373031394421,   0.99299727291842
-                                   }), false);
-    minpackTest(new WatsonFunction(6, 10.0,
-                                   6433.12578950026, 0.0478295939096951,
-                                   new double[] {
-                                     -0.0157251901386677, 1.01243485860105,
-                                     -0.232991545843829,  1.26042932089163,
-                                     -1.51372776706575,   0.99299573426328
-                                   }), false);
-    minpackTest(new WatsonFunction(6, 100.0,
-                                   674256.040605213, 0.047829593911544,
-                                   new double[] {
-                                    -0.0157247019712586, 1.01243490925658,
-                                    -0.232991922761641,  1.26043292929555,
-                                    -1.51373320452707,   0.99299901922322
-                                   }), false);
-
-    minpackTest(new WatsonFunction(9, 0.0,
-                                   5.47722557505166, 0.00118311459212420,
-                                   new double[] {
-                                    -0.153070644166722e-4, 0.999789703934597,
-                                     0.0147639634910978,   0.146342330145992,
-                                     1.00082109454817,    -2.61773112070507,
-                                     4.10440313943354,    -3.14361226236241,
-                                     1.05262640378759
-                                   }), false);
-    minpackTest(new WatsonFunction(9, 10.0,
-                                   12088.127069307, 0.00118311459212513,
-                                   new double[] {
-                                   -0.153071334849279e-4, 0.999789703941234,
-                                    0.0147639629786217,   0.146342334818836,
-                                    1.00082107321386,    -2.61773107084722,
-                                    4.10440307655564,    -3.14361222178686,
-                                    1.05262639322589
-                                   }), false);
-    minpackTest(new WatsonFunction(9, 100.0,
-                                   1269109.29043834, 0.00118311459212384,
-                                   new double[] {
-                                    -0.153069523352176e-4, 0.999789703958371,
-                                     0.0147639625185392,   0.146342341096326,
-                                     1.00082104729164,    -2.61773101573645,
-                                     4.10440301427286,    -3.14361218602503,
-                                     1.05262638516774
-                                   }), false);
-
-    minpackTest(new WatsonFunction(12, 0.0,
-                                   5.47722557505166, 0.217310402535861e-4,
-                                   new double[] {
-                                    -0.660266001396382e-8, 1.00000164411833,
-                                    -0.000563932146980154, 0.347820540050756,
-                                    -0.156731500244233,    1.05281515825593,
-                                    -3.24727109519451,     7.2884347837505,
-                                   -10.271848098614,       9.07411353715783,
-                                    -4.54137541918194,     1.01201187975044
-                                   }), false);
-    minpackTest(new WatsonFunction(12, 10.0,
-                                   19220.7589790951, 0.217310402518509e-4,
-                                   new double[] {
-                                    -0.663710223017410e-8, 1.00000164411787,
-                                    -0.000563932208347327, 0.347820540486998,
-                                    -0.156731503955652,    1.05281517654573,
-                                    -3.2472711515214,      7.28843489430665,
-                                   -10.2718482369638,      9.07411364383733,
-                                    -4.54137546533666,     1.01201188830857
-                                   }), false);
-    minpackTest(new WatsonFunction(12, 100.0,
-                                   2018918.04462367, 0.217310402539845e-4,
-                                   new double[] {
-                                    -0.663806046485249e-8, 1.00000164411786,
-                                    -0.000563932210324959, 0.347820540503588,
-                                    -0.156731504091375,    1.05281517718031,
-                                    -3.24727115337025,     7.28843489775302,
-                                   -10.2718482410813,      9.07411364688464,
-                                    -4.54137546660822,     1.0120118885369
-                                   }), false);
-
-  }
-
-  @Test
-  public void testMinpackBox3Dimensional() {
-    minpackTest(new Box3DimensionalFunction(10, new double[] { 0.0, 10.0, 20.0 },
-                                            32.1115837449572), false);
-  }
-
-  @Test
-  public void testMinpackJennrichSampson() {
-    minpackTest(new JennrichSampsonFunction(10, new double[] { 0.3, 0.4 },
-                                            64.5856498144943, 11.1517793413499,
-                                            new double[] {
- //                                            0.2578330049, 0.257829976764542
-                                               0.2578199266368004, 0.25782997676455244
-                                            }), false);
-  }
-
-  @Test
-  public void testMinpackBrownDennis() {
-    minpackTest(new BrownDennisFunction(20,
-                                        new double[] { 25.0, 5.0, -5.0, -1.0 },
-                                        2815.43839161816, 292.954288244866,
-                                        new double[] {
-                                         -11.59125141003, 13.2024883984741,
-                                         -0.403574643314272, 0.236736269844604
-                                        }), false);
-    minpackTest(new BrownDennisFunction(20,
-                                        new double[] { 250.0, 50.0, -50.0, -10.0 },
-                                        555073.354173069, 292.954270581415,
-                                        new double[] {
-                                         -11.5959274272203, 13.2041866926242,
-                                         -0.403417362841545, 0.236771143410386
-                                       }), false);
-    minpackTest(new BrownDennisFunction(20,
-                                        new double[] { 2500.0, 500.0, -500.0, -100.0 },
-                                        61211252.2338581, 292.954306151134,
-                                        new double[] {
-                                         -11.5902596937374, 13.2020628854665,
-                                         -0.403688070279258, 0.236665033746463
-                                        }), false);
-  }
-
-  @Test
-  public void testMinpackChebyquad() {
-    minpackTest(new ChebyquadFunction(1, 8, 1.0,
-                                      1.88623796907732, 1.88623796907732,
-                                      new double[] { 0.5 }), false);
-    minpackTest(new ChebyquadFunction(1, 8, 10.0,
-                                      5383344372.34005, 1.88424820499951,
-                                      new double[] { 0.9817314924684 }), false);
-    minpackTest(new ChebyquadFunction(1, 8, 100.0,
-                                      0.118088726698392e19, 1.88424820499347,
-                                      new double[] { 0.9817314852934 }), false);
-    minpackTest(new ChebyquadFunction(8, 8, 1.0,
-                                      0.196513862833975, 0.0593032355046727,
-                                      new double[] {
-                                        0.0431536648587336, 0.193091637843267,
-                                        0.266328593812698,  0.499999334628884,
-                                        0.500000665371116,  0.733671406187302,
-                                        0.806908362156733,  0.956846335141266
-                                      }), false);
-    minpackTest(new ChebyquadFunction(9, 9, 1.0,
-                                      0.16994993465202, 0.0,
-                                      new double[] {
-                                        0.0442053461357828, 0.199490672309881,
-                                        0.23561910847106,   0.416046907892598,
-                                        0.5,                0.583953092107402,
-                                        0.764380891528940,  0.800509327690119,
-                                        0.955794653864217
-                                      }), false);
-    minpackTest(new ChebyquadFunction(10, 10, 1.0,
-                                      0.183747831178711, 0.0806471004038253,
-                                      new double[] {
-                                        0.0596202671753563, 0.166708783805937,
-                                        0.239171018813509,  0.398885290346268,
-                                        0.398883667870681,  0.601116332129320,
-                                        0.60111470965373,   0.760828981186491,
-                                        0.833291216194063,  0.940379732824644
-                                      }), false);
-  }
-
-  @Test
-  public void testMinpackBrownAlmostLinear() {
-    minpackTest(new BrownAlmostLinearFunction(10, 0.5,
-                                              16.5302162063499, 0.0,
-                                              new double[] {
-                                                0.979430303349862, 0.979430303349862,
-                                                0.979430303349862, 0.979430303349862,
-                                                0.979430303349862, 0.979430303349862,
-                                                0.979430303349862, 0.979430303349862,
-                                                0.979430303349862, 1.20569696650138
-                                              }), false);
-    minpackTest(new BrownAlmostLinearFunction(10, 5.0,
-                                              9765624.00089211, 0.0,
-                                              new double[] {
-                                               0.979430303349865, 0.979430303349865,
-                                               0.979430303349865, 0.979430303349865,
-                                               0.979430303349865, 0.979430303349865,
-                                               0.979430303349865, 0.979430303349865,
-                                               0.979430303349865, 1.20569696650135
-                                              }), false);
-    minpackTest(new BrownAlmostLinearFunction(10, 50.0,
-                                              0.9765625e17, 0.0,
-                                              new double[] {
-                                                1.0, 1.0, 1.0, 1.0, 1.0,
-                                                1.0, 1.0, 1.0, 1.0, 1.0
-                                              }), false);
-    minpackTest(new BrownAlmostLinearFunction(30, 0.5,
-                                              83.476044467848, 0.0,
-                                              new double[] {
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 0.997754216442807,
-                                                0.997754216442807, 1.06737350671578
-                                              }), false);
-    minpackTest(new BrownAlmostLinearFunction(40, 0.5,
-                                              128.026364472323, 0.0,
-                                              new double[] {
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                1.00000000000002, 1.00000000000002,
-                                                0.999999999999121
-                                              }), false);
-    }
-
-  @Test
-  public void testMinpackOsborne1() {
-      minpackTest(new Osborne1Function(new double[] { 0.5, 1.5, -1.0, 0.01, 0.02, },
-                                       0.937564021037838, 0.00739249260904843,
-                                       new double[] {
-                                         0.375410049244025, 1.93584654543108,
-                                        -1.46468676748716, 0.0128675339110439,
-                                         0.0221227011813076
-                                       }), false);
-    }
-
-  @Test
-  public void testMinpackOsborne2() {
-
-    minpackTest(new Osborne2Function(new double[] {
-                                       1.3, 0.65, 0.65, 0.7, 0.6,
-                                       3.0, 5.0, 7.0, 2.0, 4.5, 5.5
-                                     },
-                                     1.44686540984712, 0.20034404483314,
-                                     new double[] {
-                                       1.30997663810096,  0.43155248076,
-                                       0.633661261602859, 0.599428560991695,
-                                       0.754179768272449, 0.904300082378518,
-                                       1.36579949521007, 4.82373199748107,
-                                       2.39868475104871, 4.56887554791452,
-                                       5.67534206273052
-                                     }), false);
-  }
-
-  private void minpackTest(MinpackFunction function, boolean exceptionExpected) {
-      LevenbergMarquardtOptimizer optimizer
-          = new LevenbergMarquardtOptimizer(FastMath.sqrt(2.22044604926e-16),
-                                            FastMath.sqrt(2.22044604926e-16),
-                                            2.22044604926e-16);
-//      Assert.assertTrue(function.checkTheoreticalStartCost(optimizer.getRMS()));
-      try {
-          PointVectorValuePair optimum =
-              optimizer.optimize(400 * (function.getN() + 1), function,
-                                 function.getTarget(), function.getWeight(),
-                                 function.getStartPoint());
-          Assert.assertFalse(exceptionExpected);
-          function.checkTheoreticalMinCost(optimizer.getRMS());
-          function.checkTheoreticalMinParams(optimum);
-      } catch (TooManyEvaluationsException e) {
-          Assert.assertTrue(exceptionExpected);
-      }
-  }
-
-  private static abstract class MinpackFunction
-      implements MultivariateDifferentiableVectorFunction, Serializable {
-
-      private static final long serialVersionUID = -6209760235478794233L;
-      protected int      n;
-      protected int      m;
-      protected double[] startParams;
-      protected double   theoreticalMinCost;
-      protected double[] theoreticalMinParams;
-      protected double   costAccuracy;
-      protected double   paramsAccuracy;
-
-      protected MinpackFunction(int m, double[] startParams,
-                                double theoreticalMinCost, double[] theoreticalMinParams) {
-          this.m = m;
-          this.n = startParams.length;
-          this.startParams          = startParams.clone();
-          this.theoreticalMinCost   = theoreticalMinCost;
-          this.theoreticalMinParams = theoreticalMinParams;
-          this.costAccuracy         = 1.0e-8;
-          this.paramsAccuracy       = 1.0e-5;
-      }
-
-      protected static double[] buildArray(int n, double x) {
-          double[] array = new double[n];
-          Arrays.fill(array, x);
-          return array;
-      }
-
-      public double[] getTarget() {
-          return buildArray(m, 0.0);
-      }
-
-      public double[] getWeight() {
-          return buildArray(m, 1.0);
-      }
-
-      public double[] getStartPoint() {
-          return startParams.clone();
-      }
-
-      protected void setCostAccuracy(double costAccuracy) {
-          this.costAccuracy = costAccuracy;
-      }
-
-      protected void setParamsAccuracy(double paramsAccuracy) {
-          this.paramsAccuracy = paramsAccuracy;
-      }
-
-      public int getN() {
-          return startParams.length;
-      }
-
-      public void checkTheoreticalMinCost(double rms) {
-          double threshold = costAccuracy * (1.0 + theoreticalMinCost);
-          Assert.assertEquals(theoreticalMinCost, FastMath.sqrt(m) * rms, threshold);
-      }
-
-      public void checkTheoreticalMinParams(PointVectorValuePair optimum) {
-          double[] params = optimum.getPointRef();
-          if (theoreticalMinParams != null) {
-              for (int i = 0; i < theoreticalMinParams.length; ++i) {
-                  double mi = theoreticalMinParams[i];
-                  double vi = params[i];
-                  Assert.assertEquals(mi, vi, paramsAccuracy * (1.0 + FastMath.abs(mi)));
-              }
-          }
-      }
-
-      public double[] value(double[] variables) {
-          DerivativeStructure[] dsV = new DerivativeStructure[variables.length];
-          for (int i = 0; i < variables.length; ++i) {
-              dsV[i] = new DerivativeStructure(0, 0, variables[i]);
-          }
-          DerivativeStructure[] dsY = value(dsV);
-          double[] y = new double[dsY.length];
-          for (int i = 0; i < dsY.length; ++i) {
-              y[i] = dsY[i].getValue();
-          }
-          return y;
-      }
-
-      public abstract DerivativeStructure[] value(DerivativeStructure[] variables);
-
-  }
-
-  private static class LinearFullRankFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -9030323226268039536L;
-
-    public LinearFullRankFunction(int m, int n, double x0,
-                                  double theoreticalStartCost,
-                                  double theoreticalMinCost) {
-      super(m, buildArray(n, x0), theoreticalMinCost,
-            buildArray(n, -1.0));
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-      DerivativeStructure sum = variables[0].getField().getZero();
-      for (int i = 0; i < n; ++i) {
-        sum = sum.add(variables[i]);
-      }
-      DerivativeStructure t  = sum.multiply(2.0 / m).add(1);
-      DerivativeStructure[] f = new DerivativeStructure[m];
-      for (int i = 0; i < n; ++i) {
-        f[i] = variables[i].subtract(t);
-      }
-      Arrays.fill(f, n, m, t.negate());
-      return f;
-    }
-
-  }
-
-  private static class LinearRank1Function extends MinpackFunction {
-
-    private static final long serialVersionUID = 8494863245104608300L;
-
-    public LinearRank1Function(int m, int n, double x0,
-                                  double theoreticalStartCost,
-                                  double theoreticalMinCost) {
-      super(m, buildArray(n, x0), theoreticalMinCost, null);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        DerivativeStructure sum = variables[0].getField().getZero();
-        for (int i = 0; i < n; ++i) {
-            sum = sum.add(variables[i].multiply(i + 1));
-        }
-        for (int i = 0; i < m; ++i) {
-            f[i] = sum.multiply(i + 1).subtract(1);
-        }
-        return f;
-    }
-
-  }
-
-  private static class LinearRank1ZeroColsAndRowsFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -3316653043091995018L;
-
-    public LinearRank1ZeroColsAndRowsFunction(int m, int n, double x0) {
-      super(m, buildArray(n, x0),
-            FastMath.sqrt((m * (m + 3) - 6) / (2.0 * (2 * m - 3))),
-            null);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        DerivativeStructure sum = variables[0].getField().getZero();
-      for (int i = 1; i < (n - 1); ++i) {
-          sum = sum.add(variables[i].multiply(i + 1));
-      }
-      for (int i = 0; i < (m - 1); ++i) {
-        f[i] = sum.multiply(i).subtract(1);
-      }
-      f[m - 1] = variables[0].getField().getOne().negate();
-      return f;
-    }
-
-  }
-
-  private static class RosenbrockFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 2893438180956569134L;
-
-    public RosenbrockFunction(double[] startParams, double theoreticalStartCost) {
-      super(2, startParams, 0.0, buildArray(2, 1.0));
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        return new DerivativeStructure[] {
-            x2.subtract(x1.multiply(x1)).multiply(10),
-            x1.negate().add(1)
-        };
-    }
-
-  }
-
-  private static class HelicalValleyFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 220613787843200102L;
-
-    public HelicalValleyFunction(double[] startParams,
-                                 double theoreticalStartCost) {
-      super(3, startParams, 0.0, new double[] { 1.0, 0.0, 0.0 });
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure tmp1 = variables[0].getField().getZero();
-        if (x1.getValue() == 0) {
-            tmp1 = tmp1.add((x2.getValue() >= 0) ? 0.25 : -0.25);
-        } else {
-            tmp1 = x2.divide(x1).atan().divide(twoPi);
-            if (x1.getValue() < 0) {
-                tmp1 = tmp1.add(0.5);
-            }
-        }
-        DerivativeStructure tmp2 = x1.multiply(x1).add(x2.multiply(x2)).sqrt();
-        return new DerivativeStructure[] {
-            x3.subtract(tmp1.multiply(10)).multiply(10),
-            tmp2.subtract(1).multiply(10),
-            x3
-        };
-    }
-
-    private static final double twoPi = 2.0 * FastMath.PI;
-
-  }
-
-  private static class PowellSingularFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 7298364171208142405L;
-
-    public PowellSingularFunction(double[] startParams,
-                                  double theoreticalStartCost) {
-      super(4, startParams, 0.0, buildArray(4, 0.0));
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure x4 = variables[3];
-      return new DerivativeStructure[] {
-        x1.add(x2.multiply(10)),
-        x3.subtract(x4).multiply(sqrt5),
-        x2.subtract(x3.multiply(2)).multiply(x2.subtract(x3.multiply(2))),
-        x1.subtract(x4).multiply(x1.subtract(x4)).multiply(sqrt10)
-      };
-    }
-
-    private static final double sqrt5  = FastMath.sqrt( 5.0);
-    private static final double sqrt10 = FastMath.sqrt(10.0);
-
-  }
-
-  private static class FreudensteinRothFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 2892404999344244214L;
-
-    public FreudensteinRothFunction(double[] startParams,
-                                    double theoreticalStartCost,
-                                    double theoreticalMinCost,
-                                    double[] theoreticalMinParams) {
-      super(2, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        return new DerivativeStructure[] {
-            x1.subtract(13.0).add(x2.negate().add(5.0).multiply(x2).subtract(2).multiply(x2)),
-            x1.subtract(29.0).add(x2.add(1).multiply(x2).subtract(14).multiply(x2))
-        };
-    }
-
-  }
-
-  private static class BardFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 5990442612572087668L;
-
-    public BardFunction(double x0,
-                        double theoreticalStartCost,
-                        double theoreticalMinCost,
-                        double[] theoreticalMinParams) {
-      super(15, buildArray(3, x0), theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure   x1 = variables[0];
-        DerivativeStructure   x2 = variables[1];
-        DerivativeStructure   x3 = variables[2];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-      for (int i = 0; i < m; ++i) {
-        double tmp1 = i + 1;
-        double tmp2 = 15 - i;
-        double tmp3 = (i <= 7) ? tmp1 : tmp2;
-        f[i] = x1.add(x2.multiply(tmp2).add(x3.multiply(tmp3)).reciprocal().multiply(tmp1)).negate().add(y[i]);
-      }
-      return f;
-    }
-
-    private static final double[] y = {
-      0.14, 0.18, 0.22, 0.25, 0.29,
-      0.32, 0.35, 0.39, 0.37, 0.58,
-      0.73, 0.96, 1.34, 2.10, 4.39
-    };
-
-  }
-
-  private static class KowalikOsborneFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -4867445739880495801L;
-
-    public KowalikOsborneFunction(double[] startParams,
-                                  double theoreticalStartCost,
-                                  double theoreticalMinCost,
-                                  double[] theoreticalMinParams) {
-      super(11, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-      if (theoreticalStartCost > 20.0) {
-        setCostAccuracy(2.0e-4);
-        setParamsAccuracy(5.0e-3);
-      }
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure x4 = variables[3];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        for (int i = 0; i < m; ++i) {
-            f[i] = x1.multiply(x2.add(v[i]).multiply(v[i])).divide(x4.add(x3.add(v[i]).multiply(v[i]))).negate().add(y[i]);
-        }
-        return f;
-    }
-
-    private static final double[] v = {
-      4.0, 2.0, 1.0, 0.5, 0.25, 0.167, 0.125, 0.1, 0.0833, 0.0714, 0.0625
-    };
-
-    private static final double[] y = {
-      0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
-      0.0456, 0.0342, 0.0323, 0.0235, 0.0246
-    };
-
-  }
-
-  private static class MeyerFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -838060619150131027L;
-
-    public MeyerFunction(double[] startParams,
-                         double theoreticalStartCost,
-                         double theoreticalMinCost,
-                         double[] theoreticalMinParams) {
-      super(16, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-      if (theoreticalStartCost > 1.0e6) {
-        setCostAccuracy(7.0e-3);
-        setParamsAccuracy(2.0e-2);
-      }
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-      for (int i = 0; i < m; ++i) {
-        f[i] = x1.multiply(x2.divide(x3.add(5.0 * (i + 1) + 45.0)).exp()).subtract(y[i]);
-      }
-     return f;
-    }
-
-    private static final double[] y = {
-      34780.0, 28610.0, 23650.0, 19630.0,
-      16370.0, 13720.0, 11540.0,  9744.0,
-       8261.0,  7030.0,  6005.0,  5147.0,
-       4427.0,  3820.0,  3307.0,  2872.0
-    };
-
-  }
-
-  private static class WatsonFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -9034759294980218927L;
-
-    public WatsonFunction(int n, double x0,
-                          double theoreticalStartCost,
-                          double theoreticalMinCost,
-                          double[] theoreticalMinParams) {
-      super(31, buildArray(n, x0), theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        for (int i = 0; i < (m - 2); ++i) {
-            double div = (i + 1) / 29.0;
-            DerivativeStructure s1 = variables[0].getField().getZero();
-            DerivativeStructure dx = variables[0].getField().getOne();
-            for (int j = 1; j < n; ++j) {
-                s1 = s1.add(dx.multiply(j).multiply(variables[j]));
-                dx = dx.multiply(div);
-            }
-            DerivativeStructure s2 = variables[0].getField().getZero();
-            dx = variables[0].getField().getOne();
-            for (int j = 0; j < n; ++j) {
-                s2 = s2.add(dx.multiply(variables[j]));
-                dx = dx.multiply(div);
-            }
-            f[i] = s1.subtract(s2.multiply(s2)).subtract(1);
-        }
-
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        f[m - 2] = x1;
-        f[m - 1] = x2.subtract(x1.multiply(x1)).subtract(1);
-
-        return f;
-
-    }
-
-  }
-
-  private static class Box3DimensionalFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 5511403858142574493L;
-
-    public Box3DimensionalFunction(int m, double[] startParams,
-                                   double theoreticalStartCost) {
-      super(m, startParams, 0.0,
-            new double[] { 1.0, 10.0, 1.0 });
-   }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-      for (int i = 0; i < m; ++i) {
-        double tmp = (i + 1) / 10.0;
-        f[i] = x1.multiply(-tmp).exp().subtract(x2.multiply(-tmp).exp()).add(
-                  x3.multiply(FastMath.exp(-i - 1) - FastMath.exp(-tmp)));
-      }
-      return f;
-    }
-
-  }
-
-  private static class JennrichSampsonFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -2489165190443352947L;
-
-    public JennrichSampsonFunction(int m, double[] startParams,
-                                   double theoreticalStartCost,
-                                   double theoreticalMinCost,
-                                   double[] theoreticalMinParams) {
-      super(m, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        for (int i = 0; i < m; ++i) {
-            double temp = i + 1;
-            f[i] = x1.multiply(temp).exp().add(x2.multiply(temp).exp()).subtract(2 + 2 * temp).negate();
-        }
-        return f;
-    }
-
-  }
-
-  private static class BrownDennisFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 8340018645694243910L;
-
-    public BrownDennisFunction(int m, double[] startParams,
-                               double theoreticalStartCost,
-                               double theoreticalMinCost,
-                               double[] theoreticalMinParams) {
-      super(m, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-      setCostAccuracy(2.5e-8);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure x4 = variables[3];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        for (int i = 0; i < m; ++i) {
-            double temp = (i + 1) / 5.0;
-            DerivativeStructure tmp1 = x1.add(x2.multiply(temp)).subtract(FastMath.exp(temp));
-            DerivativeStructure tmp2 = x3.add(x4.multiply(FastMath.sin(temp))).subtract(FastMath.cos(temp));
-            f[i] = tmp1.multiply(tmp1).add(tmp2.multiply(tmp2));
-        }
-        return f;
-    }
-
-  }
-
-  private static class ChebyquadFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = -2394877275028008594L;
-
-    private static double[] buildChebyquadArray(int n, double factor) {
-      double[] array = new double[n];
-      double inv = factor / (n + 1);
-      for (int i = 0; i < n; ++i) {
-        array[i] = (i + 1) * inv;
-      }
-      return array;
-    }
-
-    public ChebyquadFunction(int n, int m, double factor,
-                             double theoreticalStartCost,
-                             double theoreticalMinCost,
-                             double[] theoreticalMinParams) {
-      super(m, buildChebyquadArray(n, factor), theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        Arrays.fill(f, variables[0].getField().getZero());
-
-        for (int j = 0; j < n; ++j) {
-            DerivativeStructure tmp1 = variables[0].getField().getOne();
-            DerivativeStructure tmp2 = variables[j].multiply(2).subtract(1);
-            DerivativeStructure temp = tmp2.multiply(2);
-            for (int i = 0; i < m; ++i) {
-                f[i] = f[i].add(tmp2);
-                DerivativeStructure ti = temp.multiply(tmp2).subtract(tmp1);
-                tmp1 = tmp2;
-                tmp2 = ti;
-            }
-        }
-
-        double dx = 1.0 / n;
-        boolean iev = false;
-        for (int i = 0; i < m; ++i) {
-            f[i] = f[i].multiply(dx);
-            if (iev) {
-                f[i] = f[i].add(1.0 / (i * (i + 2)));
-            }
-            iev = ! iev;
-        }
-
-        return f;
-
-    }
-
-  }
-
-  private static class BrownAlmostLinearFunction extends MinpackFunction {
-
-    private static final long serialVersionUID = 8239594490466964725L;
-
-    public BrownAlmostLinearFunction(int m, double factor,
-                                     double theoreticalStartCost,
-                                     double theoreticalMinCost,
-                                     double[] theoreticalMinParams) {
-      super(m, buildArray(m, factor), theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        DerivativeStructure sum  = variables[0].getField().getZero().subtract(n + 1);
-        DerivativeStructure prod = variables[0].getField().getOne();
-      for (int j = 0; j < n; ++j) {
-        sum  = sum.add(variables[j]);
-        prod = prod.multiply(variables[j]);
-      }
-      for (int i = 0; i < n; ++i) {
-        f[i] = variables[i].add(sum);
-      }
-      f[n - 1] = prod.subtract(1);
-      return f;
-    }
-
-  }
-
-  private static class Osborne1Function extends MinpackFunction {
-
-    private static final long serialVersionUID = 4006743521149849494L;
-
-    public Osborne1Function(double[] startParams,
-                            double theoreticalStartCost,
-                            double theoreticalMinCost,
-                            double[] theoreticalMinParams) {
-      super(33, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x1 = variables[0];
-        DerivativeStructure x2 = variables[1];
-        DerivativeStructure x3 = variables[2];
-        DerivativeStructure x4 = variables[3];
-        DerivativeStructure x5 = variables[4];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-      for (int i = 0; i < m; ++i) {
-        double temp = 10.0 * i;
-        DerivativeStructure tmp1 = x4.multiply(-temp).exp();
-        DerivativeStructure tmp2 = x5.multiply(-temp).exp();
-        f[i] = x1.add(x2.multiply(tmp1)).add(x3.multiply(tmp2)).negate().add(y[i]);
-      }
-      return f;
-    }
-
-    private static final double[] y = {
-      0.844, 0.908, 0.932, 0.936, 0.925, 0.908, 0.881, 0.850, 0.818, 0.784, 0.751,
-      0.718, 0.685, 0.658, 0.628, 0.603, 0.580, 0.558, 0.538, 0.522, 0.506, 0.490,
-      0.478, 0.467, 0.457, 0.448, 0.438, 0.431, 0.424, 0.420, 0.414, 0.411, 0.406
-    };
-
-  }
-
-  private static class Osborne2Function extends MinpackFunction {
-
-    private static final long serialVersionUID = -8418268780389858746L;
-
-    public Osborne2Function(double[] startParams,
-                            double theoreticalStartCost,
-                            double theoreticalMinCost,
-                            double[] theoreticalMinParams) {
-      super(65, startParams, theoreticalMinCost,
-            theoreticalMinParams);
-    }
-
-    @Override
-    public DerivativeStructure[] value(DerivativeStructure[] variables) {
-        DerivativeStructure x01 = variables[0];
-        DerivativeStructure x02 = variables[1];
-        DerivativeStructure x03 = variables[2];
-        DerivativeStructure x04 = variables[3];
-        DerivativeStructure x05 = variables[4];
-        DerivativeStructure x06 = variables[5];
-        DerivativeStructure x07 = variables[6];
-        DerivativeStructure x08 = variables[7];
-        DerivativeStructure x09 = variables[8];
-        DerivativeStructure x10 = variables[9];
-        DerivativeStructure x11 = variables[10];
-        DerivativeStructure[] f = new DerivativeStructure[m];
-        for (int i = 0; i < m; ++i) {
-            double temp = i / 10.0;
-            DerivativeStructure tmp1 = x05.multiply(-temp).exp();
-            DerivativeStructure tmp2 = x06.negate().multiply(x09.subtract(temp).multiply(x09.subtract(temp))).exp();
-            DerivativeStructure tmp3 = x07.negate().multiply(x10.subtract(temp).multiply(x10.subtract(temp))).exp();
-            DerivativeStructure tmp4 = x08.negate().multiply(x11.subtract(temp).multiply(x11.subtract(temp))).exp();
-            f[i] = x01.multiply(tmp1).add(x02.multiply(tmp2)).add(x03.multiply(tmp3)).add(x04.multiply(tmp4)).negate().add(y[i]);
-        }
-        return f;
-    }
-
-    private static final double[] y = {
-      1.366, 1.191, 1.112, 1.013, 0.991,
-      0.885, 0.831, 0.847, 0.786, 0.725,
-      0.746, 0.679, 0.608, 0.655, 0.616,
-      0.606, 0.602, 0.626, 0.651, 0.724,
-      0.649, 0.649, 0.694, 0.644, 0.624,
-      0.661, 0.612, 0.558, 0.533, 0.495,
-      0.500, 0.423, 0.395, 0.375, 0.372,
-      0.391, 0.396, 0.405, 0.428, 0.429,
-      0.523, 0.562, 0.607, 0.653, 0.672,
-      0.708, 0.633, 0.668, 0.645, 0.632,
-      0.591, 0.559, 0.597, 0.625, 0.739,
-      0.710, 0.729, 0.720, 0.636, 0.581,
-      0.428, 0.292, 0.162, 0.098, 0.054
-    };
-
-  }
-
-}
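
For reference, the functions deleted above come from the Minpack test suite and build
their residual vectors from DerivativeStructure objects. With the deprecated
optimization package removed, such problems run against the least-squares fitting API
instead. Below is a minimal sketch, assuming the org.apache.commons.math4.fitting.leastsquares
package mirrors its Commons Math 3.x counterpart (LeastSquaresBuilder,
LevenbergMarquardtOptimizer); it minimizes the Rosenbrock residuals shown above with a
hand-coded Jacobian, and is not part of this commit:

    import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math4.fitting.leastsquares.LevenbergMarquardtOptimizer;

    public class RosenbrockLeastSquaresSketch {
        public static void main(String[] args) {
            // Residuals of the Rosenbrock problem, as in the removed RosenbrockFunction:
            //   f1 = 10 * (x2 - x1^2),  f2 = 1 - x1
            MultivariateVectorFunction residuals = new MultivariateVectorFunction() {
                @Override
                public double[] value(double[] x) {
                    return new double[] { 10.0 * (x[1] - x[0] * x[0]), 1.0 - x[0] };
                }
            };
            // Explicit Jacobian in place of the DerivativeStructure machinery.
            MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
                @Override
                public double[][] value(double[] x) {
                    return new double[][] {
                        { -20.0 * x[0], 10.0 },
                        { -1.0,          0.0 }
                    };
                }
            };
            Optimum optimum = new LevenbergMarquardtOptimizer().optimize(
                new LeastSquaresBuilder()
                    .model(residuals, jacobian)
                    .target(new double[] { 0.0, 0.0 }) // drive the residuals to zero
                    .start(new double[] { -1.2, 1.0 }) // classical Rosenbrock start point
                    .maxEvaluations(1000)
                    .maxIterations(1000)
                    .build());
            System.out.println("x1 = " + optimum.getPoint().getEntry(0)
                    + ", x2 = " + optimum.getPoint().getEntry(1)
                    + ", cost = " + optimum.getCost());
        }
    }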


[02/18] [math] [MATH-869] NullArgumentException now extends NullPointerException.

Posted by tn...@apache.org.
[MATH-869] NullArgumentException now extends NullPointerException.


Project: http://git-wip-us.apache.org/repos/asf/commons-math/repo
Commit: http://git-wip-us.apache.org/repos/asf/commons-math/commit/35b688b7
Tree: http://git-wip-us.apache.org/repos/asf/commons-math/tree/35b688b7
Diff: http://git-wip-us.apache.org/repos/asf/commons-math/diff/35b688b7

Branch: refs/heads/master
Commit: 35b688b7ec3b32dc671af4c7cb9556ff26e761eb
Parents: c22e7fb
Author: Thomas Neidhart <th...@gmail.com>
Authored: Wed Feb 25 22:25:47 2015 +0100
Committer: Thomas Neidhart <th...@gmail.com>
Committed: Wed Feb 25 22:25:47 2015 +0100

----------------------------------------------------------------------
 src/changes/changes.xml                         |  4 +++
 .../math4/exception/NullArgumentException.java  | 38 ++++++++++++++++++--
 .../solvers/UnivariateSolverUtilsTest.java      |  7 ++--
 .../commons/math4/fraction/FractionTest.java    | 18 +++++-----
 .../commons/math4/stat/StatUtilsTest.java       | 35 +++++++++---------
 .../AbstractUnivariateStatisticTest.java        |  9 ++---
 .../descriptive/moment/SemiVarianceTest.java    |  6 ++--
 .../descriptive/rank/PSquarePercentileTest.java |  3 +-
 .../stat/descriptive/rank/PercentileTest.java   |  8 ++---
 .../GLSMultipleLinearRegressionTest.java        |  5 +--
 .../MultipleLinearRegressionAbstractTest.java   |  7 ++--
 .../OLSMultipleLinearRegressionTest.java        |  5 +--
 .../commons/math4/util/MathArraysTest.java      |  8 ++---
 13 files changed, 98 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/changes/changes.xml
----------------------------------------------------------------------
diff --git a/src/changes/changes.xml b/src/changes/changes.xml
index 9b4744e..ee73fca 100644
--- a/src/changes/changes.xml
+++ b/src/changes/changes.xml
@@ -54,6 +54,10 @@ If the output is not quite correct, check for invisible trailing spaces!
     </release>
 
     <release version="4.0" date="XXXX-XX-XX" description="">
+      <action dev="tn" type="update" issue="MATH-869">
+        "NullArgumentException" extends now "java.lang.NullPointerException"
+        instead of "MathIllegalArgumentException".
+      </action>
       <action dev="tn" type="update" issue="MATH-839" due-to="Gilles Sadowski">
         Renamed "cumulativeProbability(double, double)" to "probability(double, double)"
         in "IntegerDistribution" and "RealDistribution".

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/main/java/org/apache/commons/math4/exception/NullArgumentException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/exception/NullArgumentException.java b/src/main/java/org/apache/commons/math4/exception/NullArgumentException.java
index 7b73be7..5577042 100644
--- a/src/main/java/org/apache/commons/math4/exception/NullArgumentException.java
+++ b/src/main/java/org/apache/commons/math4/exception/NullArgumentException.java
@@ -16,6 +16,8 @@
  */
 package org.apache.commons.math4.exception;
 
+import org.apache.commons.math4.exception.util.ExceptionContext;
+import org.apache.commons.math4.exception.util.ExceptionContextProvider;
 import org.apache.commons.math4.exception.util.Localizable;
 import org.apache.commons.math4.exception.util.LocalizedFormats;
 
@@ -26,12 +28,20 @@ import org.apache.commons.math4.exception.util.LocalizedFormats;
  * argument") and so does not extend the standard {@code NullPointerException}.
  * Propagation of {@code NullPointerException} from within Commons-Math is
  * construed to be a bug.
+ * <p>
+ * Note: from 4.0 onwards, this class extends {@link NullPointerException} instead
+ * of {@link MathIllegalArgumentException}.
  *
  * @since 2.2
  */
-public class NullArgumentException extends MathIllegalArgumentException {
+public class NullArgumentException extends NullPointerException
+    implements ExceptionContextProvider {
+
     /** Serializable version Id. */
-    private static final long serialVersionUID = -6024911025449780478L;
+    private static final long serialVersionUID = 20150225L;
+
+    /** Context. */
+    private final ExceptionContext context;
 
     /**
      * Default constructor.
@@ -46,6 +56,28 @@ public class NullArgumentException extends MathIllegalArgumentException {
      */
     public NullArgumentException(Localizable pattern,
                                  Object ... arguments) {
-        super(pattern, arguments);
+        context = new ExceptionContext(this);
+        context.addMessage(pattern, arguments);
+    }
+    
+    /**
+     * {@inheritDoc}
+     * @since 4.0
+     */
+    public ExceptionContext getContext() {
+        return context;
     }
+
+    /** {@inheritDoc} */
+    @Override
+    public String getMessage() {
+        return context.getMessage();
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public String getLocalizedMessage() {
+        return context.getLocalizedMessage();
+    }
+
 }
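
The practical effect of this change is twofold: a null argument can now be trapped
either as the specific NullArgumentException or as a plain java.lang.NullPointerException,
and conversely it is no longer an IllegalArgumentException, which is why the tests below
switch their expected exception type. A small caller sketch (hypothetical class, not part
of this commit; it assumes MathUtils.checkNotNull behaves as in the 3.x line, where it
throws NullArgumentException):

    import org.apache.commons.math4.exception.NullArgumentException;
    import org.apache.commons.math4.util.MathUtils;

    public class NullArgumentCatchSketch {
        public static void main(String[] args) {
            try {
                MathUtils.checkNotNull(null);
            } catch (NullPointerException e) {
                // From 4.0 on this branch also catches NullArgumentException,
                // since it now extends java.lang.NullPointerException.
                System.out.println("caught " + e.getClass().getSimpleName()
                        + ": " + e.getMessage());
            }

            try {
                MathUtils.checkNotNull(null);
            } catch (NullArgumentException e) {
                // The specific type still works and still carries its context.
                System.out.println("context: " + e.getContext().getMessage());
            }
        }
    }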

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/analysis/solvers/UnivariateSolverUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/analysis/solvers/UnivariateSolverUtilsTest.java b/src/test/java/org/apache/commons/math4/analysis/solvers/UnivariateSolverUtilsTest.java
index fba50e3..f2471b7 100644
--- a/src/test/java/org/apache/commons/math4/analysis/solvers/UnivariateSolverUtilsTest.java
+++ b/src/test/java/org/apache/commons/math4/analysis/solvers/UnivariateSolverUtilsTest.java
@@ -23,6 +23,7 @@ import org.apache.commons.math4.analysis.function.Sin;
 import org.apache.commons.math4.analysis.solvers.UnivariateSolverUtils;
 import org.apache.commons.math4.exception.MathIllegalArgumentException;
 import org.apache.commons.math4.exception.NoBracketingException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.util.FastMath;
 import org.junit.Assert;
 import org.junit.Test;
@@ -33,7 +34,7 @@ public class UnivariateSolverUtilsTest {
 
     protected UnivariateFunction sin = new Sin();
 
-    @Test(expected=MathIllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testSolveNull() {
         UnivariateSolverUtils.solve(null, 0.0, 4.0);
     }
@@ -60,7 +61,7 @@ public class UnivariateSolverUtilsTest {
         Assert.assertEquals(FastMath.PI, x, 1.0e-4);
     }
 
-    @Test(expected=MathIllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testSolveAccuracyNull()  {
         double accuracy = 1.0e-6;
         UnivariateSolverUtils.solve(null, 0.0, 4.0, accuracy);
@@ -144,7 +145,7 @@ public class UnivariateSolverUtilsTest {
         Assert.assertTrue(sin.value(result[1]) > 0);
     }
 
-    @Test(expected=MathIllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testNullFunction() {
         UnivariateSolverUtils.bracket(null, 1.5, 0, 2.0);
     }

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/fraction/FractionTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/fraction/FractionTest.java b/src/test/java/org/apache/commons/math4/fraction/FractionTest.java
index 174fa09..aca7d05 100644
--- a/src/test/java/org/apache/commons/math4/fraction/FractionTest.java
+++ b/src/test/java/org/apache/commons/math4/fraction/FractionTest.java
@@ -19,7 +19,7 @@ package org.apache.commons.math4.fraction;
 import org.apache.commons.math4.TestUtils;
 import org.apache.commons.math4.exception.ConvergenceException;
 import org.apache.commons.math4.exception.MathArithmeticException;
-import org.apache.commons.math4.exception.MathIllegalArgumentException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.fraction.Fraction;
 import org.apache.commons.math4.fraction.FractionConversionException;
 import org.apache.commons.math4.util.FastMath;
@@ -348,8 +348,8 @@ public class FractionTest {
 
         try {
             f.add(null);
-            Assert.fail("expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {}
+            Assert.fail("expecting NullArgumentException");
+        } catch (NullArgumentException ex) {}
 
         // if this fraction is added naively, it will overflow.
         // check that it doesn't.
@@ -445,8 +445,8 @@ public class FractionTest {
 
         try {
             f.divide(null);
-            Assert.fail("MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {}
+            Assert.fail("NullArgumentException");
+        } catch (NullArgumentException ex) {}
 
         try {
             f1 = new Fraction(1, Integer.MAX_VALUE);
@@ -484,8 +484,8 @@ public class FractionTest {
 
         try {
             f.multiply(null);
-            Assert.fail("expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {}
+            Assert.fail("expecting NullArgumentException");
+        } catch (NullArgumentException ex) {}
 
         f1 = new Fraction(6, 35);
         f  = f1.multiply(15);
@@ -506,8 +506,8 @@ public class FractionTest {
         Fraction f = new Fraction(1,1);
         try {
             f.subtract(null);
-            Assert.fail("expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {}
+            Assert.fail("expecting NullArgumentException");
+        } catch (NullArgumentException ex) {}
 
         // if this fraction is subtracted naively, it will overflow.
         // check that it doesn't.

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/StatUtilsTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/StatUtilsTest.java b/src/test/java/org/apache/commons/math4/stat/StatUtilsTest.java
index 9837b85..2528f3c 100644
--- a/src/test/java/org/apache/commons/math4/stat/StatUtilsTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/StatUtilsTest.java
@@ -19,6 +19,7 @@ package org.apache.commons.math4.stat;
 
 import org.apache.commons.math4.TestUtils;
 import org.apache.commons.math4.exception.MathIllegalArgumentException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.stat.StatUtils;
 import org.apache.commons.math4.stat.descriptive.DescriptiveStatistics;
 import org.apache.commons.math4.util.FastMath;
@@ -122,14 +123,14 @@ public final class StatUtilsTest {
         try {
             StatUtils.sumSq(x);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
         try {
             StatUtils.sumSq(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -157,14 +158,14 @@ public final class StatUtilsTest {
         try {
             StatUtils.product(x);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
         try {
             StatUtils.product(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -192,14 +193,14 @@ public final class StatUtilsTest {
         try {
             StatUtils.sumLog(x);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
         try {
             StatUtils.sumLog(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -226,7 +227,7 @@ public final class StatUtilsTest {
         try {
             StatUtils.mean(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -250,7 +251,7 @@ public final class StatUtilsTest {
         try {
             StatUtils.variance(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -278,7 +279,7 @@ public final class StatUtilsTest {
         try {
             StatUtils.variance(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -307,7 +308,7 @@ public final class StatUtilsTest {
         try {
             StatUtils.max(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -347,7 +348,7 @@ public final class StatUtilsTest {
         try {
             StatUtils.min(x, 0, 4);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -388,14 +389,14 @@ public final class StatUtilsTest {
         try {
             StatUtils.percentile(x, .25);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
         try {
             StatUtils.percentile(x, 0, 4, 0.25);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException ex) {
+        } catch (NullArgumentException ex) {
             // success
         }
 
@@ -452,8 +453,8 @@ public final class StatUtilsTest {
         double[] test = null;
         try {
             StatUtils.geometricMean(test);
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // expected
         }
         test = new double[] {2, 4, 6, 8};
@@ -547,8 +548,8 @@ public final class StatUtilsTest {
         final double[] nullArray = null;
         try {
             StatUtils.mode(nullArray);
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // Expected
         }
     }

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/descriptive/AbstractUnivariateStatisticTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/descriptive/AbstractUnivariateStatisticTest.java b/src/test/java/org/apache/commons/math4/stat/descriptive/AbstractUnivariateStatisticTest.java
index dca0ece..b455f5b 100644
--- a/src/test/java/org/apache/commons/math4/stat/descriptive/AbstractUnivariateStatisticTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/descriptive/AbstractUnivariateStatisticTest.java
@@ -18,6 +18,7 @@ package org.apache.commons.math4.stat.descriptive;
 
 
 import org.apache.commons.math4.exception.MathIllegalArgumentException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.stat.descriptive.moment.Mean;
 import org.junit.Assert;
 import org.junit.Test;
@@ -76,14 +77,14 @@ public class AbstractUnivariateStatisticTest {
         }
         try {
             testStatistic.test(nullArray, 0, 1);  // null array
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // expected
         }
         try {
             testStatistic.test(testArray, nullArray, 0, 1);  // null weights array
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // expected
         }
         try {

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/descriptive/moment/SemiVarianceTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/descriptive/moment/SemiVarianceTest.java b/src/test/java/org/apache/commons/math4/stat/descriptive/moment/SemiVarianceTest.java
index e9dc85a..c964a78 100644
--- a/src/test/java/org/apache/commons/math4/stat/descriptive/moment/SemiVarianceTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/descriptive/moment/SemiVarianceTest.java
@@ -18,7 +18,7 @@
 package org.apache.commons.math4.stat.descriptive.moment;
 
 import org.apache.commons.math4.TestUtils;
-import org.apache.commons.math4.exception.MathIllegalArgumentException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.stat.StatUtils;
 import org.apache.commons.math4.stat.descriptive.moment.SemiVariance;
 import org.junit.Assert;
@@ -34,14 +34,14 @@ public class SemiVarianceTest {
         try {
             sv.evaluate(nothing);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException iae) {
+        } catch (NullArgumentException nae) {
         }
 
         try {
             sv.setVarianceDirection(SemiVariance.UPSIDE_VARIANCE);
             sv.evaluate(nothing);
             Assert.fail("null is not a valid data array.");
-        } catch (MathIllegalArgumentException iae) {
+        } catch (NullArgumentException nae) {
         }
         nothing = new double[] {};
         Assert.assertTrue(Double.isNaN(sv.evaluate(nothing)));

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PSquarePercentileTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PSquarePercentileTest.java b/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PSquarePercentileTest.java
index 78969fb..8751b00 100644
--- a/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PSquarePercentileTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PSquarePercentileTest.java
@@ -28,6 +28,7 @@ import org.apache.commons.math4.distribution.LogNormalDistribution;
 import org.apache.commons.math4.distribution.NormalDistribution;
 import org.apache.commons.math4.distribution.RealDistribution;
 import org.apache.commons.math4.exception.MathIllegalArgumentException;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.exception.OutOfRangeException;
 import org.apache.commons.math4.random.RandomGenerator;
 import org.apache.commons.math4.random.Well19937c;
@@ -439,7 +440,7 @@ public class PSquarePercentileTest extends
                 1.0);// changed the accuracy to 1 instead of tolerance
     }
 
-    @Test(expected = MathIllegalArgumentException.class)
+    @Test(expected = NullArgumentException.class)
     public void testNull() {
         PSquarePercentile percentile = new PSquarePercentile(50d);
         double[] nullArray = null;

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PercentileTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PercentileTest.java b/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PercentileTest.java
index bd67c5a..8b2107b 100644
--- a/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PercentileTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/descriptive/rank/PercentileTest.java
@@ -171,8 +171,8 @@ public class PercentileTest extends UnivariateStatisticAbstractTest{
         final double[] emptyArray = new double[] {};
         try {
             percentile.evaluate(nullArray);
-            Assert.fail("Expecting MathIllegalArgumentException for null array");
-        } catch (final MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException for null array");
+        } catch (final NullArgumentException ex) {
             // expected
         }
         Assert.assertTrue(Double.isNaN(percentile.evaluate(emptyArray)));
@@ -364,9 +364,9 @@ public class PercentileTest extends UnivariateStatisticAbstractTest{
             final UnivariateStatistic percentile = getUnivariateStatistic();
             try {
                 percentile.evaluate(nullArray);
-                Assert.fail("Expecting MathIllegalArgumentException "
+                Assert.fail("Expecting NullArgumentException "
                         + "for null array");
-            } catch (final MathIllegalArgumentException ex) {
+            } catch (final NullArgumentException ex) {
                 // expected
             }
             Assert.assertTrue(Double.isNaN(percentile.evaluate(emptyArray)));

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/regression/GLSMultipleLinearRegressionTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/regression/GLSMultipleLinearRegressionTest.java b/src/test/java/org/apache/commons/math4/stat/regression/GLSMultipleLinearRegressionTest.java
index 29dbe07..a2f5f62 100644
--- a/src/test/java/org/apache/commons/math4/stat/regression/GLSMultipleLinearRegressionTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/regression/GLSMultipleLinearRegressionTest.java
@@ -20,6 +20,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.apache.commons.math4.TestUtils;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.linear.MatrixUtils;
 import org.apache.commons.math4.linear.RealMatrix;
 import org.apache.commons.math4.linear.RealVector;
@@ -77,12 +78,12 @@ public class GLSMultipleLinearRegressionTest extends MultipleLinearRegressionAbs
         super.setUp();
     }
 
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void cannotAddXSampleData() {
         createRegression().newSampleData(new double[]{}, null, null);
     }
 
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void cannotAddNullYSampleData() {
         createRegression().newSampleData(null, new double[][]{}, null);
     }

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/regression/MultipleLinearRegressionAbstractTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/regression/MultipleLinearRegressionAbstractTest.java b/src/test/java/org/apache/commons/math4/stat/regression/MultipleLinearRegressionAbstractTest.java
index 8e05600..1fc839b 100644
--- a/src/test/java/org/apache/commons/math4/stat/regression/MultipleLinearRegressionAbstractTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/regression/MultipleLinearRegressionAbstractTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.commons.math4.stat.regression;
 
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.linear.RealMatrix;
 import org.apache.commons.math4.linear.RealVector;
 import org.apache.commons.math4.stat.regression.AbstractMultipleLinearRegression;
@@ -104,7 +105,7 @@ public abstract class MultipleLinearRegressionAbstractTest {
         Assert.assertEquals(flatY, regression.getY());
     }
     
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testNewSampleNullData() {
         double[] data = null;
         createRegression().newSampleData(data, 2, 3); 
@@ -122,12 +123,12 @@ public abstract class MultipleLinearRegressionAbstractTest {
         createRegression().newSampleData(data, 1, 3);
     }
     
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testXSampleDataNull() {
         createRegression().newXSampleData(null);
     }
     
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testYSampleDataNull() {
         createRegression().newYSampleData(null);
     }

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/stat/regression/OLSMultipleLinearRegressionTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/stat/regression/OLSMultipleLinearRegressionTest.java b/src/test/java/org/apache/commons/math4/stat/regression/OLSMultipleLinearRegressionTest.java
index f5025b0..d383d0f 100644
--- a/src/test/java/org/apache/commons/math4/stat/regression/OLSMultipleLinearRegressionTest.java
+++ b/src/test/java/org/apache/commons/math4/stat/regression/OLSMultipleLinearRegressionTest.java
@@ -18,6 +18,7 @@ package org.apache.commons.math4.stat.regression;
 
 
 import org.apache.commons.math4.TestUtils;
+import org.apache.commons.math4.exception.NullArgumentException;
 import org.apache.commons.math4.linear.Array2DRowRealMatrix;
 import org.apache.commons.math4.linear.DefaultRealMatrixChangingVisitor;
 import org.apache.commons.math4.linear.MatrixUtils;
@@ -500,12 +501,12 @@ public class OLSMultipleLinearRegressionTest extends MultipleLinearRegressionAbs
         Assert.assertEquals(combinedY, regression.getY());
     }
     
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testNewSampleDataYNull() {
         createRegression().newSampleData(null, new double[][] {});
     }
     
-    @Test(expected=IllegalArgumentException.class)
+    @Test(expected=NullArgumentException.class)
     public void testNewSampleDataXNull() {
         createRegression().newSampleData(new double[] {}, null);
     }

http://git-wip-us.apache.org/repos/asf/commons-math/blob/35b688b7/src/test/java/org/apache/commons/math4/util/MathArraysTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/commons/math4/util/MathArraysTest.java b/src/test/java/org/apache/commons/math4/util/MathArraysTest.java
index 71d75b9..16e6a52 100644
--- a/src/test/java/org/apache/commons/math4/util/MathArraysTest.java
+++ b/src/test/java/org/apache/commons/math4/util/MathArraysTest.java
@@ -1128,14 +1128,14 @@ public class MathArraysTest {
         }
         try {
             MathArrays.verifyValues(nullArray, 0, 1);  // null array
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // expected
         }
         try {
             MathArrays.verifyValues(testArray, nullArray, 0, 1);  // null weights array
-            Assert.fail("Expecting MathIllegalArgumentException");
-        } catch (MathIllegalArgumentException ex) {
+            Assert.fail("Expecting NullArgumentException");
+        } catch (NullArgumentException ex) {
             // expected
         }
         try {


[18/18] [math] Enabled MissingOverride check after upgrade to Java 7.

Posted by tn...@apache.org.
Enabled MissingOverride check after upgrade to Java 7.


Project: http://git-wip-us.apache.org/repos/asf/commons-math/repo
Commit: http://git-wip-us.apache.org/repos/asf/commons-math/commit/b28255e1
Tree: http://git-wip-us.apache.org/repos/asf/commons-math/tree/b28255e1
Diff: http://git-wip-us.apache.org/repos/asf/commons-math/diff/b28255e1

Branch: refs/heads/master
Commit: b28255e1be6abbb689c712cd41c5defd41d66d6d
Parents: b4669aa
Author: Thomas Neidhart <th...@gmail.com>
Authored: Wed Feb 25 22:49:13 2015 +0100
Committer: Thomas Neidhart <th...@gmail.com>
Committed: Wed Feb 25 22:49:13 2015 +0100

----------------------------------------------------------------------
 checkstyle.xml | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/commons-math/blob/b28255e1/checkstyle.xml
----------------------------------------------------------------------
diff --git a/checkstyle.xml b/checkstyle.xml
index 5549177..ee644d3 100644
--- a/checkstyle.xml
+++ b/checkstyle.xml
@@ -139,6 +139,9 @@
       <property name="ignoreStringsRegexp" value='^(("")|(".")|("unchecked"))$'/>
     </module>
 
+    <!-- Check if @Override tags are present  -->
+    <module name="MissingOverride" />
+
     <!-- <module name="TodoComment" /> -->
 
   </module>
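
The MissingOverride module verifies that a method documented with the {@inheritDoc}
javadoc tag also carries the @Override annotation, which the Java 7 toolchain accepts
on interface implementations as well as on overridden superclass methods. A minimal,
hypothetical illustration (not from the repository) of the pattern the check enforces:

    public class Counter implements Comparable<Counter> {
        private final int count;

        public Counter(int count) {
            this.count = count;
        }

        /** {@inheritDoc} */
        @Override   // dropping this while keeping {@inheritDoc} is now a checkstyle violation
        public int compareTo(Counter other) {
            return Integer.compare(this.count, other.count);
        }

        /** {@inheritDoc} */
        @Override
        public String toString() {
            return "Counter(" + count + ")";
        }
    }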


[14/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateOptimizer.java
deleted file mode 100644
index 8af7c47..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateOptimizer.java
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.exception.NumberIsTooLargeException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.BaseMultivariateOptimizer;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleBounds;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-import org.apache.commons.math4.util.Incrementor;
-
-/**
- * Base class for implementing optimizers for multivariate scalar functions.
- * This base class handles the boiler-plate methods associated to thresholds,
- * evaluations counting, initial guess and simple bounds settings.
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.2
- */
-@Deprecated
-public abstract class BaseAbstractMultivariateOptimizer<FUNC extends MultivariateFunction>
-    implements BaseMultivariateOptimizer<FUNC> {
-    /** Evaluations counter. */
-    protected final Incrementor evaluations = new Incrementor();
-    /** Convergence checker. */
-    private ConvergenceChecker<PointValuePair> checker;
-    /** Type of optimization. */
-    private GoalType goal;
-    /** Initial guess. */
-    private double[] start;
-    /** Lower bounds. */
-    private double[] lowerBound;
-    /** Upper bounds. */
-    private double[] upperBound;
-    /** Objective function. */
-    private MultivariateFunction function;
-
-    /**
-     * Simple constructor with default settings.
-     * The convergence check is set to a {@link SimpleValueChecker}.
-     * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    protected BaseAbstractMultivariateOptimizer() {
-        this(new SimpleValueChecker());
-    }
-    /**
-     * @param checker Convergence checker.
-     */
-    protected BaseAbstractMultivariateOptimizer(ConvergenceChecker<PointValuePair> checker) {
-        this.checker = checker;
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return evaluations.getMaximalCount();
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return evaluations.getCount();
-    }
-
-    /** {@inheritDoc} */
-    public ConvergenceChecker<PointValuePair> getConvergenceChecker() {
-        return checker;
-    }
-
-    /**
-     * Compute the objective function value.
-     *
-     * @param point Point at which the objective function must be evaluated.
-     * @return the objective function value at the specified point.
-     * @throws TooManyEvaluationsException if the maximal number of
-     * evaluations is exceeded.
-     */
-    protected double computeObjectiveValue(double[] point) {
-        try {
-            evaluations.incrementCount();
-        } catch (MaxCountExceededException e) {
-            throw new TooManyEvaluationsException(e.getMax());
-        }
-        return function.value(point);
-    }
-
-    /**
-     * {@inheritDoc}
-     *
-     * @deprecated As of 3.1. Please use
-     * {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])}
-     * instead.
-     */
-    @Deprecated
-    public PointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                   double[] startPoint) {
-        return optimizeInternal(maxEval, f, goalType, new InitialGuess(startPoint));
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param goalType Optimization type.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link InitialGuess}</li>
-     *  <li>{@link SimpleBounds}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value of the objective
-     * function.
-     * @since 3.1
-     */
-    public PointValuePair optimize(int maxEval,
-                                   FUNC f,
-                                   GoalType goalType,
-                                   OptimizationData... optData) {
-        return optimizeInternal(maxEval, f, goalType, optData);
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param f Objective function.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param startPoint Start point for optimization.
-     * @param maxEval Maximum number of function evaluations.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     * @deprecated As of 3.1. Please use
-     * {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])}
-     * instead.
-     */
-    @Deprecated
-    protected PointValuePair optimizeInternal(int maxEval, FUNC f, GoalType goalType,
-                                              double[] startPoint) {
-        return optimizeInternal(maxEval, f, goalType, new InitialGuess(startPoint));
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param goalType Optimization type.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link InitialGuess}</li>
-     *  <li>{@link SimpleBounds}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value of the objective
-     * function.
-     * @throws TooManyEvaluationsException if the maximal number of
-     * evaluations is exceeded.
-     * @since 3.1
-     */
-    protected PointValuePair optimizeInternal(int maxEval,
-                                              FUNC f,
-                                              GoalType goalType,
-                                              OptimizationData... optData)
-        throws TooManyEvaluationsException {
-        // Set internal state.
-        evaluations.setMaximalCount(maxEval);
-        evaluations.resetCount();
-        function = f;
-        goal = goalType;
-        // Retrieve other settings.
-        parseOptimizationData(optData);
-        // Check input consistency.
-        checkParameters();
-        // Perform computation.
-        return doOptimize();
-    }
-
-    /**
-     * Scans the list of (required and optional) optimization data that
-     * characterize the problem.
-     *
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link InitialGuess}</li>
-     *  <li>{@link SimpleBounds}</li>
-     * </ul>
-     */
-    private void parseOptimizationData(OptimizationData... optData) {
-        // The existing values (as set by the previous call) are reused if
-        // not provided in the argument list.
-        for (OptimizationData data : optData) {
-            if (data instanceof InitialGuess) {
-                start = ((InitialGuess) data).getInitialGuess();
-                continue;
-            }
-            if (data instanceof SimpleBounds) {
-                final SimpleBounds bounds = (SimpleBounds) data;
-                lowerBound = bounds.getLower();
-                upperBound = bounds.getUpper();
-                continue;
-            }
-        }
-    }
-
-    /**
-     * @return the optimization type.
-     */
-    public GoalType getGoalType() {
-        return goal;
-    }
-
-    /**
-     * @return the initial guess.
-     */
-    public double[] getStartPoint() {
-        return start == null ? null : start.clone();
-    }
-    /**
-     * @return the lower bounds.
-     * @since 3.1
-     */
-    public double[] getLowerBound() {
-        return lowerBound == null ? null : lowerBound.clone();
-    }
-    /**
-     * @return the upper bounds.
-     * @since 3.1
-     */
-    public double[] getUpperBound() {
-        return upperBound == null ? null : upperBound.clone();
-    }
-
-    /**
-     * Perform the bulk of the optimization algorithm.
-     *
-     * @return the point/value pair giving the optimal value of the
-     * objective function.
-     */
-    protected abstract PointValuePair doOptimize();
-
-    /**
-     * Check parameters consistency.
-     */
-    private void checkParameters() {
-        if (start != null) {
-            final int dim = start.length;
-            if (lowerBound != null) {
-                if (lowerBound.length != dim) {
-                    throw new DimensionMismatchException(lowerBound.length, dim);
-                }
-                for (int i = 0; i < dim; i++) {
-                    final double v = start[i];
-                    final double lo = lowerBound[i];
-                    if (v < lo) {
-                        throw new NumberIsTooSmallException(v, lo, true);
-                    }
-                }
-            }
-            if (upperBound != null) {
-                if (upperBound.length != dim) {
-                    throw new DimensionMismatchException(upperBound.length, dim);
-                }
-                for (int i = 0; i < dim; i++) {
-                    final double v = start[i];
-                    final double hi = upperBound[i];
-                    if (v > hi) {
-                        throw new NumberIsTooLargeException(v, hi, true);
-                    }
-                }
-            }
-
-            // If the bounds were not specified, the allowed interval is
-            // assumed to be [-inf, +inf].
-            if (lowerBound == null) {
-                lowerBound = new double[dim];
-                for (int i = 0; i < dim; i++) {
-                    lowerBound[i] = Double.NEGATIVE_INFINITY;
-                }
-            }
-            if (upperBound == null) {
-                upperBound = new double[dim];
-                for (int i = 0; i < dim; i++) {
-                    upperBound[i] = Double.POSITIVE_INFINITY;
-                }
-            }
-        }
-    }
-}
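
For code that still extends or calls this deprecated scalar base class, the equivalent
setup against the newer org.apache.commons.math4.optim API looks roughly like the
sketch below. It is only a sketch: it assumes the math4 optim classes mirror the
Commons Math 3.x classes of the same names, and the objective function, tolerances,
start point and class name (ScalarOptimSketch) are invented for illustration.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

    public class ScalarOptimSketch {
        public static void main(String[] args) {
            // f(x, y) = (x - 1)^2 + (y - 2)^2, unconstrained minimum at (1, 2).
            MultivariateFunction f = new MultivariateFunction() {
                @Override
                public double value(double[] p) {
                    final double dx = p[0] - 1;
                    final double dy = p[1] - 2;
                    return dx * dx + dy * dy;
                }
            };

            // Relative/absolute convergence thresholds for the simplex optimizer.
            SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-12);

            // The OptimizationData varargs play the role of the maxEval, goalType
            // and InitialGuess arguments parsed by the removed optimizeInternal(...).
            PointValuePair optimum = optimizer.optimize(new MaxEval(1000),
                                                        new ObjectiveFunction(f),
                                                        GoalType.MINIMIZE,
                                                        new InitialGuess(new double[] { 0, 0 }),
                                                        new NelderMeadSimplex(2));

            System.out.println(optimum.getPoint()[0] + " " + optimum.getPoint()[1]
                               + " -> " + optimum.getValue());
        }
    }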

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateSimpleBoundsOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateSimpleBoundsOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateSimpleBoundsOptimizer.java
deleted file mode 100644
index d179202..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateSimpleBoundsOptimizer.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.BaseMultivariateOptimizer;
-import org.apache.commons.math4.optimization.BaseMultivariateSimpleBoundsOptimizer;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleBounds;
-
-/**
- * Base class for implementing optimizers for multivariate scalar functions,
- * subject to simple bounds: the valid range of each parameter is an interval
- * that may be unbounded in one or both directions.
- * This base class handles the boilerplate methods associated with threshold
- * settings and iteration and evaluation counting.
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0), since the
- * {@link BaseAbstractMultivariateOptimizer base class} contains similar
- * functionality.
- * @since 3.0
- */
-@Deprecated
-public abstract class BaseAbstractMultivariateSimpleBoundsOptimizer<FUNC extends MultivariateFunction>
-    extends BaseAbstractMultivariateOptimizer<FUNC>
-    implements BaseMultivariateOptimizer<FUNC>,
-               BaseMultivariateSimpleBoundsOptimizer<FUNC> {
-    /**
-     * Simple constructor with default settings.
-     * The convergence checker is set to a
-     * {@link org.apache.commons.math4.optimization.SimpleValueChecker}.
-     *
-     * @see BaseAbstractMultivariateOptimizer#BaseAbstractMultivariateOptimizer()
-     * @deprecated See {@link org.apache.commons.math4.optimization.SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    protected BaseAbstractMultivariateSimpleBoundsOptimizer() {}
-
-    /**
-     * @param checker Convergence checker.
-     */
-    protected BaseAbstractMultivariateSimpleBoundsOptimizer(ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public PointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                   double[] startPoint) {
-        return super.optimizeInternal(maxEval, f, goalType,
-                                      new InitialGuess(startPoint));
-    }
-
-    /** {@inheritDoc} */
-    public PointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                   double[] startPoint,
-                                   double[] lower, double[] upper) {
-        return super.optimizeInternal(maxEval, f, goalType,
-                                      new InitialGuess(startPoint),
-                                      new SimpleBounds(lower, upper));
-    }
-}
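
The simple-bounds handling that this class added on top of the scalar base class is
covered by SimpleBounds in the optim package. A minimal sketch of a box-constrained
minimization follows; it again assumes the math4 optim classes mirror their math3
counterparts, and the choice of BOBYQAOptimizer, the bounds and the class name
(BoundedOptimSketch) are illustrative only.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.SimpleBounds;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer;

    public class BoundedOptimSketch {
        public static void main(String[] args) {
            // Same quadratic as before; restricting the search to [2, 5] x [3, 6]
            // moves the minimum to the corner (2, 3).
            MultivariateFunction f = new MultivariateFunction() {
                @Override
                public double value(double[] p) {
                    final double dx = p[0] - 1;
                    final double dy = p[1] - 2;
                    return dx * dx + dy * dy;
                }
            };

            // 2n+1 interpolation points; initial and stopping trust-region radii
            // chosen small enough for these narrow bounds.
            BOBYQAOptimizer optimizer = new BOBYQAOptimizer(5, 0.5, 1e-8);

            PointValuePair optimum = optimizer.optimize(
                    new MaxEval(500),
                    new ObjectiveFunction(f),
                    GoalType.MINIMIZE,
                    new InitialGuess(new double[] { 3, 4 }),
                    new SimpleBounds(new double[] { 2, 3 }, new double[] { 5, 6 }));

            System.out.println(optimum.getPoint()[0] + " " + optimum.getPoint()[1]);
        }
    }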

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateVectorOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateVectorOptimizer.java
deleted file mode 100644
index ccca86e..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/BaseAbstractMultivariateVectorOptimizer.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateVectorFunction;
-import org.apache.commons.math4.exception.DimensionMismatchException;
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.optimization.BaseMultivariateVectorOptimizer;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.InitialGuess;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-import org.apache.commons.math4.optimization.SimpleVectorValueChecker;
-import org.apache.commons.math4.optimization.Target;
-import org.apache.commons.math4.optimization.Weight;
-import org.apache.commons.math4.util.Incrementor;
-
-/**
- * Base class for implementing optimizers for multivariate vector functions.
- * This base class handles the boilerplate methods associated with threshold
- * settings and iteration and evaluation counting.
- *
- * @param <FUNC> the type of the objective function to be optimized
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public abstract class BaseAbstractMultivariateVectorOptimizer<FUNC extends MultivariateVectorFunction>
-    implements BaseMultivariateVectorOptimizer<FUNC> {
-    /** Evaluations counter. */
-    protected final Incrementor evaluations = new Incrementor();
-    /** Convergence checker. */
-    private ConvergenceChecker<PointVectorValuePair> checker;
-    /** Target value for the objective functions at optimum. */
-    private double[] target;
-    /** Weight matrix. */
-    private RealMatrix weightMatrix;
-    /** Weight for the least squares cost computation.
-     * @deprecated
-     */
-    @Deprecated
-    private double[] weight;
-    /** Initial guess. */
-    private double[] start;
-    /** Objective function. */
-    private FUNC function;
-
-    /**
-     * Simple constructor with default settings.
-     * The convergence check is set to a {@link SimpleVectorValueChecker}.
-     * @deprecated See {@link SimpleVectorValueChecker#SimpleVectorValueChecker()}
-     */
-    @Deprecated
-    protected BaseAbstractMultivariateVectorOptimizer() {
-        this(new SimpleVectorValueChecker());
-    }
-    /**
-     * @param checker Convergence checker.
-     */
-    protected BaseAbstractMultivariateVectorOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
-        this.checker = checker;
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return evaluations.getMaximalCount();
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return evaluations.getCount();
-    }
-
-    /** {@inheritDoc} */
-    public ConvergenceChecker<PointVectorValuePair> getConvergenceChecker() {
-        return checker;
-    }
-
-    /**
-     * Compute the objective function value.
-     *
-     * @param point Point at which the objective function must be evaluated.
-     * @return the objective function value at the specified point.
-     * @throws TooManyEvaluationsException if the maximal number of evaluations is
-     * exceeded.
-     */
-    protected double[] computeObjectiveValue(double[] point) {
-        try {
-            evaluations.incrementCount();
-        } catch (MaxCountExceededException e) {
-            throw new TooManyEvaluationsException(e.getMax());
-        }
-        return function.value(point);
-    }
-
-    /** {@inheritDoc}
-     *
-     * @deprecated As of 3.1. Please use
-     * {@link #optimize(int,MultivariateVectorFunction,OptimizationData[])}
-     * instead.
-     */
-    @Deprecated
-    public PointVectorValuePair optimize(int maxEval, FUNC f, double[] t, double[] w,
-                                         double[] startPoint) {
-        return optimizeInternal(maxEval, f, t, w, startPoint);
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link Target}</li>
-     *  <li>{@link Weight}</li>
-     *  <li>{@link InitialGuess}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value of the objective
-     * function.
-     * @throws TooManyEvaluationsException if the maximal number of
-     * evaluations is exceeded.
-     * @throws DimensionMismatchException if the initial guess, target, and weight
-     * arguments have inconsistent dimensions.
-     *
-     * @since 3.1
-     */
-    protected PointVectorValuePair optimize(int maxEval,
-                                            FUNC f,
-                                            OptimizationData... optData)
-        throws TooManyEvaluationsException,
-               DimensionMismatchException {
-        return optimizeInternal(maxEval, f, optData);
-    }
-
-    /**
-     * Optimize an objective function.
-     * Optimization is considered to be a weighted least-squares minimization.
-     * The cost function to be minimized is
-     * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
-     *
-     * @param maxEval Maximum number of function evaluations.
-     * @param f Objective function.
-     * @param t Target value for the objective functions at optimum.
-     * @param w Weights for the least squares cost computation.
-     * @param startPoint Start point for optimization.
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximal number of evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if
-     * any argument is {@code null}.
-     * @deprecated As of 3.1. Please use
-     * {@link #optimizeInternal(int,MultivariateVectorFunction,OptimizationData[])}
-     * instead.
-     */
-    @Deprecated
-    protected PointVectorValuePair optimizeInternal(final int maxEval, final FUNC f,
-                                                    final double[] t, final double[] w,
-                                                    final double[] startPoint) {
-        // Checks.
-        if (f == null) {
-            throw new NullArgumentException();
-        }
-        if (t == null) {
-            throw new NullArgumentException();
-        }
-        if (w == null) {
-            throw new NullArgumentException();
-        }
-        if (startPoint == null) {
-            throw new NullArgumentException();
-        }
-        if (t.length != w.length) {
-            throw new DimensionMismatchException(t.length, w.length);
-        }
-
-        return optimizeInternal(maxEval, f,
-                                new Target(t),
-                                new Weight(w),
-                                new InitialGuess(startPoint));
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link Target}</li>
-     *  <li>{@link Weight}</li>
-     *  <li>{@link InitialGuess}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value of the objective
-     * function.
-     * @throws TooManyEvaluationsException if the maximal number of
-     * evaluations is exceeded.
-     * @throws DimensionMismatchException if the initial guess, target, and weight
-     * arguments have inconsistent dimensions.
-     *
-     * @since 3.1
-     */
-    protected PointVectorValuePair optimizeInternal(int maxEval,
-                                                    FUNC f,
-                                                    OptimizationData... optData)
-        throws TooManyEvaluationsException,
-               DimensionMismatchException {
-        // Set internal state.
-        evaluations.setMaximalCount(maxEval);
-        evaluations.resetCount();
-        function = f;
-        // Retrieve other settings.
-        parseOptimizationData(optData);
-        // Check input consistency.
-        checkParameters();
-        // Allow subclasses to reset their own internal state.
-        setUp();
-        // Perform computation.
-        return doOptimize();
-    }
-
-    /**
-     * Gets the initial values of the optimized parameters.
-     *
-     * @return the initial guess.
-     */
-    public double[] getStartPoint() {
-        return start.clone();
-    }
-
-    /**
-     * Gets the weight matrix of the observations.
-     *
-     * @return the weight matrix.
-     * @since 3.1
-     */
-    public RealMatrix getWeight() {
-        return weightMatrix.copy();
-    }
-    /**
-     * Gets the observed values to be matched by the objective vector
-     * function.
-     *
-     * @return the target values.
-     * @since 3.1
-     */
-    public double[] getTarget() {
-        return target.clone();
-    }
-
-    /**
-     * Gets the objective vector function.
-     * Note that this access bypasses the evaluation counter.
-     *
-     * @return the objective vector function.
-     * @since 3.1
-     */
-    protected FUNC getObjectiveFunction() {
-        return function;
-    }
-
-    /**
-     * Perform the bulk of the optimization algorithm.
-     *
-     * @return the point/value pair giving the optimal value for the
-     * objective function.
-     */
-    protected abstract PointVectorValuePair doOptimize();
-
-    /**
-     * @return a reference to the {@link #target array}.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected double[] getTargetRef() {
-        return target;
-    }
-    /**
-     * @return a reference to the {@link #weight array}.
-     * @deprecated As of 3.1.
-     */
-    @Deprecated
-    protected double[] getWeightRef() {
-        return weight;
-    }
-
-    /**
-     * Method which a subclass <em>must</em> override whenever its internal
-     * state depends on the {@link OptimizationData input} parsed by this base
-     * class.
-     * It will be called after the parsing step performed in the
-     * {@link #optimize(int,MultivariateVectorFunction,OptimizationData[])
-     * optimize} method and just before {@link #doOptimize()}.
-     *
-     * @since 3.1
-     */
-    protected void setUp() {
-        // XXX Temporary code until the new internal data is used everywhere.
-        final int dim = target.length;
-        weight = new double[dim];
-        for (int i = 0; i < dim; i++) {
-            weight[i] = weightMatrix.getEntry(i, i);
-        }
-    }
-
-    /**
-     * Scans the list of (required and optional) optimization data that
-     * characterize the problem.
-     *
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link Target}</li>
-     *  <li>{@link Weight}</li>
-     *  <li>{@link InitialGuess}</li>
-     * </ul>
-     */
-    private void parseOptimizationData(OptimizationData... optData) {
-        // The existing values (as set by the previous call) are reused if
-        // not provided in the argument list.
-        for (OptimizationData data : optData) {
-            if (data instanceof Target) {
-                target = ((Target) data).getTarget();
-                continue;
-            }
-            if (data instanceof Weight) {
-                weightMatrix = ((Weight) data).getWeight();
-                continue;
-            }
-            if (data instanceof InitialGuess) {
-                start = ((InitialGuess) data).getInitialGuess();
-                continue;
-            }
-        }
-    }
-
-    /**
-     * Check parameters consistency.
-     *
-     * @throws DimensionMismatchException if {@link #target} and
-     * {@link #weightMatrix} have inconsistent dimensions.
-     */
-    private void checkParameters() {
-        if (target.length != weightMatrix.getColumnDimension()) {
-            throw new DimensionMismatchException(target.length,
-                                                 weightMatrix.getColumnDimension());
-        }
-    }
-}
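
The Target, Weight and InitialGuess inputs parsed above correspond to the target(...),
weight(...) and start(...) settings of the least-squares builder in the fitting
package that replaces the deprecated vector optimizers. The sketch below fits a
straight line y = a*x + b with Levenberg-Marquardt; it assumes the math4
LeastSquaresBuilder/LevenbergMarquardtOptimizer API matches math3, and the data,
model and class name (VectorOptimSketch) are invented for illustration.

    import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
    import org.apache.commons.math4.analysis.MultivariateVectorFunction;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math4.fitting.leastsquares.LevenbergMarquardtOptimizer;

    public class VectorOptimSketch {
        public static void main(String[] args) {
            final double[] x = { 0, 1, 2 };  // abscissae of the observations
            final double[] y = { 1, 3, 5 };  // observed values, i.e. the old Target

            // Model values: predicted_i = a * x_i + b with parameters p = {a, b}.
            MultivariateVectorFunction value = new MultivariateVectorFunction() {
                @Override
                public double[] value(double[] p) {
                    final double[] out = new double[x.length];
                    for (int i = 0; i < x.length; i++) {
                        out[i] = p[0] * x[i] + p[1];
                    }
                    return out;
                }
            };

            // Jacobian of the model with respect to a and b.
            MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
                @Override
                public double[][] value(double[] p) {
                    final double[][] jac = new double[x.length][2];
                    for (int i = 0; i < x.length; i++) {
                        jac[i][0] = x[i]; // d model_i / d a
                        jac[i][1] = 1;    // d model_i / d b
                    }
                    return jac;
                }
            };

            LeastSquaresProblem problem = new LeastSquaresBuilder()
                    .model(value, jacobian)
                    .target(y)
                    .start(new double[] { 0, 0 })
                    .maxEvaluations(1000)
                    .maxIterations(100)
                    .build();

            LeastSquaresOptimizer.Optimum optimum =
                    new LevenbergMarquardtOptimizer().optimize(problem);

            // The data lie exactly on y = 2x + 1, so a ~ 2 and b ~ 1.
            System.out.println(optimum.getPoint().getEntry(0) + " "
                               + optimum.getPoint().getEntry(1));
        }
    }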


[09/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/Relationship.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/Relationship.java b/src/main/java/org/apache/commons/math4/optimization/linear/Relationship.java
deleted file mode 100644
index 7675694..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/Relationship.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-/**
- * Types of relationships between two cells in a Solver {@link LinearConstraint}.
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public enum Relationship {
-
-    /** Equality relationship. */
-    EQ("="),
-
-    /** Lesser than or equal relationship. */
-    LEQ("<="),
-
-    /** Greater than or equal relationship. */
-    GEQ(">=");
-
-    /** Display string for the relationship. */
-    private final String stringValue;
-
-    /** Simple constructor.
-     * @param stringValue display string for the relationship
-     */
-    private Relationship(String stringValue) {
-        this.stringValue = stringValue;
-    }
-
-    @Override
-    public String toString() {
-        return stringValue;
-    }
-
-    /**
-     * Get the relationship obtained when multiplying all coefficients by -1.
-     * @return relationship obtained when multiplying all coefficients by -1
-     */
-    public Relationship oppositeRelationship() {
-        switch (this) {
-        case LEQ :
-            return GEQ;
-        case GEQ :
-            return LEQ;
-        default :
-            return EQ;
-        }
-    }
-
-}
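
An equivalent enum, with the same three values and the same oppositeRelationship()
used when constraints are normalized to a positive right-hand side, lives in
org.apache.commons.math4.optim.linear, assuming that package mirrors its math3
counterpart. A tiny illustration with a made-up constraint:

    import org.apache.commons.math4.optim.linear.LinearConstraint;
    import org.apache.commons.math4.optim.linear.Relationship;

    public class RelationshipSketch {
        public static void main(String[] args) {
            // x + 2y <= 14, expressed with the non-deprecated classes.
            LinearConstraint c =
                    new LinearConstraint(new double[] { 1, 2 }, Relationship.LEQ, 14);

            // Multiplying a constraint by -1 flips LEQ <-> GEQ and leaves EQ alone.
            System.out.println(c.getRelationship());                      // "<="
            System.out.println(Relationship.LEQ.oppositeRelationship());  // ">="
        }
    }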

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/SimplexSolver.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/SimplexSolver.java b/src/main/java/org/apache/commons/math4/optimization/linear/SimplexSolver.java
deleted file mode 100644
index 23db158..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/SimplexSolver.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.util.Precision;
-
-
-/**
- * Solves a linear problem using the Two-Phase Simplex Method.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class SimplexSolver extends AbstractLinearOptimizer {
-
-    /** Default amount of error to accept for algorithm convergence. */
-    private static final double DEFAULT_EPSILON = 1.0e-6;
-
-    /** Default amount of error to accept in floating point comparisons (as ulps). */
-    private static final int DEFAULT_ULPS = 10;
-
-    /** Amount of error to accept for algorithm convergence. */
-    private final double epsilon;
-
-    /** Amount of error to accept in floating point comparisons (as ulps). */
-    private final int maxUlps;
-
-    /**
-     * Build a simplex solver with default settings.
-     */
-    public SimplexSolver() {
-        this(DEFAULT_EPSILON, DEFAULT_ULPS);
-    }
-
-    /**
-     * Build a simplex solver with a specified accepted amount of error
-     * @param epsilon the amount of error to accept for algorithm convergence
-     * @param maxUlps amount of error to accept in floating point comparisons
-     */
-    public SimplexSolver(final double epsilon, final int maxUlps) {
-        this.epsilon = epsilon;
-        this.maxUlps = maxUlps;
-    }
-
-    /**
-     * Returns the column with the most negative coefficient in the objective function row.
-     * @param tableau simple tableau for the problem
-     * @return column with the most negative coefficient
-     */
-    private Integer getPivotColumn(SimplexTableau tableau) {
-        double minValue = 0;
-        Integer minPos = null;
-        for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getWidth() - 1; i++) {
-            final double entry = tableau.getEntry(0, i);
-            // check if the entry is strictly smaller than the current minimum
-            // do not use a ulp/epsilon check
-            if (entry < minValue) {
-                minValue = entry;
-                minPos = i;
-            }
-        }
-        return minPos;
-    }
-
-    /**
-     * Returns the row with the minimum ratio as given by the minimum ratio test (MRT).
-     * @param tableau simple tableau for the problem
-     * @param col the column to test the ratio of.  See {@link #getPivotColumn(SimplexTableau)}
-     * @return row with the minimum ratio
-     */
-    private Integer getPivotRow(SimplexTableau tableau, final int col) {
-        // create a list of all the rows that tie for the lowest score in the minimum ratio test
-        List<Integer> minRatioPositions = new ArrayList<Integer>();
-        double minRatio = Double.MAX_VALUE;
-        for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) {
-            final double rhs = tableau.getEntry(i, tableau.getWidth() - 1);
-            final double entry = tableau.getEntry(i, col);
-
-            if (Precision.compareTo(entry, 0d, maxUlps) > 0) {
-                final double ratio = rhs / entry;
-                // check if the entry is strictly equal to the current min ratio
-                // do not use a ulp/epsilon check
-                final int cmp = Double.compare(ratio, minRatio);
-                if (cmp == 0) {
-                    minRatioPositions.add(i);
-                } else if (cmp < 0) {
-                    minRatio = ratio;
-                    minRatioPositions = new ArrayList<Integer>();
-                    minRatioPositions.add(i);
-                }
-            }
-        }
-
-        if (minRatioPositions.size() == 0) {
-            return null;
-        } else if (minRatioPositions.size() > 1) {
-            // there's a degeneracy as indicated by a tie in the minimum ratio test
-
-            // 1. check if there's an artificial variable that can be forced out of the basis
-            if (tableau.getNumArtificialVariables() > 0) {
-                for (Integer row : minRatioPositions) {
-                    for (int i = 0; i < tableau.getNumArtificialVariables(); i++) {
-                        int column = i + tableau.getArtificialVariableOffset();
-                        final double entry = tableau.getEntry(row, column);
-                        if (Precision.equals(entry, 1d, maxUlps) && row.equals(tableau.getBasicRow(column))) {
-                            return row;
-                        }
-                    }
-                }
-            }
-
-            // 2. apply Bland's rule to prevent cycling:
-            //    take the row for which the corresponding basic variable has the smallest index
-            //
-            // see http://www.stanford.edu/class/msande310/blandrule.pdf
-            // see http://en.wikipedia.org/wiki/Bland%27s_rule (not equivalent to the above paper)
-            //
-            // Additional heuristic: if we did not get a solution after half of maxIterations
-            //                       revert to the simple case of just returning the top-most row
-            // This heuristic is based on empirical data gathered while investigating MATH-828.
-            if (getIterations() < getMaxIterations() / 2) {
-                Integer minRow = null;
-                int minIndex = tableau.getWidth();
-                final int varStart = tableau.getNumObjectiveFunctions();
-                final int varEnd = tableau.getWidth() - 1;
-                for (Integer row : minRatioPositions) {
-                    for (int i = varStart; i < varEnd && !row.equals(minRow); i++) {
-                        final Integer basicRow = tableau.getBasicRow(i);
-                        if (basicRow != null && basicRow.equals(row) && i < minIndex) {
-                            minIndex = i;
-                            minRow = row;
-                        }
-                    }
-                }
-                return minRow;
-            }
-        }
-        return minRatioPositions.get(0);
-    }
-
-    /**
-     * Runs one iteration of the Simplex method on the given model.
-     * @param tableau simple tableau for the problem
-     * @throws MaxCountExceededException if the maximal iteration count has been exceeded
-     * @throws UnboundedSolutionException if the model is found not to have a bounded solution
-     */
-    protected void doIteration(final SimplexTableau tableau)
-        throws MaxCountExceededException, UnboundedSolutionException {
-
-        incrementIterationsCounter();
-
-        Integer pivotCol = getPivotColumn(tableau);
-        Integer pivotRow = getPivotRow(tableau, pivotCol);
-        if (pivotRow == null) {
-            throw new UnboundedSolutionException();
-        }
-
-        // set the pivot element to 1
-        double pivotVal = tableau.getEntry(pivotRow, pivotCol);
-        tableau.divideRow(pivotRow, pivotVal);
-
-        // set the rest of the pivot column to 0
-        for (int i = 0; i < tableau.getHeight(); i++) {
-            if (i != pivotRow) {
-                final double multiplier = tableau.getEntry(i, pivotCol);
-                tableau.subtractRow(i, pivotRow, multiplier);
-            }
-        }
-    }
-
-    /**
-     * Solves Phase 1 of the Simplex method.
-     * @param tableau simple tableau for the problem
-     * @throws MaxCountExceededException if the maximal iteration count has been exceeded
-     * @throws UnboundedSolutionException if the model is found not to have a bounded solution
-     * @throws NoFeasibleSolutionException if there is no feasible solution
-     */
-    protected void solvePhase1(final SimplexTableau tableau)
-        throws MaxCountExceededException, UnboundedSolutionException, NoFeasibleSolutionException {
-
-        // make sure we're in Phase 1
-        if (tableau.getNumArtificialVariables() == 0) {
-            return;
-        }
-
-        while (!tableau.isOptimal()) {
-            doIteration(tableau);
-        }
-
-        // if W is not zero then we have no feasible solution
-        if (!Precision.equals(tableau.getEntry(0, tableau.getRhsOffset()), 0d, epsilon)) {
-            throw new NoFeasibleSolutionException();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public PointValuePair doOptimize()
-        throws MaxCountExceededException, UnboundedSolutionException, NoFeasibleSolutionException {
-        final SimplexTableau tableau =
-            new SimplexTableau(getFunction(),
-                               getConstraints(),
-                               getGoalType(),
-                               restrictToNonNegative(),
-                               epsilon,
-                               maxUlps);
-
-        solvePhase1(tableau);
-        tableau.dropPhase1Objective();
-
-        while (!tableau.isOptimal()) {
-            doIteration(tableau);
-        }
-        return tableau.getSolution();
-    }
-
-}
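
A complete two-phase simplex solve with the replacement classes in
org.apache.commons.math4.optim.linear looks roughly like the sketch below, assuming
that package mirrors the math3 optim.linear API; the objective, constraints and class
name (LinearProgramSketch) are made up, similar in size to the problem documented in
the SimplexTableau javadoc further down.

    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.commons.math4.optim.MaxIter;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.linear.LinearConstraint;
    import org.apache.commons.math4.optim.linear.LinearConstraintSet;
    import org.apache.commons.math4.optim.linear.LinearObjectiveFunction;
    import org.apache.commons.math4.optim.linear.NonNegativeConstraint;
    import org.apache.commons.math4.optim.linear.Relationship;
    import org.apache.commons.math4.optim.linear.SimplexSolver;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;

    public class LinearProgramSketch {
        public static void main(String[] args) {
            // Maximize 15 x1 + 10 x2 subject to x1 <= 2, x2 <= 3, x1 + x2 <= 4.
            LinearObjectiveFunction f =
                    new LinearObjectiveFunction(new double[] { 15, 10 }, 0);

            Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
            constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
            constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
            constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.LEQ, 4));

            PointValuePair solution = new SimplexSolver().optimize(
                    new MaxIter(100),
                    f,
                    new LinearConstraintSet(constraints),
                    GoalType.MAXIMIZE,
                    new NonNegativeConstraint(true));

            // Expected optimum: x1 = 2, x2 = 2, objective value 50.
            System.out.println(solution.getPoint()[0] + " " + solution.getPoint()[1]
                               + " -> " + solution.getValue());
        }
    }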

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/SimplexTableau.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math4/optimization/linear/SimplexTableau.java
deleted file mode 100644
index 16f07ef..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/SimplexTableau.java
+++ /dev/null
@@ -1,635 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.math4.linear.Array2DRowRealMatrix;
-import org.apache.commons.math4.linear.MatrixUtils;
-import org.apache.commons.math4.linear.RealMatrix;
-import org.apache.commons.math4.linear.RealVector;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Precision;
-
-/**
- * A tableau for use in the Simplex method.
- *
- * <p>
- * Example:
- * <pre>
- *   W |  Z |  x1 |  x2 |  x- | s1 |  s2 |  a1 |  RHS
- * ---------------------------------------------------
- *  -1    0    0     0     0     0     0     1     0   &lt;= phase 1 objective
- *   0    1   -15   -10    0     0     0     0     0   &lt;= phase 2 objective
- *   0    0    1     0     0     1     0     0     2   &lt;= constraint 1
- *   0    0    0     1     0     0     1     0     3   &lt;= constraint 2
- *   0    0    1     1     0     0     0     1     4   &lt;= constraint 3
- * </pre>
- * W: Phase 1 objective function<br/>
- * Z: Phase 2 objective function<br/>
- * x1 &amp; x2: Decision variables<br/>
- * x-: Extra decision variable to allow for negative values<br/>
- * s1 &amp; s2: Slack/Surplus variables<br/>
- * a1: Artificial variable<br/>
- * RHS: Right hand side<br/>
- * </p>
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-class SimplexTableau implements Serializable {
-
-    /** Column label for negative vars. */
-    private static final String NEGATIVE_VAR_COLUMN_LABEL = "x-";
-
-    /** Default amount of error to accept in floating point comparisons (as ulps). */
-    private static final int DEFAULT_ULPS = 10;
-
-    /** The cut-off threshold to zero-out entries. */
-    private static final double CUTOFF_THRESHOLD = 1e-12;
-
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = -1369660067587938365L;
-
-    /** Linear objective function. */
-    private final LinearObjectiveFunction f;
-
-    /** Linear constraints. */
-    private final List<LinearConstraint> constraints;
-
-    /** Whether to restrict the variables to non-negative values. */
-    private final boolean restrictToNonNegative;
-
-    /** The variables each column represents */
-    private final List<String> columnLabels = new ArrayList<String>();
-
-    /** Simple tableau. */
-    private transient RealMatrix tableau;
-
-    /** Number of decision variables. */
-    private final int numDecisionVariables;
-
-    /** Number of slack variables. */
-    private final int numSlackVariables;
-
-    /** Number of artificial variables. */
-    private int numArtificialVariables;
-
-    /** Amount of error to accept when checking for optimality. */
-    private final double epsilon;
-
-    /** Amount of error to accept in floating point comparisons. */
-    private final int maxUlps;
-
-    /**
-     * Build a tableau for a linear problem.
-     * @param f linear objective function
-     * @param constraints linear constraints
-     * @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}
-     * @param restrictToNonNegative whether to restrict the variables to non-negative values
-     * @param epsilon amount of error to accept when checking for optimality
-     */
-    SimplexTableau(final LinearObjectiveFunction f,
-                   final Collection<LinearConstraint> constraints,
-                   final GoalType goalType, final boolean restrictToNonNegative,
-                   final double epsilon) {
-        this(f, constraints, goalType, restrictToNonNegative, epsilon, DEFAULT_ULPS);
-    }
-
-    /**
-     * Build a tableau for a linear problem.
-     * @param f linear objective function
-     * @param constraints linear constraints
-     * @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}
-     * @param restrictToNonNegative whether to restrict the variables to non-negative values
-     * @param epsilon amount of error to accept when checking for optimality
-     * @param maxUlps amount of error to accept in floating point comparisons
-     */
-    SimplexTableau(final LinearObjectiveFunction f,
-                   final Collection<LinearConstraint> constraints,
-                   final GoalType goalType, final boolean restrictToNonNegative,
-                   final double epsilon,
-                   final int maxUlps) {
-        this.f                      = f;
-        this.constraints            = normalizeConstraints(constraints);
-        this.restrictToNonNegative  = restrictToNonNegative;
-        this.epsilon                = epsilon;
-        this.maxUlps                = maxUlps;
-        this.numDecisionVariables   = f.getCoefficients().getDimension() +
-                                      (restrictToNonNegative ? 0 : 1);
-        this.numSlackVariables      = getConstraintTypeCounts(Relationship.LEQ) +
-                                      getConstraintTypeCounts(Relationship.GEQ);
-        this.numArtificialVariables = getConstraintTypeCounts(Relationship.EQ) +
-                                      getConstraintTypeCounts(Relationship.GEQ);
-        this.tableau = createTableau(goalType == GoalType.MAXIMIZE);
-        initializeColumnLabels();
-    }
-
-    /**
-     * Initialize the labels for the columns.
-     */
-    protected void initializeColumnLabels() {
-      if (getNumObjectiveFunctions() == 2) {
-        columnLabels.add("W");
-      }
-      columnLabels.add("Z");
-      for (int i = 0; i < getOriginalNumDecisionVariables(); i++) {
-        columnLabels.add("x" + i);
-      }
-      if (!restrictToNonNegative) {
-        columnLabels.add(NEGATIVE_VAR_COLUMN_LABEL);
-      }
-      for (int i = 0; i < getNumSlackVariables(); i++) {
-        columnLabels.add("s" + i);
-      }
-      for (int i = 0; i < getNumArtificialVariables(); i++) {
-        columnLabels.add("a" + i);
-      }
-      columnLabels.add("RHS");
-    }
-
-    /**
-     * Create the tableau by itself.
-     * @param maximize if true, goal is to maximize the objective function
-     * @return created tableau
-     */
-    protected RealMatrix createTableau(final boolean maximize) {
-
-        // create a matrix of the correct size
-        int width = numDecisionVariables + numSlackVariables +
-        numArtificialVariables + getNumObjectiveFunctions() + 1; // + 1 is for RHS
-        int height = constraints.size() + getNumObjectiveFunctions();
-        Array2DRowRealMatrix matrix = new Array2DRowRealMatrix(height, width);
-
-        // initialize the objective function rows
-        if (getNumObjectiveFunctions() == 2) {
-            matrix.setEntry(0, 0, -1);
-        }
-        int zIndex = (getNumObjectiveFunctions() == 1) ? 0 : 1;
-        matrix.setEntry(zIndex, zIndex, maximize ? 1 : -1);
-        RealVector objectiveCoefficients =
-            maximize ? f.getCoefficients().mapMultiply(-1) : f.getCoefficients();
-        copyArray(objectiveCoefficients.toArray(), matrix.getDataRef()[zIndex]);
-        matrix.setEntry(zIndex, width - 1,
-            maximize ? f.getConstantTerm() : -1 * f.getConstantTerm());
-
-        if (!restrictToNonNegative) {
-            matrix.setEntry(zIndex, getSlackVariableOffset() - 1,
-                getInvertedCoefficientSum(objectiveCoefficients));
-        }
-
-        // initialize the constraint rows
-        int slackVar = 0;
-        int artificialVar = 0;
-        for (int i = 0; i < constraints.size(); i++) {
-            LinearConstraint constraint = constraints.get(i);
-            int row = getNumObjectiveFunctions() + i;
-
-            // decision variable coefficients
-            copyArray(constraint.getCoefficients().toArray(), matrix.getDataRef()[row]);
-
-            // x-
-            if (!restrictToNonNegative) {
-                matrix.setEntry(row, getSlackVariableOffset() - 1,
-                    getInvertedCoefficientSum(constraint.getCoefficients()));
-            }
-
-            // RHS
-            matrix.setEntry(row, width - 1, constraint.getValue());
-
-            // slack variables
-            if (constraint.getRelationship() == Relationship.LEQ) {
-                matrix.setEntry(row, getSlackVariableOffset() + slackVar++, 1);  // slack
-            } else if (constraint.getRelationship() == Relationship.GEQ) {
-                matrix.setEntry(row, getSlackVariableOffset() + slackVar++, -1); // excess
-            }
-
-            // artificial variables
-            if ((constraint.getRelationship() == Relationship.EQ) ||
-                    (constraint.getRelationship() == Relationship.GEQ)) {
-                matrix.setEntry(0, getArtificialVariableOffset() + artificialVar, 1);
-                matrix.setEntry(row, getArtificialVariableOffset() + artificialVar++, 1);
-                matrix.setRowVector(0, matrix.getRowVector(0).subtract(matrix.getRowVector(row)));
-            }
-        }
-
-        return matrix;
-    }
-
-    /**
-     * Get new versions of the constraints which have positive right hand sides.
-     * @param originalConstraints original (not normalized) constraints
-     * @return new versions of the constraints
-     */
-    public List<LinearConstraint> normalizeConstraints(Collection<LinearConstraint> originalConstraints) {
-        List<LinearConstraint> normalized = new ArrayList<LinearConstraint>(originalConstraints.size());
-        for (LinearConstraint constraint : originalConstraints) {
-            normalized.add(normalize(constraint));
-        }
-        return normalized;
-    }
-
-    /**
-     * Get a new equation equivalent to this one with a positive right hand side.
-     * @param constraint reference constraint
-     * @return new equation
-     */
-    private LinearConstraint normalize(final LinearConstraint constraint) {
-        if (constraint.getValue() < 0) {
-            return new LinearConstraint(constraint.getCoefficients().mapMultiply(-1),
-                                        constraint.getRelationship().oppositeRelationship(),
-                                        -1 * constraint.getValue());
-        }
-        return new LinearConstraint(constraint.getCoefficients(),
-                                    constraint.getRelationship(), constraint.getValue());
-    }
-
-    /**
-     * Get the number of objective functions in this tableau.
-     * @return 2 for Phase 1.  1 for Phase 2.
-     */
-    protected final int getNumObjectiveFunctions() {
-        return this.numArtificialVariables > 0 ? 2 : 1;
-    }
-
-    /**
-     * Get a count of constraints corresponding to a specified relationship.
-     * @param relationship relationship to count
-     * @return number of constraints with the specified relationship
-     */
-    private int getConstraintTypeCounts(final Relationship relationship) {
-        int count = 0;
-        for (final LinearConstraint constraint : constraints) {
-            if (constraint.getRelationship() == relationship) {
-                ++count;
-            }
-        }
-        return count;
-    }
-
-    /**
-     * Get -1 times the sum of all coefficients in the given vector.
-     * @param coefficients coefficients to sum
-     * @return -1 times the sum of all coefficients in the given vector.
-     */
-    protected static double getInvertedCoefficientSum(final RealVector coefficients) {
-        double sum = 0;
-        for (double coefficient : coefficients.toArray()) {
-            sum -= coefficient;
-        }
-        return sum;
-    }
-
-    /**
-     * Checks whether the given column is basic.
-     * @param col index of the column to check
-     * @return the row that the variable is basic in.  null if the column is not basic
-     */
-    protected Integer getBasicRow(final int col) {
-        Integer row = null;
-        for (int i = 0; i < getHeight(); i++) {
-            final double entry = getEntry(i, col);
-            if (Precision.equals(entry, 1d, maxUlps) && (row == null)) {
-                row = i;
-            } else if (!Precision.equals(entry, 0d, maxUlps)) {
-                return null;
-            }
-        }
-        return row;
-    }
-
-    /**
-     * Removes the phase 1 objective function, positive cost non-artificial variables,
-     * and the non-basic artificial variables from this tableau.
-     */
-    protected void dropPhase1Objective() {
-        if (getNumObjectiveFunctions() == 1) {
-            return;
-        }
-
-        Set<Integer> columnsToDrop = new TreeSet<Integer>();
-        columnsToDrop.add(0);
-
-        // positive cost non-artificial variables
-        for (int i = getNumObjectiveFunctions(); i < getArtificialVariableOffset(); i++) {
-            final double entry = tableau.getEntry(0, i);
-            if (Precision.compareTo(entry, 0d, epsilon) > 0) {
-                columnsToDrop.add(i);
-            }
-        }
-
-        // non-basic artificial variables
-        for (int i = 0; i < getNumArtificialVariables(); i++) {
-            int col = i + getArtificialVariableOffset();
-            if (getBasicRow(col) == null) {
-                columnsToDrop.add(col);
-            }
-        }
-
-        double[][] matrix = new double[getHeight() - 1][getWidth() - columnsToDrop.size()];
-        for (int i = 1; i < getHeight(); i++) {
-            int col = 0;
-            for (int j = 0; j < getWidth(); j++) {
-                if (!columnsToDrop.contains(j)) {
-                    matrix[i - 1][col++] = tableau.getEntry(i, j);
-                }
-            }
-        }
-
-        // remove the columns in reverse order so the indices are correct
-        Integer[] drop = columnsToDrop.toArray(new Integer[columnsToDrop.size()]);
-        for (int i = drop.length - 1; i >= 0; i--) {
-            columnLabels.remove((int) drop[i]);
-        }
-
-        this.tableau = new Array2DRowRealMatrix(matrix);
-        this.numArtificialVariables = 0;
-    }
-
-    /**
-     * @param src the source array
-     * @param dest the destination array
-     */
-    private void copyArray(final double[] src, final double[] dest) {
-        System.arraycopy(src, 0, dest, getNumObjectiveFunctions(), src.length);
-    }
-
-    /**
-     * Returns whether the problem is at an optimal state.
-     * @return whether the model has been solved
-     */
-    boolean isOptimal() {
-        for (int i = getNumObjectiveFunctions(); i < getWidth() - 1; i++) {
-            final double entry = tableau.getEntry(0, i);
-            if (Precision.compareTo(entry, 0d, epsilon) < 0) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Get the current solution.
-     * @return current solution
-     */
-    protected PointValuePair getSolution() {
-      int negativeVarColumn = columnLabels.indexOf(NEGATIVE_VAR_COLUMN_LABEL);
-      Integer negativeVarBasicRow = negativeVarColumn > 0 ? getBasicRow(negativeVarColumn) : null;
-      double mostNegative = negativeVarBasicRow == null ? 0 : getEntry(negativeVarBasicRow, getRhsOffset());
-
-      Set<Integer> basicRows = new HashSet<Integer>();
-      double[] coefficients = new double[getOriginalNumDecisionVariables()];
-      for (int i = 0; i < coefficients.length; i++) {
-          int colIndex = columnLabels.indexOf("x" + i);
-          if (colIndex < 0) {
-            coefficients[i] = 0;
-            continue;
-          }
-          Integer basicRow = getBasicRow(colIndex);
-          if (basicRow != null && basicRow == 0) {
-              // if the basic row is found to be the objective function row
-              // set the coefficient to 0 -> this case handles unconstrained
-              // variables that are still part of the objective function
-              coefficients[i] = 0;
-          } else if (basicRows.contains(basicRow)) {
-              // if multiple variables can take a given value
-              // then we choose the first and set the rest equal to 0
-              coefficients[i] = 0 - (restrictToNonNegative ? 0 : mostNegative);
-          } else {
-              basicRows.add(basicRow);
-              coefficients[i] =
-                  (basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
-                  (restrictToNonNegative ? 0 : mostNegative);
-          }
-      }
-      return new PointValuePair(coefficients, f.getValue(coefficients));
-    }
-
-    /**
-     * Divides all entries of one row by a given divisor.
-     * <p>
-     * After application of this operation, the following will hold:
-     * <pre>dividendRow = dividendRow / divisor</pre>
-     *
-     * @param dividendRow index of the row
-     * @param divisor value of the divisor
-     */
-    protected void divideRow(final int dividendRow, final double divisor) {
-        for (int j = 0; j < getWidth(); j++) {
-            tableau.setEntry(dividendRow, j, tableau.getEntry(dividendRow, j) / divisor);
-        }
-    }
-
-    /**
-     * Subtracts a multiple of one row from another.
-     * <p>
-     * After application of this operation, the following will hold:
-     * <pre>minuendRow = minuendRow - multiple * subtrahendRow</pre>
-     *
-     * @param minuendRow row index
-     * @param subtrahendRow row index
-     * @param multiple multiplication factor
-     */
-    protected void subtractRow(final int minuendRow, final int subtrahendRow,
-                               final double multiple) {
-        for (int i = 0; i < getWidth(); i++) {
-            double result = tableau.getEntry(minuendRow, i) - tableau.getEntry(subtrahendRow, i) * multiple;
-            // cut-off values smaller than the CUTOFF_THRESHOLD, otherwise may lead to numerical instabilities
-            if (FastMath.abs(result) < CUTOFF_THRESHOLD) {
-                result = 0.0;
-            }
-            tableau.setEntry(minuendRow, i, result);
-        }
-    }
-
-    /**
-     * Get the width of the tableau.
-     * @return width of the tableau
-     */
-    protected final int getWidth() {
-        return tableau.getColumnDimension();
-    }
-
-    /**
-     * Get the height of the tableau.
-     * @return height of the tableau
-     */
-    protected final int getHeight() {
-        return tableau.getRowDimension();
-    }
-
-    /**
-     * Get an entry of the tableau.
-     * @param row row index
-     * @param column column index
-     * @return entry at (row, column)
-     */
-    protected final double getEntry(final int row, final int column) {
-        return tableau.getEntry(row, column);
-    }
-
-    /**
-     * Set an entry of the tableau.
-     * @param row row index
-     * @param column column index
-     * @param value for the entry
-     */
-    protected final void setEntry(final int row, final int column,
-                                  final double value) {
-        tableau.setEntry(row, column, value);
-    }
-
-    /**
-     * Get the offset of the first slack variable.
-     * @return offset of the first slack variable
-     */
-    protected final int getSlackVariableOffset() {
-        return getNumObjectiveFunctions() + numDecisionVariables;
-    }
-
-    /**
-     * Get the offset of the first artificial variable.
-     * @return offset of the first artificial variable
-     */
-    protected final int getArtificialVariableOffset() {
-        return getNumObjectiveFunctions() + numDecisionVariables + numSlackVariables;
-    }
-
-    /**
-     * Get the offset of the right hand side.
-     * @return offset of the right hand side
-     */
-    protected final int getRhsOffset() {
-        return getWidth() - 1;
-    }
-
-    /**
-     * Get the number of decision variables.
-     * <p>
-     * If variables are not restricted to positive values, this will include 1 extra decision variable to represent
-     * the absolute value of the most negative variable.
-     *
-     * @return number of decision variables
-     * @see #getOriginalNumDecisionVariables()
-     */
-    protected final int getNumDecisionVariables() {
-        return numDecisionVariables;
-    }
-
-    /**
-     * Get the original number of decision variables.
-     * @return original number of decision variables
-     * @see #getNumDecisionVariables()
-     */
-    protected final int getOriginalNumDecisionVariables() {
-        return f.getCoefficients().getDimension();
-    }
-
-    /**
-     * Get the number of slack variables.
-     * @return number of slack variables
-     */
-    protected final int getNumSlackVariables() {
-        return numSlackVariables;
-    }
-
-    /**
-     * Get the number of artificial variables.
-     * @return number of artificial variables
-     */
-    protected final int getNumArtificialVariables() {
-        return numArtificialVariables;
-    }
-
-    /**
-     * Get the tableau data.
-     * @return tableau data
-     */
-    protected final double[][] getData() {
-        return tableau.getData();
-    }
-
-    @Override
-    public boolean equals(Object other) {
-
-      if (this == other) {
-        return true;
-      }
-
-      if (other instanceof SimplexTableau) {
-          SimplexTableau rhs = (SimplexTableau) other;
-          return (restrictToNonNegative  == rhs.restrictToNonNegative) &&
-                 (numDecisionVariables   == rhs.numDecisionVariables) &&
-                 (numSlackVariables      == rhs.numSlackVariables) &&
-                 (numArtificialVariables == rhs.numArtificialVariables) &&
-                 (epsilon                == rhs.epsilon) &&
-                 (maxUlps                == rhs.maxUlps) &&
-                 f.equals(rhs.f) &&
-                 constraints.equals(rhs.constraints) &&
-                 tableau.equals(rhs.tableau);
-      }
-      return false;
-    }
-
-    @Override
-    public int hashCode() {
-        return Boolean.valueOf(restrictToNonNegative).hashCode() ^
-               numDecisionVariables ^
-               numSlackVariables ^
-               numArtificialVariables ^
-               Double.valueOf(epsilon).hashCode() ^
-               maxUlps ^
-               f.hashCode() ^
-               constraints.hashCode() ^
-               tableau.hashCode();
-    }
-
-    /**
-     * Serialize the instance.
-     * @param oos stream where object should be written
-     * @throws IOException if object cannot be written to stream
-     */
-    private void writeObject(ObjectOutputStream oos)
-        throws IOException {
-        oos.defaultWriteObject();
-        MatrixUtils.serializeRealMatrix(tableau, oos);
-    }
-
-    /**
-     * Deserialize the instance.
-     * @param ois stream from which the object should be read
-     * @throws ClassNotFoundException if a class in the stream cannot be found
-     * @throws IOException if object cannot be read from the stream
-     */
-    private void readObject(ObjectInputStream ois)
-      throws ClassNotFoundException, IOException {
-        ois.defaultReadObject();
-        MatrixUtils.deserializeRealMatrix(this, "tableau", ois);
-    }
-}
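
For reference, divideRow and subtractRow above are the two elementary operations
behind the simplex pivot. The following standalone sketch (illustrative names,
operating on a plain double[][] rather than the removed SimplexTableau) shows how
such operations combine into one Gauss-Jordan pivot step; the cutoff constant is
an assumption playing the same role as CUTOFF_THRESHOLD above.

    public final class PivotSketch {
        // Assumption: same order of magnitude as the removed class' CUTOFF_THRESHOLD.
        private static final double CUTOFF_THRESHOLD = 1e-12;

        /** Pivots the tableau around (pivotRow, pivotCol). */
        static void pivot(double[][] tableau, int pivotRow, int pivotCol) {
            final int width = tableau[0].length;

            // Scale the pivot row so that the pivot element becomes 1 (cf. divideRow).
            final double pivotVal = tableau[pivotRow][pivotCol];
            for (int j = 0; j < width; j++) {
                tableau[pivotRow][j] /= pivotVal;
            }

            // Eliminate the pivot column from every other row (cf. subtractRow),
            // zeroing tiny results to avoid numerical noise.
            for (int i = 0; i < tableau.length; i++) {
                if (i == pivotRow) {
                    continue;
                }
                final double multiplier = tableau[i][pivotCol];
                for (int j = 0; j < width; j++) {
                    double result = tableau[i][j] - multiplier * tableau[pivotRow][j];
                    if (Math.abs(result) < CUTOFF_THRESHOLD) {
                        result = 0.0;
                    }
                    tableau[i][j] = result;
                }
            }
        }
    }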

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/UnboundedSolutionException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/UnboundedSolutionException.java b/src/main/java/org/apache/commons/math4/optimization/linear/UnboundedSolutionException.java
deleted file mode 100644
index 1332440..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/UnboundedSolutionException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.linear;
-
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-
-/**
- * This class represents exceptions thrown by optimizers when a solution escapes to infinity.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class UnboundedSolutionException extends MathIllegalStateException {
-
-    /** Serializable version identifier. */
-    private static final long serialVersionUID = 940539497277290619L;
-
-    /**
-     * Simple constructor using a default message.
-     */
-    public UnboundedSolutionException() {
-        super(LocalizedFormats.UNBOUNDED_SOLUTION);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/linear/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/linear/package-info.java b/src/main/java/org/apache/commons/math4/optimization/linear/package-info.java
deleted file mode 100644
index 3e7c424..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/linear/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *
- * This package provides optimization algorithms for linear constrained problems.
- *
- */
-package org.apache.commons.math4.optimization.linear;

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/package-info.java b/src/main/java/org/apache/commons/math4/optimization/package-info.java
deleted file mode 100644
index f92cb0f..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/package-info.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * <h2>All classes and sub-packages of this package are deprecated.</h2>
- * <h3>Please use their replacements, to be found under
- *  <ul>
- *   <li>{@link org.apache.commons.math4.optim}</li>
- *   <li>{@link org.apache.commons.math4.fitting}</li>
- *  </ul>
- * </h3>
- *
- * <p>
- * This package provides common interfaces for the optimization algorithms
- * provided in sub-packages. The main interfaces defines optimizers and convergence
- * checkers. The functions that are optimized by the algorithms provided by this
- * package and its sub-packages are a subset of the one defined in the <code>analysis</code>
- * package, namely the real and vector valued functions. These functions are called
- * objective function here. When the goal is to minimize, the functions are often called
- * cost function, this name is not used in this package.
- * </p>
- *
- * <p>
- * Optimizers are the algorithms that will either minimize or maximize the objective function
- * by changing its input variable set until an optimal set is found. There are only four
- * interfaces defining the common behavior of optimizers, one for each supported type of objective
- * function:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.univariate.UnivariateOptimizer
- *      UnivariateOptimizer} for {@link org.apache.commons.math4.analysis.UnivariateFunction
- *      univariate real functions}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateOptimizer
- *      MultivariateOptimizer} for {@link org.apache.commons.math4.analysis.MultivariateFunction
- *      multivariate real functions}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableOptimizer
- *      MultivariateDifferentiableOptimizer} for {@link
- *      org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableFunction
- *      multivariate differentiable real functions}</li>
- *  <li>{@link org.apache.commons.math4.optimization.MultivariateDifferentiableVectorOptimizer
- *      MultivariateDifferentiableVectorOptimizer} for {@link
- *      org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction
- *      multivariate differentiable vectorial functions}</li>
- * </ul>
- * </p>
- *
- * <p>
- * Although there are only four types of supported optimizers, it is possible to optimize a
- * {@link org.apache.commons.math4.analysis.MultivariateVectorFunction
- * non-differentiable multivariate vectorial function} by converting it to a {@link
- * org.apache.commons.math4.analysis.MultivariateFunction non-differentiable multivariate
- * real function} thanks to the {@link
- * org.apache.commons.math4.optimization.LeastSquaresConverter LeastSquaresConverter} helper class.
- * The transformed function can be optimized using any implementation of the {@link
- * org.apache.commons.math4.optimization.MultivariateOptimizer MultivariateOptimizer} interface.
- * </p>
- *
- * <p>
- * For each of the four types of supported optimizers, there is a special implementation which
- * wraps a classical optimizer in order to add a multi-start feature to it. This feature calls the
- * underlying optimizer several times in sequence with different starting points and returns
- * the best optimum found, or all optima if desired. This is a classical way to avoid being
- * trapped in a local extremum when looking for a global one.
- * </p>
- *
- */
-package org.apache.commons.math4.optimization;
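
Since the notice above points to org.apache.commons.math4.optim as the replacement,
here is a minimal usage sketch of an equivalent direct-search minimization through
that API. It assumes the math4 optim classes mirror their Commons Math 3 counterparts
of the same names (SimplexOptimizer, NelderMeadSimplex, ObjectiveFunction, MaxEval,
InitialGuess); the objective function and tolerances are illustrative.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optim.InitialGuess;
    import org.apache.commons.math4.optim.MaxEval;
    import org.apache.commons.math4.optim.PointValuePair;
    import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
    import org.apache.commons.math4.optim.nonlinear.scalar.ObjectiveFunction;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
    import org.apache.commons.math4.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

    public final class ReplacementUsageSketch {
        public static void main(String[] args) {
            // Sample objective: the two-dimensional Rosenbrock function.
            MultivariateFunction rosenbrock = new MultivariateFunction() {
                public double value(double[] p) {
                    final double x = p[0];
                    final double y = p[1];
                    return 100 * (y - x * x) * (y - x * x) + (1 - x) * (1 - x);
                }
            };

            SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-12);
            PointValuePair optimum = optimizer.optimize(new MaxEval(10000),
                                                        new ObjectiveFunction(rosenbrock),
                                                        GoalType.MINIMIZE,
                                                        new InitialGuess(new double[] { -1, 1 }),
                                                        new NelderMeadSimplex(2));

            System.out.println("min f = " + optimum.getValue() + " at ("
                               + optimum.getPoint()[0] + ", " + optimum.getPoint()[1] + ")");
        }
    }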

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/BaseAbstractUnivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/BaseAbstractUnivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/univariate/BaseAbstractUnivariateOptimizer.java
deleted file mode 100644
index 6b6a9b1..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/BaseAbstractUnivariateOptimizer.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.util.Incrementor;
-
-/**
- * Provide a default implementation for several functions useful to generic
- * optimizers.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public abstract class BaseAbstractUnivariateOptimizer
-    implements UnivariateOptimizer {
-    /** Convergence checker. */
-    private final ConvergenceChecker<UnivariatePointValuePair> checker;
-    /** Evaluations counter. */
-    private final Incrementor evaluations = new Incrementor();
-    /** Optimization type */
-    private GoalType goal;
-    /** Lower end of search interval. */
-    private double searchMin;
-    /** Higher end of search interval. */
-    private double searchMax;
-    /** Initial guess. */
-    private double searchStart;
-    /** Function to optimize. */
-    private UnivariateFunction function;
-
-    /**
-     * @param checker Convergence checking procedure.
-     */
-    protected BaseAbstractUnivariateOptimizer(ConvergenceChecker<UnivariatePointValuePair> checker) {
-        this.checker = checker;
-    }
-
-    /** {@inheritDoc} */
-    public int getMaxEvaluations() {
-        return evaluations.getMaximalCount();
-    }
-
-    /** {@inheritDoc} */
-    public int getEvaluations() {
-        return evaluations.getCount();
-    }
-
-    /**
-     * @return the optimization type.
-     */
-    public GoalType getGoalType() {
-        return goal;
-    }
-    /**
-     * @return the lower end of the search interval.
-     */
-    public double getMin() {
-        return searchMin;
-    }
-    /**
-     * @return the higher end of the search interval.
-     */
-    public double getMax() {
-        return searchMax;
-    }
-    /**
-     * @return the initial guess.
-     */
-    public double getStartValue() {
-        return searchStart;
-    }
-
-    /**
-     * Compute the objective function value.
-     *
-     * @param point Point at which the objective function must be evaluated.
-     * @return the objective function value at specified point.
-     * @throws TooManyEvaluationsException if the maximal number of evaluations
-     * is exceeded.
-     */
-    protected double computeObjectiveValue(double point) {
-        try {
-            evaluations.incrementCount();
-        } catch (MaxCountExceededException e) {
-            throw new TooManyEvaluationsException(e.getMax());
-        }
-        return function.value(point);
-    }
-
-    /** {@inheritDoc} */
-    public UnivariatePointValuePair optimize(int maxEval, UnivariateFunction f,
-                                             GoalType goalType,
-                                             double min, double max,
-                                             double startValue) {
-        // Checks.
-        if (f == null) {
-            throw new NullArgumentException();
-        }
-        if (goalType == null) {
-            throw new NullArgumentException();
-        }
-
-        // Reset.
-        searchMin = min;
-        searchMax = max;
-        searchStart = startValue;
-        goal = goalType;
-        function = f;
-        evaluations.setMaximalCount(maxEval);
-        evaluations.resetCount();
-
-        // Perform computation.
-        return doOptimize();
-    }
-
-    /** {@inheritDoc} */
-    public UnivariatePointValuePair optimize(int maxEval,
-                                             UnivariateFunction f,
-                                             GoalType goalType,
-                                             double min, double max){
-        return optimize(maxEval, f, goalType, min, max, min + 0.5 * (max - min));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    public ConvergenceChecker<UnivariatePointValuePair> getConvergenceChecker() {
-        return checker;
-    }
-
-    /**
-     * Method for implementing actual optimization algorithms in derived
-     * classes.
-     *
-     * @return the optimum and its corresponding function value.
-     * @throws TooManyEvaluationsException if the maximal number of evaluations
-     * is exceeded.
-     */
-    protected abstract UnivariatePointValuePair doOptimize();
-}
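
The computeObjectiveValue/optimize pair above implements a simple evaluation-budget
pattern: every function call goes through an Incrementor, and a counter overflow is
translated into TooManyEvaluationsException. A standalone sketch of that pattern
(the class name is illustrative, not part of the library):

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.exception.MaxCountExceededException;
    import org.apache.commons.math4.exception.TooManyEvaluationsException;
    import org.apache.commons.math4.util.Incrementor;

    public final class EvaluationBudgetSketch {
        /** Guarded counter for function evaluations. */
        private final Incrementor evaluations = new Incrementor();
        /** Function being evaluated. */
        private final UnivariateFunction function;

        public EvaluationBudgetSketch(UnivariateFunction function, int maxEval) {
            this.function = function;
            evaluations.setMaximalCount(maxEval);
            evaluations.resetCount();
        }

        /** Evaluates the function, converting a counter overflow into the optimizer exception. */
        public double value(double x) {
            try {
                evaluations.incrementCount();
            } catch (MaxCountExceededException e) {
                throw new TooManyEvaluationsException(e.getMax());
            }
            return function.value(x);
        }
    }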

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/BaseUnivariateOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/BaseUnivariateOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/univariate/BaseUnivariateOptimizer.java
deleted file mode 100644
index 67e16ca..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/BaseUnivariateOptimizer.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.optimization.BaseOptimizer;
-import org.apache.commons.math4.optimization.GoalType;
-
-/**
- * This interface is mainly intended to enforce the internal coherence of
- * Commons-Math. Users of the API are advised to base their code on
- * the following interfaces:
- * <ul>
- *  <li>{@link org.apache.commons.math4.optimization.univariate.UnivariateOptimizer}</li>
- * </ul>
- *
- * @param <FUNC> Type of the objective function to be optimized.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public interface BaseUnivariateOptimizer<FUNC extends UnivariateFunction>
-    extends BaseOptimizer<UnivariatePointValuePair> {
-    /**
-     * Find an optimum in the given interval.
-     *
-     * An optimizer may require that the interval brackets a single optimum.
-     *
-     * @param f Function to optimize.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param min Lower bound for the interval.
-     * @param max Upper bound for the interval.
-     * @param maxEval Maximum number of function evaluations.
-     * @return a (point, value) pair where the function is optimum.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximum evaluation count is exceeded.
-     * @throws org.apache.commons.math4.exception.ConvergenceException
-     * if the optimizer detects a convergence problem.
-     * @throws IllegalArgumentException if {@code min > max} or the endpoints
-     * do not satisfy the requirements specified by the optimizer.
-     */
-    UnivariatePointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                          double min, double max);
-
-    /**
-     * Find an optimum in the given interval, starting at startValue.
-     * An optimizer may require that the interval brackets a single optimum.
-     *
-     * @param f Function to optimize.
-     * @param goalType Type of optimization goal: either
-     * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
-     * @param min Lower bound for the interval.
-     * @param max Upper bound for the interval.
-     * @param startValue Start value to use.
-     * @param maxEval Maximum number of function evaluations.
-     * @return a (point, value) pair where the function is optimum.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the maximum evaluation count is exceeded.
-     * @throws org.apache.commons.math4.exception.ConvergenceException if the
-     * optimizer detects a convergence problem.
-     * @throws IllegalArgumentException if {@code min > max} or the endpoints
-     * do not satisfy the requirements specified by the optimizer.
-     * @throws org.apache.commons.math4.exception.NullArgumentException if any
-     * argument is {@code null}.
-     */
-    UnivariatePointValuePair optimize(int maxEval, FUNC f, GoalType goalType,
-                                          double min, double max,
-                                          double startValue);
-}

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/BracketFinder.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/BracketFinder.java b/src/main/java/org/apache/commons/math4/optimization/univariate/BracketFinder.java
deleted file mode 100644
index 2727a2f..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/BracketFinder.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.exception.MaxCountExceededException;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.TooManyEvaluationsException;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Incrementor;
-
-/**
- * Provide an interval that brackets a local optimum of a function.
- * This code is based on a Python implementation (from <em>SciPy</em>,
- * module {@code optimize.py} v0.5).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.2
- */
-@Deprecated
-public class BracketFinder {
-    /** Tolerance to avoid division by zero. */
-    private static final double EPS_MIN = 1e-21;
-    /**
-     * Golden section.
-     */
-    private static final double GOLD = 1.618034;
-    /**
-     * Factor for expanding the interval.
-     */
-    private final double growLimit;
-    /**
-     * Counter for function evaluations.
-     */
-    private final Incrementor evaluations = new Incrementor();
-    /**
-     * Lower bound of the bracket.
-     */
-    private double lo;
-    /**
-     * Higher bound of the bracket.
-     */
-    private double hi;
-    /**
-     * Point inside the bracket.
-     */
-    private double mid;
-    /**
-     * Function value at {@link #lo}.
-     */
-    private double fLo;
-    /**
-     * Function value at {@link #hi}.
-     */
-    private double fHi;
-    /**
-     * Function value at {@link #mid}.
-     */
-    private double fMid;
-
-    /**
-     * Constructor with default values {@code 100, 50} (see the
-     * {@link #BracketFinder(double,int) other constructor}).
-     */
-    public BracketFinder() {
-        this(100, 50);
-    }
-
-    /**
-     * Create a bracketing interval finder.
-     *
-     * @param growLimit Expanding factor.
-     * @param maxEvaluations Maximum number of evaluations allowed for finding
-     * a bracketing interval.
-     */
-    public BracketFinder(double growLimit,
-                         int maxEvaluations) {
-        if (growLimit <= 0) {
-            throw new NotStrictlyPositiveException(growLimit);
-        }
-        if (maxEvaluations <= 0) {
-            throw new NotStrictlyPositiveException(maxEvaluations);
-        }
-
-        this.growLimit = growLimit;
-        evaluations.setMaximalCount(maxEvaluations);
-    }
-
-    /**
-     * Search new points that bracket a local optimum of the function.
-     *
-     * @param func Function whose optimum should be bracketed.
-     * @param goal {@link GoalType Goal type}.
-     * @param xA Initial point.
-     * @param xB Initial point.
-     * @throws TooManyEvaluationsException if the maximum number of evaluations
-     * is exceeded.
-     */
-    public void search(UnivariateFunction func, GoalType goal, double xA, double xB) {
-        evaluations.resetCount();
-        final boolean isMinim = goal == GoalType.MINIMIZE;
-
-        double fA = eval(func, xA);
-        double fB = eval(func, xB);
-        if (isMinim ?
-            fA < fB :
-            fA > fB) {
-
-            double tmp = xA;
-            xA = xB;
-            xB = tmp;
-
-            tmp = fA;
-            fA = fB;
-            fB = tmp;
-        }
-
-        double xC = xB + GOLD * (xB - xA);
-        double fC = eval(func, xC);
-
-        while (isMinim ? fC < fB : fC > fB) {
-            double tmp1 = (xB - xA) * (fB - fC);
-            double tmp2 = (xB - xC) * (fB - fA);
-
-            double val = tmp2 - tmp1;
-            double denom = FastMath.abs(val) < EPS_MIN ? 2 * EPS_MIN : 2 * val;
-
-            double w = xB - ((xB - xC) * tmp2 - (xB - xA) * tmp1) / denom;
-            double wLim = xB + growLimit * (xC - xB);
-
-            double fW;
-            if ((w - xC) * (xB - w) > 0) {
-                fW = eval(func, w);
-                if (isMinim ?
-                    fW < fC :
-                    fW > fC) {
-                    xA = xB;
-                    xB = w;
-                    fA = fB;
-                    fB = fW;
-                    break;
-                } else if (isMinim ?
-                           fW > fB :
-                           fW < fB) {
-                    xC = w;
-                    fC = fW;
-                    break;
-                }
-                w = xC + GOLD * (xC - xB);
-                fW = eval(func, w);
-            } else if ((w - wLim) * (wLim - xC) >= 0) {
-                w = wLim;
-                fW = eval(func, w);
-            } else if ((w - wLim) * (xC - w) > 0) {
-                fW = eval(func, w);
-                if (isMinim ?
-                    fW < fC :
-                    fW > fC) {
-                    xB = xC;
-                    xC = w;
-                    w = xC + GOLD * (xC - xB);
-                    fB = fC;
-                    fC = fW;
-                    fW = eval(func, w);
-                }
-            } else {
-                w = xC + GOLD * (xC - xB);
-                fW = eval(func, w);
-            }
-
-            xA = xB;
-            fA = fB;
-            xB = xC;
-            fB = fC;
-            xC = w;
-            fC = fW;
-        }
-
-        lo = xA;
-        fLo = fA;
-        mid = xB;
-        fMid = fB;
-        hi = xC;
-        fHi = fC;
-
-        if (lo > hi) {
-            double tmp = lo;
-            lo = hi;
-            hi = tmp;
-
-            tmp = fLo;
-            fLo = fHi;
-            fHi = tmp;
-        }
-    }
-
-    /**
-     * @return the maximum number of evaluations.
-     */
-    public int getMaxEvaluations() {
-        return evaluations.getMaximalCount();
-    }
-
-    /**
-     * @return the number of evaluations.
-     */
-    public int getEvaluations() {
-        return evaluations.getCount();
-    }
-
-    /**
-     * @return the lower bound of the bracket.
-     * @see #getFLo()
-     */
-    public double getLo() {
-        return lo;
-    }
-
-    /**
-     * Get function value at {@link #getLo()}.
-     * @return function value at {@link #getLo()}
-     */
-    public double getFLo() {
-        return fLo;
-    }
-
-    /**
-     * @return the higher bound of the bracket.
-     * @see #getFHi()
-     */
-    public double getHi() {
-        return hi;
-    }
-
-    /**
-     * Get function value at {@link #getHi()}.
-     * @return function value at {@link #getHi()}
-     */
-    public double getFHi() {
-        return fHi;
-    }
-
-    /**
-     * @return a point in the middle of the bracket.
-     * @see #getFMid()
-     */
-    public double getMid() {
-        return mid;
-    }
-
-    /**
-     * Get function value at {@link #getMid()}.
-     * @return function value at {@link #getMid()}
-     */
-    public double getFMid() {
-        return fMid;
-    }
-
-    /**
-     * @param f Function.
-     * @param x Argument.
-     * @return {@code f(x)}
-     * @throws TooManyEvaluationsException if the maximal number of evaluations is
-     * exceeded.
-     */
-    private double eval(UnivariateFunction f, double x) {
-        try {
-            evaluations.incrementCount();
-        } catch (MaxCountExceededException e) {
-            throw new TooManyEvaluationsException(e.getMax());
-        }
-        return f.value(x);
-    }
-}
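
A short usage sketch of the class as it existed before removal: starting from the two
points 3 and 4, it brackets the local minimum of sin(x) near 3*pi/2, after which
getLo(), getMid() and getHi() return the bracketing triplet (Sin is just a sample
function; the starting points are illustrative).

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.analysis.function.Sin;
    import org.apache.commons.math4.optimization.GoalType;
    import org.apache.commons.math4.optimization.univariate.BracketFinder;

    public final class BracketFinderUsageSketch {
        public static void main(String[] args) {
            UnivariateFunction f = new Sin();
            BracketFinder bracketing = new BracketFinder(); // growLimit = 100, maxEvaluations = 50

            // Search a bracket around a local minimum of sin(x).
            bracketing.search(f, GoalType.MINIMIZE, 3, 4);

            System.out.println("lo=" + bracketing.getLo()
                               + " mid=" + bracketing.getMid()
                               + " hi=" + bracketing.getHi());
        }
    }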

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/BrentOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/BrentOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/univariate/BrentOptimizer.java
deleted file mode 100644
index a7d39df..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/BrentOptimizer.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.Precision;
-
-/**
- * For a function defined on some interval {@code (lo, hi)}, this class
- * finds an approximation {@code x} to the point at which the function
- * attains its minimum.
- * It implements Richard Brent's algorithm (from his book "Algorithms for
- * Minimization without Derivatives", p. 79) for finding minima of real
- * univariate functions.
- * <br/>
- * This code is an adaptation, partly based on the Python code from SciPy
- * (module "optimize.py" v0.5); the original algorithm is also modified
- * <ul>
- *  <li>to use an initial guess provided by the user,</li>
- *  <li>to ensure that the best point encountered is the one returned.</li>
- * </ul>
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class BrentOptimizer extends BaseAbstractUnivariateOptimizer {
-    /**
-     * Golden section.
-     */
-    private static final double GOLDEN_SECTION = 0.5 * (3 - FastMath.sqrt(5));
-    /**
-     * Minimum relative tolerance.
-     */
-    private static final double MIN_RELATIVE_TOLERANCE = 2 * FastMath.ulp(1d);
-    /**
-     * Relative threshold.
-     */
-    private final double relativeThreshold;
-    /**
-     * Absolute threshold.
-     */
-    private final double absoluteThreshold;
-
-    /**
-     * The arguments are used to implement the original stopping criterion
-     * of Brent's algorithm.
-     * {@code abs} and {@code rel} define a tolerance
-     * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
-     * <em>2 macheps</em> and preferably not much less than <em>sqrt(macheps)</em>,
-     * where <em>macheps</em> is the relative machine precision. {@code abs} must
-     * be positive.
-     *
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     * @param checker Additional, user-defined, convergence checking
-     * procedure.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     */
-    public BrentOptimizer(double rel,
-                          double abs,
-                          ConvergenceChecker<UnivariatePointValuePair> checker) {
-        super(checker);
-
-        if (rel < MIN_RELATIVE_TOLERANCE) {
-            throw new NumberIsTooSmallException(rel, MIN_RELATIVE_TOLERANCE, true);
-        }
-        if (abs <= 0) {
-            throw new NotStrictlyPositiveException(abs);
-        }
-
-        relativeThreshold = rel;
-        absoluteThreshold = abs;
-    }
-
-    /**
-     * The arguments are used for implementing the original stopping criterion
-     * of Brent's algorithm.
-     * {@code abs} and {@code rel} define a tolerance
-     * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
-     * <em>2 macheps</em> and preferably not much less than <em>sqrt(macheps)</em>,
-     * where <em>macheps</em> is the relative machine precision. {@code abs} must
-     * be positive.
-     *
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     */
-    public BrentOptimizer(double rel,
-                          double abs) {
-        this(rel, abs, null);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected UnivariatePointValuePair doOptimize() {
-        final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
-        final double lo = getMin();
-        final double mid = getStartValue();
-        final double hi = getMax();
-
-        // Optional additional convergence criteria.
-        final ConvergenceChecker<UnivariatePointValuePair> checker
-            = getConvergenceChecker();
-
-        double a;
-        double b;
-        if (lo < hi) {
-            a = lo;
-            b = hi;
-        } else {
-            a = hi;
-            b = lo;
-        }
-
-        double x = mid;
-        double v = x;
-        double w = x;
-        double d = 0;
-        double e = 0;
-        double fx = computeObjectiveValue(x);
-        if (!isMinim) {
-            fx = -fx;
-        }
-        double fv = fx;
-        double fw = fx;
-
-        UnivariatePointValuePair previous = null;
-        UnivariatePointValuePair current
-            = new UnivariatePointValuePair(x, isMinim ? fx : -fx);
-        // Best point encountered so far (which is the initial guess).
-        UnivariatePointValuePair best = current;
-
-        int iter = 0;
-        while (true) {
-            final double m = 0.5 * (a + b);
-            final double tol1 = relativeThreshold * FastMath.abs(x) + absoluteThreshold;
-            final double tol2 = 2 * tol1;
-
-            // Default stopping criterion.
-            final boolean stop = FastMath.abs(x - m) <= tol2 - 0.5 * (b - a);
-            if (!stop) {
-                double p = 0;
-                double q = 0;
-                double r = 0;
-                double u = 0;
-
-                if (FastMath.abs(e) > tol1) { // Fit parabola.
-                    r = (x - w) * (fx - fv);
-                    q = (x - v) * (fx - fw);
-                    p = (x - v) * q - (x - w) * r;
-                    q = 2 * (q - r);
-
-                    if (q > 0) {
-                        p = -p;
-                    } else {
-                        q = -q;
-                    }
-
-                    r = e;
-                    e = d;
-
-                    if (p > q * (a - x) &&
-                        p < q * (b - x) &&
-                        FastMath.abs(p) < FastMath.abs(0.5 * q * r)) {
-                        // Parabolic interpolation step.
-                        d = p / q;
-                        u = x + d;
-
-                        // f must not be evaluated too close to a or b.
-                        if (u - a < tol2 || b - u < tol2) {
-                            if (x <= m) {
-                                d = tol1;
-                            } else {
-                                d = -tol1;
-                            }
-                        }
-                    } else {
-                        // Golden section step.
-                        if (x < m) {
-                            e = b - x;
-                        } else {
-                            e = a - x;
-                        }
-                        d = GOLDEN_SECTION * e;
-                    }
-                } else {
-                    // Golden section step.
-                    if (x < m) {
-                        e = b - x;
-                    } else {
-                        e = a - x;
-                    }
-                    d = GOLDEN_SECTION * e;
-                }
-
-                // Update by at least "tol1".
-                if (FastMath.abs(d) < tol1) {
-                    if (d >= 0) {
-                        u = x + tol1;
-                    } else {
-                        u = x - tol1;
-                    }
-                } else {
-                    u = x + d;
-                }
-
-                double fu = computeObjectiveValue(u);
-                if (!isMinim) {
-                    fu = -fu;
-                }
-
-                // User-defined convergence checker.
-                previous = current;
-                current = new UnivariatePointValuePair(u, isMinim ? fu : -fu);
-                best = best(best,
-                            best(previous,
-                                 current,
-                                 isMinim),
-                            isMinim);
-
-                if (checker != null && checker.converged(iter, previous, current)) {
-                    return best;
-                }
-
-                // Update a, b, v, w and x.
-                if (fu <= fx) {
-                    if (u < x) {
-                        b = x;
-                    } else {
-                        a = x;
-                    }
-                    v = w;
-                    fv = fw;
-                    w = x;
-                    fw = fx;
-                    x = u;
-                    fx = fu;
-                } else {
-                    if (u < x) {
-                        a = u;
-                    } else {
-                        b = u;
-                    }
-                    if (fu <= fw ||
-                        Precision.equals(w, x)) {
-                        v = w;
-                        fv = fw;
-                        w = u;
-                        fw = fu;
-                    } else if (fu <= fv ||
-                               Precision.equals(v, x) ||
-                               Precision.equals(v, w)) {
-                        v = u;
-                        fv = fu;
-                    }
-                }
-            } else { // Default termination (Brent's criterion).
-                return best(best,
-                            best(previous,
-                                 current,
-                                 isMinim),
-                            isMinim);
-            }
-            ++iter;
-        }
-    }
-
-    /**
-     * Selects the best of two points.
-     *
-     * @param a Point and value.
-     * @param b Point and value.
-     * @param isMinim {@code true} if the selected point must be the one with
-     * the lowest value.
-     * @return the best point, or {@code null} if {@code a} and {@code b} are
-     * both {@code null}. When {@code a} and {@code b} have the same function
-     * value, {@code a} is returned.
-     */
-    private UnivariatePointValuePair best(UnivariatePointValuePair a,
-                                          UnivariatePointValuePair b,
-                                          boolean isMinim) {
-        if (a == null) {
-            return b;
-        }
-        if (b == null) {
-            return a;
-        }
-
-        if (isMinim) {
-            return a.getValue() <= b.getValue() ? a : b;
-        } else {
-            return a.getValue() >= b.getValue() ? a : b;
-        }
-    }
-}
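
For reference, a usage sketch of the optimizer as it existed before removal, using the
five-argument optimize method inherited from BaseAbstractUnivariateOptimizer above
(Sin is just a sample function; thresholds and the search interval are illustrative):

    import org.apache.commons.math4.analysis.UnivariateFunction;
    import org.apache.commons.math4.analysis.function.Sin;
    import org.apache.commons.math4.optimization.GoalType;
    import org.apache.commons.math4.optimization.univariate.BrentOptimizer;
    import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;

    public final class BrentOptimizerUsageSketch {
        public static void main(String[] args) {
            UnivariateFunction f = new Sin();

            // rel and abs define the stopping tolerance tol = rel * |x| + abs.
            BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);

            // The minimum of sin(x) on [4, 5] is at 3*pi/2, approximately 4.71238898.
            UnivariatePointValuePair result = optimizer.optimize(200, f, GoalType.MINIMIZE, 4, 5);

            System.out.println("x* = " + result.getPoint() + ", f(x*) = " + result.getValue());
        }
    }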

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueChecker.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueChecker.java b/src/main/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueChecker.java
deleted file mode 100644
index 29928e1..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/univariate/SimpleUnivariateValueChecker.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.univariate;
-
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.optimization.AbstractConvergenceChecker;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Simple implementation of the
- * {@link org.apache.commons.math4.optimization.ConvergenceChecker} interface
- * that uses only objective function values.
- *
- * Convergence is considered to have been reached if either the relative
- * difference between the objective function values is smaller than a
- * threshold or the absolute difference between the objective
- * function values is smaller than another threshold.
- * <br/>
- * The {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)
- * converged} method will also return {@code true} if the number of iterations
- * has been set (see {@link #SimpleUnivariateValueChecker(double,double,int)
- * this constructor}).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.1
- */
-@Deprecated
-public class SimpleUnivariateValueChecker
-    extends AbstractConvergenceChecker<UnivariatePointValuePair> {
-    /**
-     * If {@link #maxIterationCount} is set to this value, the number of
-     * iterations will never cause
-     * {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)}
-     * to return {@code true}.
-     */
-    private static final int ITERATION_CHECK_DISABLED = -1;
-    /**
-     * Number of iterations after which the
-     * {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)}
-     * method will return true (unless the check is disabled).
-     */
-    private final int maxIterationCount;
-
-    /**
-     * Build an instance with default thresholds.
-     * @deprecated See {@link AbstractConvergenceChecker#AbstractConvergenceChecker()}
-     */
-    @Deprecated
-    public SimpleUnivariateValueChecker() {
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /** Build an instance with specified thresholds.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     */
-    public SimpleUnivariateValueChecker(final double relativeThreshold,
-                                        final double absoluteThreshold) {
-        super(relativeThreshold, absoluteThreshold);
-        maxIterationCount = ITERATION_CHECK_DISABLED;
-    }
-
-    /**
-     * Builds an instance with specified thresholds.
-     *
-     * In order to perform only relative checks, the absolute tolerance
-     * must be set to a negative value. In order to perform only absolute
-     * checks, the relative tolerance must be set to a negative value.
-     *
-     * @param relativeThreshold relative tolerance threshold
-     * @param absoluteThreshold absolute tolerance threshold
-     * @param maxIter Maximum iteration count.
-     * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
-     *
-     * @since 3.1
-     */
-    public SimpleUnivariateValueChecker(final double relativeThreshold,
-                                        final double absoluteThreshold,
-                                        final int maxIter) {
-        super(relativeThreshold, absoluteThreshold);
-
-        if (maxIter <= 0) {
-            throw new NotStrictlyPositiveException(maxIter);
-        }
-        maxIterationCount = maxIter;
-    }
-
-    /**
-     * Check if the optimization algorithm has converged considering the
-     * last two points.
-     * This method may be called several times from the same algorithm
-     * iteration with different points. This can be detected by checking the
-     * iteration number at each call if needed. Each time this method is
-     * called, the previous and current point correspond to points with the
-     * same role at each iteration, so they can be compared. As an example,
-     * simplex-based algorithms call this method for all points of the simplex,
-     * not only for the best or worst ones.
-     *
-     * @param iteration Index of current iteration
-     * @param previous Best point in the previous iteration.
-     * @param current Best point in the current iteration.
-     * @return {@code true} if the algorithm has converged.
-     */
-    @Override
-    public boolean converged(final int iteration,
-                             final UnivariatePointValuePair previous,
-                             final UnivariatePointValuePair current) {
-        if (maxIterationCount != ITERATION_CHECK_DISABLED && iteration >= maxIterationCount) {
-            return true;
-        }
-
-        final double p = previous.getValue();
-        final double c = current.getValue();
-        final double difference = FastMath.abs(p - c);
-        final double size = FastMath.max(FastMath.abs(p), FastMath.abs(c));
-        return difference <= size * getRelativeThreshold() ||
-            difference <= getAbsoluteThreshold();
-    }
-}
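
A short sketch of how this checker is typically plugged into a univariate optimizer,
here through the BrentOptimizer constructor shown earlier in this commit (all
thresholds and the iteration cap are illustrative):

    import org.apache.commons.math4.optimization.ConvergenceChecker;
    import org.apache.commons.math4.optimization.univariate.BrentOptimizer;
    import org.apache.commons.math4.optimization.univariate.SimpleUnivariateValueChecker;
    import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;

    public final class CheckerUsageSketch {
        public static void main(String[] args) {
            // Converge when successive best values agree to 1e-8 relatively or 1e-10
            // absolutely, or after at most 100 iterations (optional third argument).
            ConvergenceChecker<UnivariatePointValuePair> checker =
                new SimpleUnivariateValueChecker(1e-8, 1e-10, 100);

            BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14, checker);
            // ... call optimizer.optimize(...) as in the previous sketch.
        }
    }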


[12/18] [math] Remove deprecated optimization package.

Posted by tn...@apache.org.
http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/NelderMeadSimplex.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/NelderMeadSimplex.java b/src/main/java/org/apache/commons/math4/optimization/direct/NelderMeadSimplex.java
deleted file mode 100644
index f193ccf..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/NelderMeadSimplex.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.optimization.PointValuePair;
-
-/**
- * This class implements the Nelder-Mead simplex algorithm.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@Deprecated
-public class NelderMeadSimplex extends AbstractSimplex {
-    /** Default value for {@link #rho}: {@value}. */
-    private static final double DEFAULT_RHO = 1;
-    /** Default value for {@link #khi}: {@value}. */
-    private static final double DEFAULT_KHI = 2;
-    /** Default value for {@link #gamma}: {@value}. */
-    private static final double DEFAULT_GAMMA = 0.5;
-    /** Default value for {@link #sigma}: {@value}. */
-    private static final double DEFAULT_SIGMA = 0.5;
-    /** Reflection coefficient. */
-    private final double rho;
-    /** Expansion coefficient. */
-    private final double khi;
-    /** Contraction coefficient. */
-    private final double gamma;
-    /** Shrinkage coefficient. */
-    private final double sigma;
-
-    /**
-     * Build a Nelder-Mead simplex with default coefficients.
-     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
-     * for both gamma and sigma.
-     *
-     * @param n Dimension of the simplex.
-     */
-    public NelderMeadSimplex(final int n) {
-        this(n, 1d);
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with default coefficients.
-     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
-     * for both gamma and sigma.
-     *
-     * @param n Dimension of the simplex.
-     * @param sideLength Length of the sides of the default (hypercube)
-     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     */
-    public NelderMeadSimplex(final int n, double sideLength) {
-        this(n, sideLength,
-             DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with specified coefficients.
-     *
-     * @param n Dimension of the simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     * @param sideLength Length of the sides of the default (hypercube)
-     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
-     * @param rho Reflection coefficient.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     * @param sigma Shrinkage coefficient.
-     */
-    public NelderMeadSimplex(final int n, double sideLength,
-                             final double rho, final double khi,
-                             final double gamma, final double sigma) {
-        super(n, sideLength);
-
-        this.rho = rho;
-        this.khi = khi;
-        this.gamma = gamma;
-        this.sigma = sigma;
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with specified coefficients.
-     *
-     * @param n Dimension of the simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(int)}.
-     * @param rho Reflection coefficient.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     * @param sigma Shrinkage coefficient.
-     */
-    public NelderMeadSimplex(final int n,
-                             final double rho, final double khi,
-                             final double gamma, final double sigma) {
-        this(n, 1d, rho, khi, gamma, sigma);
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with default coefficients.
-     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
-     * for both gamma and sigma.
-     *
-     * @param steps Steps along the canonical axes representing box edges.
-     * They may be negative but not zero. See
-     * {@link AbstractSimplex#AbstractSimplex(double[])}.
-     */
-    public NelderMeadSimplex(final double[] steps) {
-        this(steps, DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with specified coefficients.
-     *
-     * @param steps Steps along the canonical axes representing box edges.
-     * They may be negative but not zero. See
-     * {@link AbstractSimplex#AbstractSimplex(double[])}.
-     * @param rho Reflection coefficient.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     * @param sigma Shrinkage coefficient.
-     * @throws IllegalArgumentException if one of the steps is zero.
-     */
-    public NelderMeadSimplex(final double[] steps,
-                             final double rho, final double khi,
-                             final double gamma, final double sigma) {
-        super(steps);
-
-        this.rho = rho;
-        this.khi = khi;
-        this.gamma = gamma;
-        this.sigma = sigma;
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with default coefficients.
-     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
-     * for both gamma and sigma.
-     *
-     * @param referenceSimplex Reference simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
-     */
-    public NelderMeadSimplex(final double[][] referenceSimplex) {
-        this(referenceSimplex, DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
-    }
-
-    /**
-     * Build a Nelder-Mead simplex with specified coefficients.
-     *
-     * @param referenceSimplex Reference simplex. See
-     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
-     * @param rho Reflection coefficient.
-     * @param khi Expansion coefficient.
-     * @param gamma Contraction coefficient.
-     * @param sigma Shrinkage coefficient.
-     * @throws org.apache.commons.math4.exception.NotStrictlyPositiveException
-     * if the reference simplex does not contain at least one point.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if there is a dimension mismatch in the reference simplex.
-     */
-    public NelderMeadSimplex(final double[][] referenceSimplex,
-                             final double rho, final double khi,
-                             final double gamma, final double sigma) {
-        super(referenceSimplex);
-
-        this.rho = rho;
-        this.khi = khi;
-        this.gamma = gamma;
-        this.sigma = sigma;
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    public void iterate(final MultivariateFunction evaluationFunction,
-                        final Comparator<PointValuePair> comparator) {
-        // The simplex has n + 1 points if dimension is n.
-        final int n = getDimension();
-
-        // Interesting values.
-        final PointValuePair best = getPoint(0);
-        final PointValuePair secondBest = getPoint(n - 1);
-        final PointValuePair worst = getPoint(n);
-        final double[] xWorst = worst.getPointRef();
-
-        // Compute the centroid of the best vertices (dismissing the worst
-        // point at index n).
-        final double[] centroid = new double[n];
-        for (int i = 0; i < n; i++) {
-            final double[] x = getPoint(i).getPointRef();
-            for (int j = 0; j < n; j++) {
-                centroid[j] += x[j];
-            }
-        }
-        final double scaling = 1.0 / n;
-        for (int j = 0; j < n; j++) {
-            centroid[j] *= scaling;
-        }
-
-        // compute the reflection point
-        final double[] xR = new double[n];
-        for (int j = 0; j < n; j++) {
-            xR[j] = centroid[j] + rho * (centroid[j] - xWorst[j]);
-        }
-        final PointValuePair reflected
-            = new PointValuePair(xR, evaluationFunction.value(xR), false);
-
-        if (comparator.compare(best, reflected) <= 0 &&
-            comparator.compare(reflected, secondBest) < 0) {
-            // Accept the reflected point.
-            replaceWorstPoint(reflected, comparator);
-        } else if (comparator.compare(reflected, best) < 0) {
-            // Compute the expansion point.
-            final double[] xE = new double[n];
-            for (int j = 0; j < n; j++) {
-                xE[j] = centroid[j] + khi * (xR[j] - centroid[j]);
-            }
-            final PointValuePair expanded
-                = new PointValuePair(xE, evaluationFunction.value(xE), false);
-
-            if (comparator.compare(expanded, reflected) < 0) {
-                // Accept the expansion point.
-                replaceWorstPoint(expanded, comparator);
-            } else {
-                // Accept the reflected point.
-                replaceWorstPoint(reflected, comparator);
-            }
-        } else {
-            if (comparator.compare(reflected, worst) < 0) {
-                // Perform an outside contraction.
-                final double[] xC = new double[n];
-                for (int j = 0; j < n; j++) {
-                    xC[j] = centroid[j] + gamma * (xR[j] - centroid[j]);
-                }
-                final PointValuePair outContracted
-                    = new PointValuePair(xC, evaluationFunction.value(xC), false);
-                if (comparator.compare(outContracted, reflected) <= 0) {
-                    // Accept the contraction point.
-                    replaceWorstPoint(outContracted, comparator);
-                    return;
-                }
-            } else {
-                // Perform an inside contraction.
-                final double[] xC = new double[n];
-                for (int j = 0; j < n; j++) {
-                    xC[j] = centroid[j] - gamma * (centroid[j] - xWorst[j]);
-                }
-                final PointValuePair inContracted
-                    = new PointValuePair(xC, evaluationFunction.value(xC), false);
-
-                if (comparator.compare(inContracted, worst) < 0) {
-                    // Accept the contraction point.
-                    replaceWorstPoint(inContracted, comparator);
-                    return;
-                }
-            }
-
-            // Perform a shrink.
-            final double[] xSmallest = getPoint(0).getPointRef();
-            for (int i = 1; i <= n; i++) {
-                final double[] x = getPoint(i).getPoint();
-                for (int j = 0; j < n; j++) {
-                    x[j] = xSmallest[j] + sigma * (x[j] - xSmallest[j]);
-                }
-                setPoint(i, new PointValuePair(x, Double.NaN, false));
-            }
-            evaluate(evaluationFunction, comparator);
-        }
-    }
-}
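
As a quick illustration of the iterate() step removed above: with the default coefficients (rho = 1, khi = 2, gamma = 0.5), one iteration builds its candidate points along the line from the worst vertex through the centroid of the remaining vertices. The following standalone sketch (not part of the library; the class name and sample vectors are made up) evaluates those same formulas for a 2-D case.

    // Illustrative only: candidate points of one Nelder-Mead step with the
    // default coefficients (rho = 1, khi = 2, gamma = 0.5).
    public class NelderMeadStepSketch {
        public static void main(String[] args) {
            final double rho = 1.0, khi = 2.0, gamma = 0.5;
            final double[] centroid = { 1.0, 1.0 };   // centroid of the n best vertices (sample values)
            final double[] xWorst   = { 3.0, 0.0 };   // worst vertex, candidate for replacement

            final int n = centroid.length;
            final double[] xR = new double[n];        // reflection
            final double[] xE = new double[n];        // expansion
            final double[] xC = new double[n];        // outside contraction
            for (int j = 0; j < n; j++) {
                xR[j] = centroid[j] + rho * (centroid[j] - xWorst[j]);
                xE[j] = centroid[j] + khi * (xR[j] - centroid[j]);
                xC[j] = centroid[j] + gamma * (xR[j] - centroid[j]);
            }
            System.out.println(java.util.Arrays.toString(xR)); // [-1.0, 2.0]
            System.out.println(java.util.Arrays.toString(xE)); // [-3.0, 3.0]
            System.out.println(java.util.Arrays.toString(xC)); // [0.0, 1.5]
        }
    }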

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/PowellOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/PowellOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/PowellOptimizer.java
deleted file mode 100644
index a0a396e..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/PowellOptimizer.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.analysis.UnivariateFunction;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateOptimizer;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.univariate.BracketFinder;
-import org.apache.commons.math4.optimization.univariate.BrentOptimizer;
-import org.apache.commons.math4.optimization.univariate.SimpleUnivariateValueChecker;
-import org.apache.commons.math4.optimization.univariate.UnivariatePointValuePair;
-import org.apache.commons.math4.util.FastMath;
-import org.apache.commons.math4.util.MathArrays;
-
-/**
- * Powell algorithm.
- * This code is translated and adapted from the Python version of this
- * algorithm (as implemented in module {@code optimize.py} v0.5 of
- * <em>SciPy</em>).
- * <br/>
- * The default stopping criterion is based on the differences of the
- * function value between two successive iterations. It is however possible
- * to define a custom convergence checker that might terminate the algorithm
- * earlier.
- * <br/>
- * The internal line search optimizer is a {@link BrentOptimizer} with a
- * convergence checker set to {@link SimpleUnivariateValueChecker}.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.2
- */
-@Deprecated
-public class PowellOptimizer
-    extends BaseAbstractMultivariateOptimizer<MultivariateFunction>
-    implements MultivariateOptimizer {
-    /**
-     * Minimum relative tolerance.
-     */
-    private static final double MIN_RELATIVE_TOLERANCE = 2 * FastMath.ulp(1d);
-    /**
-     * Relative threshold.
-     */
-    private final double relativeThreshold;
-    /**
-     * Absolute threshold.
-     */
-    private final double absoluteThreshold;
-    /**
-     * Line search.
-     */
-    private final LineSearch line;
-
-    /**
-     * This constructor allows one to specify a user-defined convergence checker,
-     * in addition to the parameters that control the default convergence
-     * checking procedure.
-     * <br/>
-     * The internal line search tolerances are set to the square-root of their
-     * corresponding value in the multivariate optimizer.
-     *
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     * @param checker Convergence checker.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     */
-    public PowellOptimizer(double rel,
-                           double abs,
-                           ConvergenceChecker<PointValuePair> checker) {
-        this(rel, abs, FastMath.sqrt(rel), FastMath.sqrt(abs), checker);
-    }
-
-    /**
-     * This constructor allows one to specify a user-defined convergence checker,
-     * in addition to the parameters that control the default convergence
-     * checking procedure and the line search tolerances.
-     *
-     * @param rel Relative threshold for this optimizer.
-     * @param abs Absolute threshold for this optimizer.
-     * @param lineRel Relative threshold for the internal line search optimizer.
-     * @param lineAbs Absolute threshold for the internal line search optimizer.
-     * @param checker Convergence checker.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     */
-    public PowellOptimizer(double rel,
-                           double abs,
-                           double lineRel,
-                           double lineAbs,
-                           ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-
-        if (rel < MIN_RELATIVE_TOLERANCE) {
-            throw new NumberIsTooSmallException(rel, MIN_RELATIVE_TOLERANCE, true);
-        }
-        if (abs <= 0) {
-            throw new NotStrictlyPositiveException(abs);
-        }
-        relativeThreshold = rel;
-        absoluteThreshold = abs;
-
-        // Create the line search optimizer.
-        line = new LineSearch(lineRel,
-                              lineAbs);
-    }
-
-    /**
-     * The parameters control the default convergence checking procedure.
-     * <br/>
-     * The internal line search tolerances are set to the square-root of their
-     * corresponding value in the multivariate optimizer.
-     *
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     */
-    public PowellOptimizer(double rel,
-                           double abs) {
-        this(rel, abs, null);
-    }
-
-    /**
-     * Builds an instance with the default convergence checking procedure.
-     *
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     * @param lineRel Relative threshold for the internal line search optimizer.
-     * @param lineAbs Absolute threshold for the internal line search optimizer.
-     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
-     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
-     * @since 3.1
-     */
-    public PowellOptimizer(double rel,
-                           double abs,
-                           double lineRel,
-                           double lineAbs) {
-        this(rel, abs, lineRel, lineAbs, null);
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair doOptimize() {
-        final GoalType goal = getGoalType();
-        final double[] guess = getStartPoint();
-        final int n = guess.length;
-
-        final double[][] direc = new double[n][n];
-        for (int i = 0; i < n; i++) {
-            direc[i][i] = 1;
-        }
-
-        final ConvergenceChecker<PointValuePair> checker
-            = getConvergenceChecker();
-
-        double[] x = guess;
-        double fVal = computeObjectiveValue(x);
-        double[] x1 = x.clone();
-        int iter = 0;
-        while (true) {
-            ++iter;
-
-            double fX = fVal;
-            double fX2 = 0;
-            double delta = 0;
-            int bigInd = 0;
-            double alphaMin = 0;
-
-            for (int i = 0; i < n; i++) {
-                final double[] d = MathArrays.copyOf(direc[i]);
-
-                fX2 = fVal;
-
-                final UnivariatePointValuePair optimum = line.search(x, d);
-                fVal = optimum.getValue();
-                alphaMin = optimum.getPoint();
-                final double[][] result = newPointAndDirection(x, d, alphaMin);
-                x = result[0];
-
-                if ((fX2 - fVal) > delta) {
-                    delta = fX2 - fVal;
-                    bigInd = i;
-                }
-            }
-
-            // Default convergence check.
-            boolean stop = 2 * (fX - fVal) <=
-                (relativeThreshold * (FastMath.abs(fX) + FastMath.abs(fVal)) +
-                 absoluteThreshold);
-
-            final PointValuePair previous = new PointValuePair(x1, fX);
-            final PointValuePair current = new PointValuePair(x, fVal);
-            if (!stop && checker != null) {
-                stop = checker.converged(iter, previous, current);
-            }
-            if (stop) {
-                if (goal == GoalType.MINIMIZE) {
-                    return (fVal < fX) ? current : previous;
-                } else {
-                    return (fVal > fX) ? current : previous;
-                }
-            }
-
-            final double[] d = new double[n];
-            final double[] x2 = new double[n];
-            for (int i = 0; i < n; i++) {
-                d[i] = x[i] - x1[i];
-                x2[i] = 2 * x[i] - x1[i];
-            }
-
-            x1 = x.clone();
-            fX2 = computeObjectiveValue(x2);
-
-            if (fX > fX2) {
-                double t = 2 * (fX + fX2 - 2 * fVal);
-                double temp = fX - fVal - delta;
-                t *= temp * temp;
-                temp = fX - fX2;
-                t -= delta * temp * temp;
-
-                if (t < 0.0) {
-                    final UnivariatePointValuePair optimum = line.search(x, d);
-                    fVal = optimum.getValue();
-                    alphaMin = optimum.getPoint();
-                    final double[][] result = newPointAndDirection(x, d, alphaMin);
-                    x = result[0];
-
-                    final int lastInd = n - 1;
-                    direc[bigInd] = direc[lastInd];
-                    direc[lastInd] = result[1];
-                }
-            }
-        }
-    }
-
-    /**
-     * Compute a new point (in the original space) and a new direction
-     * vector, resulting from the line search.
-     *
-     * @param p Point used in the line search.
-     * @param d Direction used in the line search.
-     * @param optimum Optimum found by the line search.
-     * @return a 2-element array containing the new point (at index 0) and
-     * the new direction (at index 1).
-     */
-    private double[][] newPointAndDirection(double[] p,
-                                            double[] d,
-                                            double optimum) {
-        final int n = p.length;
-        final double[] nP = new double[n];
-        final double[] nD = new double[n];
-        for (int i = 0; i < n; i++) {
-            nD[i] = d[i] * optimum;
-            nP[i] = p[i] + nD[i];
-        }
-
-        final double[][] result = new double[2][];
-        result[0] = nP;
-        result[1] = nD;
-
-        return result;
-    }
-
-    /**
-     * Class for finding the minimum of the objective function along a given
-     * direction.
-     */
-    private class LineSearch extends BrentOptimizer {
-        /**
-         * Value that will pass the precondition check for {@link BrentOptimizer}
-         * but will not pass the convergence check, so that the custom checker
-         * will always decide when to stop the line search.
-         */
-        private static final double REL_TOL_UNUSED = 1e-15;
-        /**
-         * Value that will pass the precondition check for {@link BrentOptimizer}
-         * but will not pass the convergence check, so that the custom checker
-         * will always decide when to stop the line search.
-         */
-        private static final double ABS_TOL_UNUSED = Double.MIN_VALUE;
-        /**
-         * Automatic bracketing.
-         */
-        private final BracketFinder bracket = new BracketFinder();
-
-        /**
-         * The "BrentOptimizer" default stopping criterion uses the tolerances
-         * to check the domain (point) values, not the function values.
-         * We thus create a custom checker to use function values.
-         *
-         * @param rel Relative threshold.
-         * @param abs Absolute threshold.
-         */
-        LineSearch(double rel,
-                   double abs) {
-            super(REL_TOL_UNUSED,
-                  ABS_TOL_UNUSED,
-                  new SimpleUnivariateValueChecker(rel, abs));
-        }
-
-        /**
-         * Find the minimum of the function {@code f(p + alpha * d)}.
-         *
-         * @param p Starting point.
-         * @param d Search direction.
-         * @return the optimum.
-         * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-         * if the number of evaluations is exceeded.
-         */
-        public UnivariatePointValuePair search(final double[] p, final double[] d) {
-            final int n = p.length;
-            final UnivariateFunction f = new UnivariateFunction() {
-                    public double value(double alpha) {
-                        final double[] x = new double[n];
-                        for (int i = 0; i < n; i++) {
-                            x[i] = p[i] + alpha * d[i];
-                        }
-                        final double obj = PowellOptimizer.this.computeObjectiveValue(x);
-                        return obj;
-                    }
-                };
-
-            final GoalType goal = PowellOptimizer.this.getGoalType();
-            bracket.search(f, goal, 0, 1);
-            // Passing "MAX_VALUE" as a dummy value because it is the enclosing
-            // class that counts the number of evaluations (and will eventually
-            // generate the exception).
-            return optimize(Integer.MAX_VALUE, f, goal,
-                            bracket.getLo(), bracket.getHi(), bracket.getMid());
-        }
-    }
-}
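
A minimal sketch of how this removed PowellOptimizer was typically driven, assuming the deprecated 3.x entry point optimize(maxEval, function, goalType, startPoint) referenced elsewhere in this diff (SimplexOptimizer Javadoc) is available on the common base class; the objective function, tolerances, evaluation budget and start point below are arbitrary illustrative choices.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optimization.GoalType;
    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.direct.PowellOptimizer;

    public class PowellUsageSketch {
        public static void main(String[] args) {
            // Rosenbrock function, minimum 0 at (1, 1); chosen only for illustration.
            MultivariateFunction rosenbrock = new MultivariateFunction() {
                public double value(double[] x) {
                    double a = 1 - x[0];
                    double b = x[1] - x[0] * x[0];
                    return a * a + 100 * b * b;
                }
            };
            // rel must be >= 2 * ulp(1) and abs must be > 0 (see the constructor checks above).
            PowellOptimizer optimizer = new PowellOptimizer(1e-10, 1e-12);
            PointValuePair optimum =
                optimizer.optimize(10000, rosenbrock, GoalType.MINIMIZE, new double[] { -1.0, 1.0 });
            System.out.println(java.util.Arrays.toString(optimum.getPoint())
                               + " -> " + optimum.getValue());
        }
    }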

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/SimplexOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/SimplexOptimizer.java b/src/main/java/org/apache/commons/math4/optimization/direct/SimplexOptimizer.java
deleted file mode 100644
index 0adcdb3..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/SimplexOptimizer.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.direct;
-
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.MultivariateFunction;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.optimization.ConvergenceChecker;
-import org.apache.commons.math4.optimization.GoalType;
-import org.apache.commons.math4.optimization.MultivariateOptimizer;
-import org.apache.commons.math4.optimization.OptimizationData;
-import org.apache.commons.math4.optimization.PointValuePair;
-import org.apache.commons.math4.optimization.SimpleValueChecker;
-
-/**
- * This class implements simplex-based direct search optimization.
- *
- * <p>
- *  Direct search methods only use objective function values; they do
- *  not need derivatives and do not try to compute approximations
- *  of the derivatives. According to a 1996 paper by Margaret H. Wright
- *  (<a href="http://cm.bell-labs.com/cm/cs/doc/96/4-02.ps.gz">Direct
- *  Search Methods: Once Scorned, Now Respectable</a>), they are used
- *  when either the computation of the derivative is impossible (noisy
- *  functions, unpredictable discontinuities) or difficult (complexity,
- *  computation cost). In the first cases, rather than an optimum, a
- *  <em>not too bad</em> point is desired. In the latter cases, an
- *  optimum is desired but cannot be reasonably found. In all cases
- *  direct search methods can be useful.
- * </p>
- * <p>
- *  Simplex-based direct search methods are based on comparison of
- *  the objective function values at the vertices of a simplex (which is a
- *  set of n+1 points in dimension n) that is updated by the algorithm's
- *  steps.
- * </p>
- * <p>
- *  The {@link #setSimplex(AbstractSimplex) setSimplex} method <em>must</em>
- *  be called prior to calling the {@code optimize} method.
- * </p>
- * <p>
- *  Each call to {@link #optimize(int,MultivariateFunction,GoalType,double[])
- *  optimize} will re-use the start configuration of the current simplex and
- *  move it such that its first vertex is at the provided start point of the
- *  optimization. If the {@code optimize} method is called to solve a different
- *  problem and the number of parameters changes, the simplex must be
- *  re-initialized to one with the appropriate dimensions.
- * </p>
- * <p>
- *  Convergence is checked by providing the <em>worst</em> points of
- *  previous and current simplex to the convergence checker, not the best
- *  ones.
- * </p>
- * <p>
- * This simplex optimizer implementation does not directly support constrained
- * optimization with simple bounds, so for such optimizations, either a more
- * dedicated method must be used like {@link CMAESOptimizer} or {@link
- * BOBYQAOptimizer}, or the optimized method must be wrapped in an adapter like
- * {@link MultivariateFunctionMappingAdapter} or {@link
- * MultivariateFunctionPenaltyAdapter}.
- * </p>
- *
- * @see AbstractSimplex
- * @see MultivariateFunctionMappingAdapter
- * @see MultivariateFunctionPenaltyAdapter
- * @see CMAESOptimizer
- * @see BOBYQAOptimizer
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 3.0
- */
-@SuppressWarnings("boxing") // deprecated anyway
-@Deprecated
-public class SimplexOptimizer
-    extends BaseAbstractMultivariateOptimizer<MultivariateFunction>
-    implements MultivariateOptimizer {
-    /** Simplex. */
-    private AbstractSimplex simplex;
-
-    /**
-     * Constructor using a default {@link SimpleValueChecker convergence
-     * checker}.
-     * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
-     */
-    @Deprecated
-    public SimplexOptimizer() {
-        this(new SimpleValueChecker());
-    }
-
-    /**
-     * @param checker Convergence checker.
-     */
-    public SimplexOptimizer(ConvergenceChecker<PointValuePair> checker) {
-        super(checker);
-    }
-
-    /**
-     * @param rel Relative threshold.
-     * @param abs Absolute threshold.
-     */
-    public SimplexOptimizer(double rel, double abs) {
-        this(new SimpleValueChecker(rel, abs));
-    }
-
-    /**
-     * Set the simplex algorithm.
-     *
-     * @param simplex Simplex.
-     * @deprecated As of 3.1. The initial simplex can now be passed as an
-     * argument of the {@link #optimize(int,MultivariateFunction,GoalType,OptimizationData[])}
-     * method.
-     */
-    @Deprecated
-    public void setSimplex(AbstractSimplex simplex) {
-        parseOptimizationData(simplex);
-    }
-
-    /**
-     * Optimize an objective function.
-     *
-     * @param maxEval Allowed number of evaluations of the objective function.
-     * @param f Objective function.
-     * @param goalType Optimization type.
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link org.apache.commons.math4.optimization.InitialGuess InitialGuess}</li>
-     *  <li>{@link AbstractSimplex}</li>
-     * </ul>
-     * @return the point/value pair giving the optimal value for objective
-     * function.
-     */
-    @Override
-    protected PointValuePair optimizeInternal(int maxEval, MultivariateFunction f,
-                                              GoalType goalType,
-                                              OptimizationData... optData) {
-        // Scan "optData" for the input specific to this optimizer.
-        parseOptimizationData(optData);
-
-        // The parent's method will retrieve the common parameters from
-        // "optData" and call "doOptimize".
-        return super.optimizeInternal(maxEval, f, goalType, optData);
-    }
-
-    /**
-     * Scans the list of (required and optional) optimization data that
-     * characterize the problem.
-     *
-     * @param optData Optimization data. The following data will be looked for:
-     * <ul>
-     *  <li>{@link AbstractSimplex}</li>
-     * </ul>
-     */
-    private void parseOptimizationData(OptimizationData... optData) {
-        // The existing values (as set by the previous call) are reused if
-        // not provided in the argument list.
-        for (OptimizationData data : optData) {
-            if (data instanceof AbstractSimplex) {
-                simplex = (AbstractSimplex) data;
-                continue;
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected PointValuePair doOptimize() {
-        if (simplex == null) {
-            throw new NullArgumentException();
-        }
-
-        // Indirect call to "computeObjectiveValue" in order to update the
-        // evaluations counter.
-        final MultivariateFunction evalFunc
-            = new MultivariateFunction() {
-                public double value(double[] point) {
-                    return computeObjectiveValue(point);
-                }
-            };
-
-        final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
-        final Comparator<PointValuePair> comparator
-            = new Comparator<PointValuePair>() {
-            public int compare(final PointValuePair o1,
-                               final PointValuePair o2) {
-                final double v1 = o1.getValue();
-                final double v2 = o2.getValue();
-                return isMinim ? Double.compare(v1, v2) : Double.compare(v2, v1);
-            }
-        };
-
-        // Initialize search.
-        simplex.build(getStartPoint());
-        simplex.evaluate(evalFunc, comparator);
-
-        PointValuePair[] previous = null;
-        int iteration = 0;
-        final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
-        while (true) {
-            if (iteration > 0) {
-                boolean converged = true;
-                for (int i = 0; i < simplex.getSize(); i++) {
-                    PointValuePair prev = previous[i];
-                    converged = converged &&
-                        checker.converged(iteration, prev, simplex.getPoint(i));
-                }
-                if (converged) {
-                    // We have found an optimum.
-                    return simplex.getPoint(0);
-                }
-            }
-
-            // We still need to search.
-            previous = simplex.getPoints();
-            simplex.iterate(evalFunc, comparator);
-            ++iteration;
-        }
-    }
-}
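
A minimal sketch pairing the removed SimplexOptimizer with a NelderMeadSimplex through the deprecated optimize(maxEval, function, goalType, OptimizationData...) entry point and the InitialGuess data mentioned in the Javadoc above; the objective function, tolerances, evaluation budget and start point are arbitrary illustrative values.

    import org.apache.commons.math4.analysis.MultivariateFunction;
    import org.apache.commons.math4.optimization.GoalType;
    import org.apache.commons.math4.optimization.InitialGuess;
    import org.apache.commons.math4.optimization.PointValuePair;
    import org.apache.commons.math4.optimization.direct.NelderMeadSimplex;
    import org.apache.commons.math4.optimization.direct.SimplexOptimizer;

    public class SimplexUsageSketch {
        public static void main(String[] args) {
            // Simple quadratic bowl, minimum 0 at (0, 0); chosen only for illustration.
            MultivariateFunction sphere = new MultivariateFunction() {
                public double value(double[] x) {
                    return x[0] * x[0] + x[1] * x[1];
                }
            };
            SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-12);
            PointValuePair optimum =
                optimizer.optimize(1000, sphere, GoalType.MINIMIZE,
                                   new NelderMeadSimplex(2),                    // dimension 2, default side length
                                   new InitialGuess(new double[] { 3.0, -2.0 }));
            System.out.println(java.util.Arrays.toString(optimum.getPoint())
                               + " -> " + optimum.getValue());
        }
    }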

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/direct/package-info.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/direct/package-info.java b/src/main/java/org/apache/commons/math4/optimization/direct/package-info.java
deleted file mode 100644
index 57b385d..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/direct/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- *
- * <p>
- * This package provides optimization algorithms that don't require derivatives.
- * </p>
- *
- */
-package org.apache.commons.math4.optimization.direct;

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/CurveFitter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/CurveFitter.java b/src/main/java/org/apache/commons/math4/optimization/fitting/CurveFitter.java
deleted file mode 100644
index 7b3a429..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/CurveFitter.java
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.math4.analysis.DifferentiableMultivariateVectorFunction;
-import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
-import org.apache.commons.math4.analysis.ParametricUnivariateFunction;
-import org.apache.commons.math4.analysis.differentiation.DerivativeStructure;
-import org.apache.commons.math4.analysis.differentiation.MultivariateDifferentiableVectorFunction;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-import org.apache.commons.math4.optimization.MultivariateDifferentiableVectorOptimizer;
-import org.apache.commons.math4.optimization.PointVectorValuePair;
-
-/** Fitter for parametric univariate real functions y = f(x).
- * <br/>
- * When a univariate real function y = f(x) depends on some
- * unknown parameters p<sub>0</sub>, p<sub>1</sub> ... p<sub>n-1</sub>,
- * this class can be used to find these parameters. It does this
- * by <em>fitting</em> the curve so it remains very close to a set of
- * observed points (x<sub>0</sub>, y<sub>0</sub>), (x<sub>1</sub>,
- * y<sub>1</sub>) ... (x<sub>k-1</sub>, y<sub>k-1</sub>). This fitting
- * is done by finding the parameter values that minimize the objective
- * function &sum;(y<sub>i</sub>-f(x<sub>i</sub>))<sup>2</sup>. This is
- * really a least squares problem.
- *
- * @param <T> Function to use for the fit.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class CurveFitter<T extends ParametricUnivariateFunction> {
-
-    /** Optimizer to use for the fitting.
-     * @deprecated as of 3.1 replaced by {@link #optimizer}
-     */
-    @Deprecated
-    private final DifferentiableMultivariateVectorOptimizer oldOptimizer;
-
-    /** Optimizer to use for the fitting. */
-    private final MultivariateDifferentiableVectorOptimizer optimizer;
-
-    /** Observed points. */
-    private final List<WeightedObservedPoint> observations;
-
-    /** Simple constructor.
-     * @param optimizer optimizer to use for the fitting
-     * @deprecated as of 3.1 replaced by {@link #CurveFitter(MultivariateDifferentiableVectorOptimizer)}
-     */
-    @Deprecated
-    public CurveFitter(final DifferentiableMultivariateVectorOptimizer optimizer) {
-        this.oldOptimizer = optimizer;
-        this.optimizer    = null;
-        observations      = new ArrayList<WeightedObservedPoint>();
-    }
-
-    /** Simple constructor.
-     * @param optimizer optimizer to use for the fitting
-     * @since 3.1
-     */
-    public CurveFitter(final MultivariateDifferentiableVectorOptimizer optimizer) {
-        this.oldOptimizer = null;
-        this.optimizer    = optimizer;
-        observations      = new ArrayList<WeightedObservedPoint>();
-    }
-
-    /** Add an observed (x,y) point to the sample with unit weight.
-     * <p>Calling this method is equivalent to call
-     * {@code addObservedPoint(1.0, x, y)}.</p>
-     * @param x abscissa of the point
-     * @param y observed value of the point at x, after fitting we should
-     * have f(x) as close as possible to this value
-     * @see #addObservedPoint(double, double, double)
-     * @see #addObservedPoint(WeightedObservedPoint)
-     * @see #getObservations()
-     */
-    public void addObservedPoint(double x, double y) {
-        addObservedPoint(1.0, x, y);
-    }
-
-    /** Add an observed weighted (x,y) point to the sample.
-     * @param weight weight of the observed point in the fit
-     * @param x abscissa of the point
-     * @param y observed value of the point at x, after fitting we should
-     * have f(x) as close as possible to this value
-     * @see #addObservedPoint(double, double)
-     * @see #addObservedPoint(WeightedObservedPoint)
-     * @see #getObservations()
-     */
-    public void addObservedPoint(double weight, double x, double y) {
-        observations.add(new WeightedObservedPoint(weight, x, y));
-    }
-
-    /** Add an observed weighted (x,y) point to the sample.
-     * @param observed observed point to add
-     * @see #addObservedPoint(double, double)
-     * @see #addObservedPoint(double, double, double)
-     * @see #getObservations()
-     */
-    public void addObservedPoint(WeightedObservedPoint observed) {
-        observations.add(observed);
-    }
-
-    /** Get the observed points.
-     * @return observed points
-     * @see #addObservedPoint(double, double)
-     * @see #addObservedPoint(double, double, double)
-     * @see #addObservedPoint(WeightedObservedPoint)
-     */
-    public WeightedObservedPoint[] getObservations() {
-        return observations.toArray(new WeightedObservedPoint[observations.size()]);
-    }
-
-    /**
-     * Remove all observations.
-     */
-    public void clearObservations() {
-        observations.clear();
-    }
-
-    /**
-     * Fit a curve.
-     * This method computes the coefficients of the curve that best
-     * fits the sample of observed points previously given through calls
-     * to the {@link #addObservedPoint(WeightedObservedPoint)
-     * addObservedPoint} method.
-     *
-     * @param f parametric function to fit.
-     * @param initialGuess first guess of the function parameters.
-     * @return the fitted parameters.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     */
-    public double[] fit(T f, final double[] initialGuess) {
-        return fit(Integer.MAX_VALUE, f, initialGuess);
-    }
-
-    /**
-     * Fit a curve.
-     * This method compute the coefficients of the curve that best
-     * fit the sample of observed points previously given through calls
-     * to the {@link #addObservedPoint(WeightedObservedPoint)
-     * addObservedPoint} method.
-     *
-     * @param f parametric function to fit.
-     * @param initialGuess first guess of the function parameters.
-     * @param maxEval Maximum number of function evaluations.
-     * @return the fitted parameters.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException
-     * if the number of allowed evaluations is exceeded.
-     * @throws org.apache.commons.math4.exception.DimensionMismatchException
-     * if the start point dimension is wrong.
-     * @since 3.0
-     */
-    public double[] fit(int maxEval, T f,
-                        final double[] initialGuess) {
-        // prepare least squares problem
-        double[] target  = new double[observations.size()];
-        double[] weights = new double[observations.size()];
-        int i = 0;
-        for (WeightedObservedPoint point : observations) {
-            target[i]  = point.getY();
-            weights[i] = point.getWeight();
-            ++i;
-        }
-
-        // perform the fit
-        final PointVectorValuePair optimum;
-        if (optimizer == null) {
-            // to be removed in 4.0
-            optimum = oldOptimizer.optimize(maxEval, new OldTheoreticalValuesFunction(f),
-                                            target, weights, initialGuess);
-        } else {
-            optimum = optimizer.optimize(maxEval, new TheoreticalValuesFunction(f),
-                                         target, weights, initialGuess);
-        }
-
-        // extract the coefficients
-        return optimum.getPointRef();
-    }
-
-    /** Vectorial function computing function theoretical values. */
-    @Deprecated
-    private class OldTheoreticalValuesFunction
-        implements DifferentiableMultivariateVectorFunction {
-        /** Function to fit. */
-        private final ParametricUnivariateFunction f;
-
-        /** Simple constructor.
-         * @param f function to fit.
-         */
-        public OldTheoreticalValuesFunction(final ParametricUnivariateFunction f) {
-            this.f = f;
-        }
-
-        /** {@inheritDoc} */
-        public MultivariateMatrixFunction jacobian() {
-            return new MultivariateMatrixFunction() {
-                public double[][] value(double[] point) {
-                    final double[][] jacobian = new double[observations.size()][];
-
-                    int i = 0;
-                    for (WeightedObservedPoint observed : observations) {
-                        jacobian[i++] = f.gradient(observed.getX(), point);
-                    }
-
-                    return jacobian;
-                }
-            };
-        }
-
-        /** {@inheritDoc} */
-        public double[] value(double[] point) {
-            // compute the residuals
-            final double[] values = new double[observations.size()];
-            int i = 0;
-            for (WeightedObservedPoint observed : observations) {
-                values[i++] = f.value(observed.getX(), point);
-            }
-
-            return values;
-        }
-    }
-
-    /** Vectorial function computing function theoretical values. */
-    private class TheoreticalValuesFunction implements MultivariateDifferentiableVectorFunction {
-
-        /** Function to fit. */
-        private final ParametricUnivariateFunction f;
-
-        /** Simple constructor.
-         * @param f function to fit.
-         */
-        public TheoreticalValuesFunction(final ParametricUnivariateFunction f) {
-            this.f = f;
-        }
-
-        /** {@inheritDoc} */
-        public double[] value(double[] point) {
-            // compute the residuals
-            final double[] values = new double[observations.size()];
-            int i = 0;
-            for (WeightedObservedPoint observed : observations) {
-                values[i++] = f.value(observed.getX(), point);
-            }
-
-            return values;
-        }
-
-        /** {@inheritDoc} */
-        public DerivativeStructure[] value(DerivativeStructure[] point) {
-
-            // extract parameters
-            final double[] parameters = new double[point.length];
-            for (int k = 0; k < point.length; ++k) {
-                parameters[k] = point[k].getValue();
-            }
-
-            // compute the residuals
-            final DerivativeStructure[] values = new DerivativeStructure[observations.size()];
-            int i = 0;
-            for (WeightedObservedPoint observed : observations) {
-
-                // build the DerivativeStructure by adding first the value as a constant
-                // and then adding derivatives
-                DerivativeStructure vi = new DerivativeStructure(point.length, 1, f.value(observed.getX(), parameters));
-                for (int k = 0; k < point.length; ++k) {
-                    vi = vi.add(new DerivativeStructure(point.length, 1, k, 0.0));
-                }
-
-                values[i++] = vi;
-
-            }
-
-            return values;
-        }
-
-    }
-
-}
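
A minimal sketch of the removed CurveFitter fitting a straight line y = a*x + b. It assumes the LevenbergMarquardtOptimizer used in the GaussianFitter Javadoc example further down, with its package path (optimization.general, also removed by this commit) taken as an assumption; the observed points and initial guess are made up.

    import org.apache.commons.math4.analysis.ParametricUnivariateFunction;
    import org.apache.commons.math4.optimization.fitting.CurveFitter;
    import org.apache.commons.math4.optimization.general.LevenbergMarquardtOptimizer; // package path assumed

    public class LineFitSketch {
        public static void main(String[] args) {
            // y = a*x + b and its gradient with respect to the parameters (a, b).
            ParametricUnivariateFunction line = new ParametricUnivariateFunction() {
                public double value(double x, double... p) {
                    return p[0] * x + p[1];
                }
                public double[] gradient(double x, double... p) {
                    return new double[] { x, 1.0 };
                }
            };

            CurveFitter<ParametricUnivariateFunction> fitter =
                new CurveFitter<ParametricUnivariateFunction>(new LevenbergMarquardtOptimizer());
            fitter.addObservedPoint(0.0, 1.1);   // unit-weight observations (made-up data)
            fitter.addObservedPoint(1.0, 2.9);
            fitter.addObservedPoint(2.0, 5.2);
            fitter.addObservedPoint(3.0, 6.8);

            double[] ab = fitter.fit(line, new double[] { 1.0, 1.0 });
            System.out.println("a = " + ab[0] + ", b = " + ab[1]);
        }
    }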

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/GaussianFitter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/GaussianFitter.java b/src/main/java/org/apache/commons/math4/optimization/fitting/GaussianFitter.java
deleted file mode 100644
index bf0f03d..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/GaussianFitter.java
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import java.util.Arrays;
-import java.util.Comparator;
-
-import org.apache.commons.math4.analysis.function.Gaussian;
-import org.apache.commons.math4.exception.NotStrictlyPositiveException;
-import org.apache.commons.math4.exception.NullArgumentException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.OutOfRangeException;
-import org.apache.commons.math4.exception.ZeroException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Fits points to a {@link
- * org.apache.commons.math4.analysis.function.Gaussian.Parametric Gaussian} function.
- * <p>
- * Usage example:
- * <pre>
- *   GaussianFitter fitter = new GaussianFitter(
- *     new LevenbergMarquardtOptimizer());
- *   fitter.addObservedPoint(4.0254623,  531026.0);
- *   fitter.addObservedPoint(4.03128248, 984167.0);
- *   fitter.addObservedPoint(4.03839603, 1887233.0);
- *   fitter.addObservedPoint(4.04421621, 2687152.0);
- *   fitter.addObservedPoint(4.05132976, 3461228.0);
- *   fitter.addObservedPoint(4.05326982, 3580526.0);
- *   fitter.addObservedPoint(4.05779662, 3439750.0);
- *   fitter.addObservedPoint(4.0636168,  2877648.0);
- *   fitter.addObservedPoint(4.06943698, 2175960.0);
- *   fitter.addObservedPoint(4.07525716, 1447024.0);
- *   fitter.addObservedPoint(4.08237071, 717104.0);
- *   fitter.addObservedPoint(4.08366408, 620014.0);
- *   double[] parameters = fitter.fit();
- * </pre>
- *
- * @since 2.2
- * @deprecated As of 3.1 (to be removed in 4.0).
- */
-@Deprecated
-public class GaussianFitter extends CurveFitter<Gaussian.Parametric> {
-    /**
-     * Constructs an instance using the specified optimizer.
-     *
-     * @param optimizer Optimizer to use for the fitting.
-     */
-    public GaussianFitter(DifferentiableMultivariateVectorOptimizer optimizer) {
-        super(optimizer);
-    }
-
-    /**
-     * Fits a Gaussian function to the observed points.
-     *
-     * @param initialGuess First guess values in the following order:
-     * <ul>
-     *  <li>Norm</li>
-     *  <li>Mean</li>
-     *  <li>Sigma</li>
-     * </ul>
-     * @return the parameters of the Gaussian function that best fits the
-     * observed points (in the same order as above).
-     * @since 3.0
-     */
-    public double[] fit(double[] initialGuess) {
-        final Gaussian.Parametric f = new Gaussian.Parametric() {
-                @Override
-                public double value(double x, double ... p) {
-                    double v = Double.POSITIVE_INFINITY;
-                    try {
-                        v = super.value(x, p);
-                    } catch (NotStrictlyPositiveException e) { // NOPMD
-                        // Do nothing.
-                    }
-                    return v;
-                }
-
-                @Override
-                public double[] gradient(double x, double ... p) {
-                    double[] v = { Double.POSITIVE_INFINITY,
-                                   Double.POSITIVE_INFINITY,
-                                   Double.POSITIVE_INFINITY };
-                    try {
-                        v = super.gradient(x, p);
-                    } catch (NotStrictlyPositiveException e) { // NOPMD
-                        // Do nothing.
-                    }
-                    return v;
-                }
-            };
-
-        return fit(f, initialGuess);
-    }
-
-    /**
-     * Fits a Gaussian function to the observed points.
-     *
-     * @return the parameters of the Gaussian function that best fits the
-     * observed points (in the same order as above).
-     */
-    public double[] fit() {
-        final double[] guess = (new ParameterGuesser(getObservations())).guess();
-        return fit(guess);
-    }
-
-    /**
-     * Guesses the parameters {@code norm}, {@code mean}, and {@code sigma}
-     * of a {@link org.apache.commons.math4.analysis.function.Gaussian.Parametric}
-     * based on the specified observed points.
-     */
-    public static class ParameterGuesser {
-        /** Normalization factor. */
-        private final double norm;
-        /** Mean. */
-        private final double mean;
-        /** Standard deviation. */
-        private final double sigma;
-
-        /**
-         * Constructs an instance with the specified observed points.
-         *
-         * @param observations Observed points from which to guess the
-         * parameters of the Gaussian.
-         * @throws NullArgumentException if {@code observations} is
-         * {@code null}.
-         * @throws NumberIsTooSmallException if there are fewer than 3
-         * observations.
-         */
-        public ParameterGuesser(WeightedObservedPoint[] observations) {
-            if (observations == null) {
-                throw new NullArgumentException(LocalizedFormats.INPUT_ARRAY);
-            }
-            if (observations.length < 3) {
-                throw new NumberIsTooSmallException(observations.length, 3, true);
-            }
-
-            final WeightedObservedPoint[] sorted = sortObservations(observations);
-            final double[] params = basicGuess(sorted);
-
-            norm = params[0];
-            mean = params[1];
-            sigma = params[2];
-        }
-
-        /**
-         * Gets an estimation of the parameters.
-         *
-         * @return the guessed parameters, in the following order:
-         * <ul>
-         *  <li>Normalization factor</li>
-         *  <li>Mean</li>
-         *  <li>Standard deviation</li>
-         * </ul>
-         */
-        public double[] guess() {
-            return new double[] { norm, mean, sigma };
-        }
-
-        /**
-         * Sort the observations.
-         *
-         * @param unsorted Input observations.
-         * @return the input observations, sorted.
-         */
-        private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
-            final WeightedObservedPoint[] observations = unsorted.clone();
-            final Comparator<WeightedObservedPoint> cmp
-                = new Comparator<WeightedObservedPoint>() {
-                public int compare(WeightedObservedPoint p1,
-                                   WeightedObservedPoint p2) {
-                    if (p1 == null && p2 == null) {
-                        return 0;
-                    }
-                    if (p1 == null) {
-                        return -1;
-                    }
-                    if (p2 == null) {
-                        return 1;
-                    }
-                    if (p1.getX() < p2.getX()) {
-                        return -1;
-                    }
-                    if (p1.getX() > p2.getX()) {
-                        return 1;
-                    }
-                    if (p1.getY() < p2.getY()) {
-                        return -1;
-                    }
-                    if (p1.getY() > p2.getY()) {
-                        return 1;
-                    }
-                    if (p1.getWeight() < p2.getWeight()) {
-                        return -1;
-                    }
-                    if (p1.getWeight() > p2.getWeight()) {
-                        return 1;
-                    }
-                    return 0;
-                }
-            };
-
-            Arrays.sort(observations, cmp);
-            return observations;
-        }
-
-        /**
-         * Guesses the parameters based on the specified observed points.
-         *
-         * @param points Observed points, sorted.
-         * @return the guessed parameters (normalization factor, mean and
-         * sigma).
-         */
-        private double[] basicGuess(WeightedObservedPoint[] points) {
-            final int maxYIdx = findMaxY(points);
-            final double n = points[maxYIdx].getY();
-            final double m = points[maxYIdx].getX();
-
-            double fwhmApprox;
-            try {
-                final double halfY = n + ((m - n) / 2);
-                final double fwhmX1 = interpolateXAtY(points, maxYIdx, -1, halfY);
-                final double fwhmX2 = interpolateXAtY(points, maxYIdx, 1, halfY);
-                fwhmApprox = fwhmX2 - fwhmX1;
-            } catch (OutOfRangeException e) {
-                // TODO: Exceptions should not be used for flow control.
-                fwhmApprox = points[points.length - 1].getX() - points[0].getX();
-            }
-            final double s = fwhmApprox / (2 * FastMath.sqrt(2 * FastMath.log(2)));
-
-            return new double[] { n, m, s };
-        }
-
-        /**
-         * Finds index of point in specified points with the largest Y.
-         *
-         * @param points Points to search.
-         * @return the index in specified points array.
-         */
-        private int findMaxY(WeightedObservedPoint[] points) {
-            int maxYIdx = 0;
-            for (int i = 1; i < points.length; i++) {
-                if (points[i].getY() > points[maxYIdx].getY()) {
-                    maxYIdx = i;
-                }
-            }
-            return maxYIdx;
-        }
-
-        /**
-         * Interpolates using the specified points to determine X at the
-         * specified Y.
-         *
-         * @param points Points to use for interpolation.
-         * @param startIdx Index within points from which to start the search for
-         * interpolation bounds points.
-         * @param idxStep Index step for searching interpolation bounds points.
-         * @param y Y value for which X should be determined.
-         * @return the value of X for the specified Y.
-         * @throws ZeroException if {@code idxStep} is 0.
-         * @throws OutOfRangeException if specified {@code y} is not within the
-         * range of the specified {@code points}.
-         */
-        private double interpolateXAtY(WeightedObservedPoint[] points,
-                                       int startIdx,
-                                       int idxStep,
-                                       double y)
-            throws OutOfRangeException {
-            if (idxStep == 0) {
-                throw new ZeroException();
-            }
-            final WeightedObservedPoint[] twoPoints
-                = getInterpolationPointsForY(points, startIdx, idxStep, y);
-            final WeightedObservedPoint p1 = twoPoints[0];
-            final WeightedObservedPoint p2 = twoPoints[1];
-            if (p1.getY() == y) {
-                return p1.getX();
-            }
-            if (p2.getY() == y) {
-                return p2.getX();
-            }
-            return p1.getX() + (((y - p1.getY()) * (p2.getX() - p1.getX())) /
-                                (p2.getY() - p1.getY()));
-        }
-
-        /**
-         * Gets the two bounding interpolation points from the specified points
-         * suitable for determining X at the specified Y.
-         *
-         * @param points Points to use for interpolation.
-         * @param startIdx Index within points from which to start search for
-         * interpolation bounds points.
-         * @param idxStep Index step for search for interpolation bounds points.
-         * @param y Y value for which X should be determined.
-         * @return the array containing two points suitable for determining X at
-         * the specified Y.
-         * @throws ZeroException if {@code idxStep} is 0.
-         * @throws OutOfRangeException if specified {@code y} is not within the
-         * range of the specified {@code points}.
-         */
-        private WeightedObservedPoint[] getInterpolationPointsForY(WeightedObservedPoint[] points,
-                                                                   int startIdx,
-                                                                   int idxStep,
-                                                                   double y)
-            throws OutOfRangeException {
-            if (idxStep == 0) {
-                throw new ZeroException();
-            }
-            for (int i = startIdx;
-                 idxStep < 0 ? i + idxStep >= 0 : i + idxStep < points.length;
-                 i += idxStep) {
-                final WeightedObservedPoint p1 = points[i];
-                final WeightedObservedPoint p2 = points[i + idxStep];
-                if (isBetween(y, p1.getY(), p2.getY())) {
-                    if (idxStep < 0) {
-                        return new WeightedObservedPoint[] { p2, p1 };
-                    } else {
-                        return new WeightedObservedPoint[] { p1, p2 };
-                    }
-                }
-            }
-
-            // Boundaries are replaced by dummy values because the raised
-            // exception is caught and the message never displayed.
-            // TODO: Exceptions should not be used for flow control.
-            throw new OutOfRangeException(y,
-                                          Double.NEGATIVE_INFINITY,
-                                          Double.POSITIVE_INFINITY);
-        }
-
-        /**
-         * Determines whether a value is between two other values.
-         *
-         * @param value Value to test whether it is between {@code boundary1}
-         * and {@code boundary2}.
-         * @param boundary1 One end of the range.
-         * @param boundary2 Other end of the range.
-         * @return {@code true} if {@code value} is between {@code boundary1} and
-         * {@code boundary2} (inclusive), {@code false} otherwise.
-         */
-        private boolean isBetween(double value,
-                                  double boundary1,
-                                  double boundary2) {
-            return (value >= boundary1 && value <= boundary2) ||
-                (value >= boundary2 && value <= boundary1);
-        }
-    }
-}
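
For reference, the guessing logic removed above derives the Gaussian width from the
full width at half maximum (FWHM): X is interpolated at half the peak height on each
side of the maximum (falling back to the full abscissa range when no crossing is
found), and sigma follows from FWHM = 2 * sqrt(2 * ln 2) * sigma. A minimal
standalone sketch of that idea, with a hypothetical helper name (not part of any
Commons Math API), assuming a single peak and x sorted in ascending order:

    // Sketch only: mirrors the FWHM-based sigma estimate of the removed
    // GaussianFitter.ParameterGuesser, without the exception-based flow control.
    static double guessSigma(double[] x, double[] y) {
        int maxIdx = 0;
        for (int i = 1; i < y.length; i++) {
            if (y[i] > y[maxIdx]) {
                maxIdx = i;
            }
        }
        final double halfMax = 0.5 * y[maxIdx];

        // Interpolate X at the half maximum on each side of the peak,
        // defaulting to the end points when the curve never drops that low.
        double x1 = x[0];
        for (int i = maxIdx; i > 0; i--) {
            if (y[i - 1] <= halfMax) {
                x1 = x[i - 1] + (halfMax - y[i - 1]) * (x[i] - x[i - 1]) / (y[i] - y[i - 1]);
                break;
            }
        }
        double x2 = x[x.length - 1];
        for (int i = maxIdx; i < y.length - 1; i++) {
            if (y[i + 1] <= halfMax) {
                x2 = x[i] + (halfMax - y[i]) * (x[i + 1] - x[i]) / (y[i + 1] - y[i]);
                break;
            }
        }

        final double fwhm = x2 - x1;
        return fwhm / (2 * Math.sqrt(2 * Math.log(2)));
    }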

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/HarmonicFitter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/HarmonicFitter.java b/src/main/java/org/apache/commons/math4/optimization/fitting/HarmonicFitter.java
deleted file mode 100644
index 938156d..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/HarmonicFitter.java
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import org.apache.commons.math4.analysis.function.HarmonicOscillator;
-import org.apache.commons.math4.exception.MathIllegalStateException;
-import org.apache.commons.math4.exception.NumberIsTooSmallException;
-import org.apache.commons.math4.exception.ZeroException;
-import org.apache.commons.math4.exception.util.LocalizedFormats;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-import org.apache.commons.math4.util.FastMath;
-
-/**
- * Class that implements a curve fitting specialized for sinusoids.
- *
- * Harmonic fitting is a very simple case of curve fitting. The
- * estimated coefficients are the amplitude a, the pulsation &omega; and
- * the phase &phi;: <code>f (t) = a cos (&omega; t + &phi;)</code>. They are
- * searched by a least square estimator initialized with a rough guess
- * based on integrals.
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class HarmonicFitter extends CurveFitter<HarmonicOscillator.Parametric> {
-    /**
-     * Simple constructor.
-     * @param optimizer Optimizer to use for the fitting.
-     */
-    public HarmonicFitter(final DifferentiableMultivariateVectorOptimizer optimizer) {
-        super(optimizer);
-    }
-
-    /**
-     * Fit an harmonic function to the observed points.
-     *
-     * @param initialGuess First guess values in the following order:
-     * <ul>
-     *  <li>Amplitude</li>
-     *  <li>Angular frequency</li>
-     *  <li>Phase</li>
-     * </ul>
-     * @return the parameters of the harmonic function that best fits the
-     * observed points (in the same order as above).
-     */
-    public double[] fit(double[] initialGuess) {
-        return fit(new HarmonicOscillator.Parametric(), initialGuess);
-    }
-
-    /**
-     * Fit an harmonic function to the observed points.
-     * An initial guess will be automatically computed.
-     *
-     * @return the parameters of the harmonic function that best fits the
-     * observed points (see the other {@link #fit(double[]) fit} method).
-     * @throws NumberIsTooSmallException if the sample is too short for
-     * the first guess to be computed.
-     * @throws ZeroException if the first guess cannot be computed because
-     * the abscissa range is zero.
-     */
-    public double[] fit() {
-        return fit((new ParameterGuesser(getObservations())).guess());
-    }
-
-    /**
-     * This class guesses harmonic coefficients from a sample.
-     * <p>The algorithm used to guess the coefficients is as follows:</p>
-     *
-     * <p>We know f (t) at some sampling points t<sub>i</sub> and want to find a,
-     * &omega; and &phi; such that f (t) = a cos (&omega; t + &phi;).
-     * </p>
-     *
-     * <p>From the analytical expression, we can compute two primitives :
-     * <pre>
-     *     If2  (t) = &int; f<sup>2</sup>  = a<sup>2</sup> &times; [t + S (t)] / 2
-     *     If'2 (t) = &int; f'<sup>2</sup> = a<sup>2</sup> &omega;<sup>2</sup> &times; [t - S (t)] / 2
-     *     where S (t) = sin (2 (&omega; t + &phi;)) / (2 &omega;)
-     * </pre>
-     * </p>
-     *
-     * <p>We can remove S between these expressions :
-     * <pre>
-     *     If'2 (t) = a<sup>2</sup> &omega;<sup>2</sup> t - &omega;<sup>2</sup> If2 (t)
-     * </pre>
-     * </p>
-     *
-     * <p>The preceding expression shows that If'2 (t) is a linear
-     * combination of both t and If2 (t): If'2 (t) = A &times; t + B &times; If2 (t)
-     * </p>
-     *
-     * <p>From the primitive, we can deduce the same form for definite
-     * integrals between t<sub>1</sub> and t<sub>i</sub> for each t<sub>i</sub> :
-     * <pre>
-     *   If2 (t<sub>i</sub>) - If2 (t<sub>1</sub>) = A &times; (t<sub>i</sub> - t<sub>1</sub>) + B &times; (If2 (t<sub>i</sub>) - If2 (t<sub>1</sub>))
-     * </pre>
-     * </p>
-     *
-     * <p>We can find the coefficients A and B that best fit the sample
-     * to this linear expression by computing the definite integrals for
-     * each sample points.
-     * </p>
-     *
-     * <p>For a bilinear expression z (x<sub>i</sub>, y<sub>i</sub>) = A &times; x<sub>i</sub> + B &times; y<sub>i</sub>, the
-     * coefficients A and B that minimize a least square criterion
-     * &sum; (z<sub>i</sub> - z (x<sub>i</sub>, y<sub>i</sub>))<sup>2</sup> are given by these expressions:</p>
-     * <pre>
-     *
-     *         &sum;y<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
-     *     A = ------------------------
-     *         &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
-     *
-     *         &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub>
-     *     B = ------------------------
-     *         &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
-     * </pre>
-     * </p>
-     *
-     *
-     * <p>In fact, we can assume both a and &omega; are positive and
-     * compute them directly, knowing that A = a<sup>2</sup> &omega;<sup>2</sup> and that
-     * B = - &omega;<sup>2</sup>. The complete algorithm is therefore:</p>
-     * <pre>
-     *
-     * for each t<sub>i</sub> from t<sub>1</sub> to t<sub>n-1</sub>, compute:
-     *   f  (t<sub>i</sub>)
-     *   f' (t<sub>i</sub>) = (f (t<sub>i+1</sub>) - f(t<sub>i-1</sub>)) / (t<sub>i+1</sub> - t<sub>i-1</sub>)
-     *   x<sub>i</sub> = t<sub>i</sub> - t<sub>1</sub>
-     *   y<sub>i</sub> = &int; f<sup>2</sup> from t<sub>1</sub> to t<sub>i</sub>
-     *   z<sub>i</sub> = &int; f'<sup>2</sup> from t<sub>1</sub> to t<sub>i</sub>
-     *   update the sums &sum;x<sub>i</sub>x<sub>i</sub>, &sum;y<sub>i</sub>y<sub>i</sub>, &sum;x<sub>i</sub>y<sub>i</sub>, &sum;x<sub>i</sub>z<sub>i</sub> and &sum;y<sub>i</sub>z<sub>i</sub>
-     * end for
-     *
-     *            |--------------------------
-     *         \  | &sum;y<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
-     * a     =  \ | ------------------------
-     *           \| &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
-     *
-     *
-     *            |--------------------------
-     *         \  | &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
-     * &omega;     =  \ | ------------------------
-     *           \| &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
-     *
-     * </pre>
-     * </p>
-     *
-     * <p>Once we know &omega;, we can compute:
-     * <pre>
-     *    fc = &omega; f (t) cos (&omega; t) - f' (t) sin (&omega; t)
-     *    fs = &omega; f (t) sin (&omega; t) + f' (t) cos (&omega; t)
-     * </pre>
-     * </p>
-     *
-     * <p>It appears that <code>fc = a &omega; cos (&phi;)</code> and
-     * <code>fs = -a &omega; sin (&phi;)</code>, so we can use these
-     * expressions to compute &phi;. The best estimate over the sample is
-     * given by averaging these expressions.
-     * </p>
-     *
-     * <p>Since integrals and means are involved in the preceding
-     * estimations, these operations run in O(n) time, where n is the
-     * number of measurements.</p>
-     */
-    public static class ParameterGuesser {
-        /** Amplitude. */
-        private final double a;
-        /** Angular frequency. */
-        private final double omega;
-        /** Phase. */
-        private final double phi;
-
-        /**
-         * Simple constructor.
-         *
-         * @param observations Sampled observations.
-         * @throws NumberIsTooSmallException if the sample is too short.
-         * @throws ZeroException if the abscissa range is zero.
-         * @throws MathIllegalStateException when the guessing procedure cannot
-         * produce sensible results.
-         */
-        public ParameterGuesser(WeightedObservedPoint[] observations) {
-            if (observations.length < 4) {
-                throw new NumberIsTooSmallException(LocalizedFormats.INSUFFICIENT_OBSERVED_POINTS_IN_SAMPLE,
-                                                    observations.length, 4, true);
-            }
-
-            final WeightedObservedPoint[] sorted = sortObservations(observations);
-
-            final double aOmega[] = guessAOmega(sorted);
-            a = aOmega[0];
-            omega = aOmega[1];
-
-            phi = guessPhi(sorted);
-        }
-
-        /**
-         * Gets an estimation of the parameters.
-         *
-         * @return the guessed parameters, in the following order:
-         * <ul>
-         *  <li>Amplitude</li>
-         *  <li>Angular frequency</li>
-         *  <li>Phase</li>
-         * </ul>
-         */
-        public double[] guess() {
-            return new double[] { a, omega, phi };
-        }
-
-        /**
-         * Sort the observations with respect to the abscissa.
-         *
-         * @param unsorted Input observations.
-         * @return the input observations, sorted.
-         */
-        private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
-            final WeightedObservedPoint[] observations = unsorted.clone();
-
-            // Since the samples are almost always already sorted, this
-            // method is implemented as an insertion sort that reorders the
-            // elements in place. Insertion sort is very efficient in this case.
-            WeightedObservedPoint curr = observations[0];
-            for (int j = 1; j < observations.length; ++j) {
-                WeightedObservedPoint prec = curr;
-                curr = observations[j];
-                if (curr.getX() < prec.getX()) {
-                    // the current element should be inserted closer to the beginning
-                    int i = j - 1;
-                    WeightedObservedPoint mI = observations[i];
-                    while ((i >= 0) && (curr.getX() < mI.getX())) {
-                        observations[i + 1] = mI;
-                        if (i-- != 0) {
-                            mI = observations[i];
-                        }
-                    }
-                    observations[i + 1] = curr;
-                    curr = observations[j];
-                }
-            }
-
-            return observations;
-        }
-
-        /**
-         * Estimate a first guess of the amplitude and angular frequency.
-         * This method assumes that the {@link #sortObservations(WeightedObservedPoint[])} method
-         * has been called previously.
-         *
-         * @param observations Observations, sorted w.r.t. abscissa.
-         * @throws ZeroException if the abscissa range is zero.
-         * @throws MathIllegalStateException when the guessing procedure cannot
-         * produce sensible results.
-         * @return the guessed amplitude (at index 0) and circular frequency
-         * (at index 1).
-         */
-        private double[] guessAOmega(WeightedObservedPoint[] observations) {
-            final double[] aOmega = new double[2];
-
-            // initialize the sums for the linear model between the two integrals
-            double sx2 = 0;
-            double sy2 = 0;
-            double sxy = 0;
-            double sxz = 0;
-            double syz = 0;
-
-            double currentX = observations[0].getX();
-            double currentY = observations[0].getY();
-            double f2Integral = 0;
-            double fPrime2Integral = 0;
-            final double startX = currentX;
-            for (int i = 1; i < observations.length; ++i) {
-                // one step forward
-                final double previousX = currentX;
-                final double previousY = currentY;
-                currentX = observations[i].getX();
-                currentY = observations[i].getY();
-
-                // update the integrals of f<sup>2</sup> and f'<sup>2</sup>
-                // considering a linear model for f (and therefore constant f')
-                final double dx = currentX - previousX;
-                final double dy = currentY - previousY;
-                final double f2StepIntegral =
-                    dx * (previousY * previousY + previousY * currentY + currentY * currentY) / 3;
-                final double fPrime2StepIntegral = dy * dy / dx;
-
-                final double x = currentX - startX;
-                f2Integral += f2StepIntegral;
-                fPrime2Integral += fPrime2StepIntegral;
-
-                sx2 += x * x;
-                sy2 += f2Integral * f2Integral;
-                sxy += x * f2Integral;
-                sxz += x * fPrime2Integral;
-                syz += f2Integral * fPrime2Integral;
-            }
-
-            // compute the amplitude and pulsation coefficients
-            double c1 = sy2 * sxz - sxy * syz;
-            double c2 = sxy * sxz - sx2 * syz;
-            double c3 = sx2 * sy2 - sxy * sxy;
-            if ((c1 / c2 < 0) || (c2 / c3 < 0)) {
-                final int last = observations.length - 1;
-                // Range of the observations, assuming that the
-                // observations are sorted.
-                final double xRange = observations[last].getX() - observations[0].getX();
-                if (xRange == 0) {
-                    throw new ZeroException();
-                }
-                aOmega[1] = 2 * Math.PI / xRange;
-
-                double yMin = Double.POSITIVE_INFINITY;
-                double yMax = Double.NEGATIVE_INFINITY;
-                for (int i = 1; i < observations.length; ++i) {
-                    final double y = observations[i].getY();
-                    if (y < yMin) {
-                        yMin = y;
-                    }
-                    if (y > yMax) {
-                        yMax = y;
-                    }
-                }
-                aOmega[0] = 0.5 * (yMax - yMin);
-            } else {
-                if (c2 == 0) {
-                    // In some ill-conditioned cases (cf. MATH-844), the guesser
-                    // procedure cannot produce sensible results.
-                    throw new MathIllegalStateException(LocalizedFormats.ZERO_DENOMINATOR);
-                }
-
-                aOmega[0] = FastMath.sqrt(c1 / c2);
-                aOmega[1] = FastMath.sqrt(c2 / c3);
-            }
-
-            return aOmega;
-        }
-
-        /**
-         * Estimate a first guess of the phase.
-         *
-         * @param observations Observations, sorted w.r.t. abscissa.
-         * @return the guessed phase.
-         */
-        private double guessPhi(WeightedObservedPoint[] observations) {
-            // initialize the means
-            double fcMean = 0;
-            double fsMean = 0;
-
-            double currentX = observations[0].getX();
-            double currentY = observations[0].getY();
-            for (int i = 1; i < observations.length; ++i) {
-                // one step forward
-                final double previousX = currentX;
-                final double previousY = currentY;
-                currentX = observations[i].getX();
-                currentY = observations[i].getY();
-                final double currentYPrime = (currentY - previousY) / (currentX - previousX);
-
-                double omegaX = omega * currentX;
-                double cosine = FastMath.cos(omegaX);
-                double sine = FastMath.sin(omegaX);
-                fcMean += omega * currentY * cosine - currentYPrime * sine;
-                fsMean += omega * currentY * sine + currentYPrime * cosine;
-            }
-
-            return FastMath.atan2(-fsMean, fcMean);
-        }
-    }
-}
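
For reference, once the sums over x(i) = t(i) - t(1), y(i) = integral of f^2 and
z(i) = integral of f'^2 have been accumulated, the closed-form step documented in
the ParameterGuesser Javadoc above reduces to two square roots. A minimal sketch of
that final step (hypothetical method name, plain Math instead of FastMath; the sum
variables mirror those of the removed guessAOmega, and the phase then follows from
phi = atan2(-fsMean, fcMean)):

    // Sketch only: amplitude and angular frequency from the accumulated sums
    //   sx2 = sum(x*x), sy2 = sum(y*y), sxy = sum(x*y), sxz = sum(x*z), syz = sum(y*z).
    static double[] amplitudeAndOmega(double sx2, double sy2, double sxy, double sxz, double syz) {
        final double c1 = sy2 * sxz - sxy * syz;
        final double c2 = sxy * sxz - sx2 * syz;
        final double c3 = sx2 * sy2 - sxy * sxy;
        // From the linear model If'2 = A*t + B*If2, with A = a^2*omega^2 and B = -omega^2,
        // a^2 = c1 / c2 and omega^2 = c2 / c3 (well-conditioned case only; the removed
        // code falls back to range-based estimates when these ratios are negative).
        final double a     = Math.sqrt(c1 / c2);   // amplitude
        final double omega = Math.sqrt(c2 / c3);   // angular frequency
        return new double[] { a, omega };
    }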

http://git-wip-us.apache.org/repos/asf/commons-math/blob/b4669aad/src/main/java/org/apache/commons/math4/optimization/fitting/PolynomialFitter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/commons/math4/optimization/fitting/PolynomialFitter.java b/src/main/java/org/apache/commons/math4/optimization/fitting/PolynomialFitter.java
deleted file mode 100644
index 3773acb..0000000
--- a/src/main/java/org/apache/commons/math4/optimization/fitting/PolynomialFitter.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math4.optimization.fitting;
-
-import org.apache.commons.math4.analysis.polynomials.PolynomialFunction;
-import org.apache.commons.math4.optimization.DifferentiableMultivariateVectorOptimizer;
-
-/**
- * Polynomial fitting is a very simple case of {@link CurveFitter curve fitting}.
- * The estimated coefficients are the polynomial coefficients (see the
- * {@link #fit(double[]) fit} method).
- *
- * @deprecated As of 3.1 (to be removed in 4.0).
- * @since 2.0
- */
-@Deprecated
-public class PolynomialFitter extends CurveFitter<PolynomialFunction.Parametric> {
-    /** Polynomial degree.
-     * @deprecated
-     */
-    @Deprecated
-    private final int degree;
-
-    /**
-     * Simple constructor.
-     * <p>The polynomial fitter built this way fits complete polynomials,
-     * i.e. an n-degree polynomial has n+1 coefficients.</p>
-     *
-     * @param degree Maximal degree of the polynomial.
-     * @param optimizer Optimizer to use for the fitting.
-     * @deprecated Since 3.1 (to be removed in 4.0). Please use
-     * {@link #PolynomialFitter(DifferentiableMultivariateVectorOptimizer)} instead.
-     */
-    @Deprecated
-    public PolynomialFitter(int degree, final DifferentiableMultivariateVectorOptimizer optimizer) {
-        super(optimizer);
-        this.degree = degree;
-    }
-
-    /**
-     * Simple constructor.
-     *
-     * @param optimizer Optimizer to use for the fitting.
-     * @since 3.1
-     */
-    public PolynomialFitter(DifferentiableMultivariateVectorOptimizer optimizer) {
-        super(optimizer);
-        degree = -1; // To avoid compilation error until the instance variable is removed.
-    }
-
-    /**
-     * Get the polynomial fitting the weighted (x, y) points.
-     *
-     * @return the coefficients of the polynomial that best fits the observed points.
-     * @throws org.apache.commons.math4.exception.ConvergenceException
-     * if the algorithm failed to converge.
-     * @deprecated Since 3.1 (to be removed in 4.0). Please use {@link #fit(double[])} instead.
-     */
-    @Deprecated
-    public double[] fit() {
-        return fit(new PolynomialFunction.Parametric(), new double[degree + 1]);
-    }
-
-    /**
-     * Get the coefficients of the polynomial fitting the weighted data points.
-     * The degree of the fitting polynomial is {@code guess.length - 1}.
-     *
-     * @param guess First guess for the coefficients. They must be sorted in
-     * increasing order of the polynomial's degree.
-     * @param maxEval Maximum number of evaluations of the polynomial.
-     * @return the coefficients of the polynomial that best fits the observed points.
-     * @throws org.apache.commons.math4.exception.TooManyEvaluationsException if
-     * the number of evaluations exceeds {@code maxEval}.
-     * @throws org.apache.commons.math4.exception.ConvergenceException
-     * if the algorithm failed to converge.
-     * @since 3.1
-     */
-    public double[] fit(int maxEval, double[] guess) {
-        return fit(maxEval, new PolynomialFunction.Parametric(), guess);
-    }
-
-    /**
-     * Get the coefficients of the polynomial fitting the weighted data points.
-     * The degree of the fitting polynomial is {@code guess.length - 1}.
-     *
-     * @param guess First guess for the coefficients. They must be sorted in
-     * increasing order of the polynomial's degree.
-     * @return the coefficients of the polynomial that best fits the observed points.
-     * @throws org.apache.commons.math4.exception.ConvergenceException
-     * if the algorithm failed to converge.
-     * @since 3.1
-     */
-    public double[] fit(double[] guess) {
-        return fit(new PolynomialFunction.Parametric(), guess);
-    }
-}
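
The fitters deleted in this commit were superseded by the simpler
org.apache.commons.math4.fitting API. A hedged migration sketch, assuming the math4
package keeps the Commons Math 3.3 signatures (PolynomialCurveFitter.create(int) and
fit(Collection<WeightedObservedPoint>)); the class and method names below are
illustrative only:

    import org.apache.commons.math4.fitting.PolynomialCurveFitter;
    import org.apache.commons.math4.fitting.WeightedObservedPoints;

    // Sketch only: what a caller of the removed PolynomialFitter might migrate to,
    // under the assumption stated above.
    public class PolynomialFitMigration {
        public static double[] fitQuadratic(double[] x, double[] y) {
            final WeightedObservedPoints obs = new WeightedObservedPoints();
            for (int i = 0; i < x.length; i++) {
                obs.add(x[i], y[i]);   // unit weight
            }
            // Coefficients are returned in increasing degree order: c0 + c1*x + c2*x^2.
            return PolynomialCurveFitter.create(2).fit(obs.toList());
        }
    }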