You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hivemall.apache.org by my...@apache.org on 2016/12/02 07:04:52 UTC

[50/50] [abbrv] incubator-hivemall git commit: Merge branch 'AddOptimizers' of https://github.com/maropu/hivemall into JIRA-22/pr-285

Merge branch 'AddOptimizers' of https://github.com/maropu/hivemall into JIRA-22/pr-285


Project: http://git-wip-us.apache.org/repos/asf/incubator-hivemall/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hivemall/commit/05766432
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hivemall/tree/05766432
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hivemall/diff/05766432

Branch: refs/heads/JIRA-22/pr-285
Commit: 05766432c45f89627e423245e5aec3ced6d0c100
Parents: 775ae4f 3620eb8
Author: myui <yu...@gmail.com>
Authored: Fri Dec 2 15:35:05 2016 +0900
Committer: myui <yu...@gmail.com>
Committed: Fri Dec 2 15:35:05 2016 +0900

----------------------------------------------------------------------
 .../src/main/java/hivemall/LearnerBaseUDTF.java |  55 +++
 .../hivemall/classifier/AROWClassifierUDTF.java |   2 +-
 .../hivemall/classifier/AdaGradRDAUDTF.java     |   6 +-
 .../classifier/BinaryOnlineClassifierUDTF.java  |  13 +
 .../classifier/GeneralClassifierUDTF.java       | 122 +++++
 .../classifier/PassiveAggressiveUDTF.java       |   2 +-
 .../main/java/hivemall/common/EtaEstimator.java | 160 -------
 .../java/hivemall/common/LossFunctions.java     | 467 -------------------
 .../java/hivemall/fm/FMHyperParameters.java     |   2 +-
 .../hivemall/fm/FactorizationMachineModel.java  |   2 +-
 .../hivemall/fm/FactorizationMachineUDTF.java   |   8 +-
 .../fm/FieldAwareFactorizationMachineModel.java |   1 +
 .../hivemall/mf/BPRMatrixFactorizationUDTF.java |   2 +-
 .../hivemall/mf/MatrixFactorizationSGDUDTF.java |   2 +-
 .../main/java/hivemall/model/DenseModel.java    |   5 +
 .../main/java/hivemall/model/IWeightValue.java  |  16 +-
 .../main/java/hivemall/model/NewDenseModel.java | 293 ++++++++++++
 .../model/NewSpaceEfficientDenseModel.java      | 317 +++++++++++++
 .../java/hivemall/model/NewSparseModel.java     | 197 ++++++++
 .../java/hivemall/model/PredictionModel.java    |   2 +
 .../model/SpaceEfficientDenseModel.java         |   5 +
 .../main/java/hivemall/model/SparseModel.java   |   5 +
 .../model/SynchronizedModelWrapper.java         |  10 +
 .../main/java/hivemall/model/WeightValue.java   | 162 ++++++-
 .../hivemall/model/WeightValueWithClock.java    | 167 ++++++-
 .../optimizer/DenseOptimizerFactory.java        | 215 +++++++++
 .../java/hivemall/optimizer/EtaEstimator.java   | 191 ++++++++
 .../java/hivemall/optimizer/LossFunctions.java  | 467 +++++++++++++++++++
 .../main/java/hivemall/optimizer/Optimizer.java | 246 ++++++++++
 .../java/hivemall/optimizer/Regularization.java |  99 ++++
 .../optimizer/SparseOptimizerFactory.java       | 171 +++++++
 .../hivemall/regression/AROWRegressionUDTF.java |   2 +-
 .../java/hivemall/regression/AdaDeltaUDTF.java  |   5 +-
 .../java/hivemall/regression/AdaGradUDTF.java   |   5 +-
 .../regression/GeneralRegressionUDTF.java       | 126 +++++
 .../java/hivemall/regression/LogressUDTF.java   |  10 +-
 .../PassiveAggressiveRegressionUDTF.java        |   2 +-
 .../hivemall/regression/RegressionBaseUDTF.java |  26 +-
 .../NewSpaceEfficientNewDenseModelTest.java     |  60 +++
 .../model/SpaceEfficientDenseModelTest.java     |  60 ---
 .../java/hivemall/optimizer/OptimizerTest.java  | 172 +++++++
 .../java/hivemall/mix/server/MixServerTest.java |  18 +-
 resources/ddl/define-all-as-permanent.hive      |  13 +-
 resources/ddl/define-all.hive                   |  12 +-
 .../hivemall/mix/server/MixServerSuite.scala    |   6 +-
 45 files changed, 3195 insertions(+), 734 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/LearnerBaseUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/classifier/AROWClassifierUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/classifier/AdaGradRDAUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/classifier/BinaryOnlineClassifierUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/classifier/PassiveAggressiveUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/fm/FMHyperParameters.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/fm/FactorizationMachineModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/fm/FactorizationMachineUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/fm/FieldAwareFactorizationMachineModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/mf/BPRMatrixFactorizationUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/mf/MatrixFactorizationSGDUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/DenseModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/IWeightValue.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/PredictionModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/SpaceEfficientDenseModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/SparseModel.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/SynchronizedModelWrapper.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/WeightValue.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/model/WeightValueWithClock.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/optimizer/EtaEstimator.java
----------------------------------------------------------------------
diff --cc core/src/main/java/hivemall/optimizer/EtaEstimator.java
index 0000000,ac1d112..a17c349
mode 000000,100644..100644
--- a/core/src/main/java/hivemall/optimizer/EtaEstimator.java
+++ b/core/src/main/java/hivemall/optimizer/EtaEstimator.java
@@@ -1,0 -1,191 +1,191 @@@
+ /*
 - * Hivemall: Hive scalable Machine Learning Library
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
+  *
 - * Copyright (C) 2015 Makoto YUI
 - * Copyright (C) 2013-2015 National Institute of Advanced Industrial Science and Technology (AIST)
++ *   http://www.apache.org/licenses/LICENSE-2.0
+  *
 - * Licensed under the Apache License, Version 2.0 (the "License");
 - * you may not use this file except in compliance with the License.
 - * You may obtain a copy of the License at
 - *
 - *         http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software
 - * distributed under the License is distributed on an "AS IS" BASIS,
 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 - * See the License for the specific language governing permissions and
 - * limitations under the License.
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied.  See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
+  */
+ package hivemall.optimizer;
+ 
+ import hivemall.utils.lang.NumberUtils;
+ import hivemall.utils.lang.Primitives;
+ 
+ import java.util.Map;
+ import javax.annotation.Nonnegative;
+ import javax.annotation.Nonnull;
+ import javax.annotation.Nullable;
+ 
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+ 
+ public abstract class EtaEstimator {
+ 
+     protected final float eta0;
+ 
+     public EtaEstimator(float eta0) {
+         this.eta0 = eta0;
+     }
+ 
+     public float eta0() {
+         return eta0;
+     }
+ 
+     public abstract float eta(long t);
+ 
+     public void update(@Nonnegative float multiplier) {}
+ 
+     public static final class FixedEtaEstimator extends EtaEstimator {
+ 
+         public FixedEtaEstimator(float eta) {
+             super(eta);
+         }
+ 
+         @Override
+         public float eta(long t) {
+             return eta0;
+         }
+ 
+     }
+ 
+     public static final class SimpleEtaEstimator extends EtaEstimator {
+ 
+         private final float finalEta;
+         private final double total_steps;
+ 
+         public SimpleEtaEstimator(float eta0, long total_steps) {
+             super(eta0);
+             this.finalEta = (float) (eta0 / 2.d);
+             this.total_steps = total_steps;
+         }
+ 
+         @Override
+         public float eta(final long t) {
+             if (t > total_steps) {
+                 return finalEta;
+             }
+             return (float) (eta0 / (1.d + (t / total_steps)));
+         }
+ 
+     }
+ 
+     public static final class InvscalingEtaEstimator extends EtaEstimator {
+ 
+         private final double power_t;
+ 
+         public InvscalingEtaEstimator(float eta0, double power_t) {
+             super(eta0);
+             this.power_t = power_t;
+         }
+ 
+         @Override
+         public float eta(final long t) {
+             return (float) (eta0 / Math.pow(t, power_t));
+         }
+ 
+     }
+ 
+     /**
+      * bold driver: Gemulla et al., Large-scale matrix factorization with distributed stochastic
+      * gradient descent, KDD 2011.
+      */
+     public static final class AdjustingEtaEstimator extends EtaEstimator {
+ 
+         private float eta;
+ 
+         public AdjustingEtaEstimator(float eta) {
+             super(eta);
+             this.eta = eta;
+         }
+ 
+         @Override
+         public float eta(long t) {
+             return eta;
+         }
+ 
+         @Override
+         public void update(@Nonnegative float multiplier) {
+             float newEta = eta * multiplier;
+             if (!NumberUtils.isFinite(newEta)) {
+                 // avoid NaN or INFINITY
+                 return;
+             }
+             this.eta = Math.min(eta0, newEta); // never be larger than eta0
+         }
+ 
+     }
+ 
+     @Nonnull
+     public static EtaEstimator get(@Nullable CommandLine cl) throws UDFArgumentException {
+         return get(cl, 0.1f);
+     }
+ 
+     @Nonnull
+     public static EtaEstimator get(@Nullable CommandLine cl, float defaultEta0)
+             throws UDFArgumentException {
+         if (cl == null) {
+             return new InvscalingEtaEstimator(defaultEta0, 0.1d);
+         }
+ 
+         if (cl.hasOption("boldDriver")) {
+             float eta = Primitives.parseFloat(cl.getOptionValue("eta"), 0.3f);
+             return new AdjustingEtaEstimator(eta);
+         }
+ 
+         String etaValue = cl.getOptionValue("eta");
+         if (etaValue != null) {
+             float eta = Float.parseFloat(etaValue);
+             return new FixedEtaEstimator(eta);
+         }
+ 
+         float eta0 = Primitives.parseFloat(cl.getOptionValue("eta0"), defaultEta0);
+         if (cl.hasOption("t")) {
+             long t = Long.parseLong(cl.getOptionValue("t"));
+             return new SimpleEtaEstimator(eta0, t);
+         }
+ 
+         double power_t = Primitives.parseDouble(cl.getOptionValue("power_t"), 0.1d);
+         return new InvscalingEtaEstimator(eta0, power_t);
+     }
+ 
+     @Nonnull
+     public static EtaEstimator get(@Nonnull final Map<String, String> options)
+             throws IllegalArgumentException {
+         final String etaName = options.get("eta");
+         if(etaName == null) {
+             return new FixedEtaEstimator(1.f);
+         }
+         float eta0 = 0.1f;
+         if(options.containsKey("eta0")) {
+             eta0 = Float.parseFloat(options.get("eta0"));
+         }
+         if(etaName.toLowerCase().equals("fixed")) {
+             return new FixedEtaEstimator(eta0);
+         } else if(etaName.toLowerCase().equals("simple")) {
+             long t = 10000;
+             if(options.containsKey("t")) {
+                 t = Long.parseLong(options.get("t"));
+             }
+             return new SimpleEtaEstimator(eta0, t);
+         } else if(etaName.toLowerCase().equals("inverse")) {
+             double power_t = 0.1;
+             if(options.containsKey("power_t")) {
+                 power_t = Double.parseDouble(options.get("power_t"));
+             }
+             return new InvscalingEtaEstimator(eta0, power_t);
+         } else {
+             throw new IllegalArgumentException("Unsupported ETA name: " + etaName);
+         }
+     }
+ 
+ }

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/optimizer/LossFunctions.java
----------------------------------------------------------------------
diff --cc core/src/main/java/hivemall/optimizer/LossFunctions.java
index 0000000,d11be9b..07f7cb8
mode 000000,100644..100644
--- a/core/src/main/java/hivemall/optimizer/LossFunctions.java
+++ b/core/src/main/java/hivemall/optimizer/LossFunctions.java
@@@ -1,0 -1,467 +1,467 @@@
+ /*
 - * Hivemall: Hive scalable Machine Learning Library
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
+  *
 - * Copyright (C) 2015 Makoto YUI
 - * Copyright (C) 2013-2015 National Institute of Advanced Industrial Science and Technology (AIST)
++ *   http://www.apache.org/licenses/LICENSE-2.0
+  *
 - * Licensed under the Apache License, Version 2.0 (the "License");
 - * you may not use this file except in compliance with the License.
 - * You may obtain a copy of the License at
 - *
 - *         http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software
 - * distributed under the License is distributed on an "AS IS" BASIS,
 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 - * See the License for the specific language governing permissions and
 - * limitations under the License.
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied.  See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
+  */
+ package hivemall.optimizer;
+ 
+ import hivemall.utils.math.MathUtils;
+ 
+ /**
+  * @link https://github.com/JohnLangford/vowpal_wabbit/wiki/Loss-functions
+  */
+ public final class LossFunctions {
+ 
+     public enum LossType {
+         SquaredLoss, LogLoss, HingeLoss, SquaredHingeLoss, QuantileLoss, EpsilonInsensitiveLoss
+     }
+ 
+     public static LossFunction getLossFunction(String type) {
+         if ("SquaredLoss".equalsIgnoreCase(type)) {
+             return new SquaredLoss();
+         } else if ("LogLoss".equalsIgnoreCase(type)) {
+             return new LogLoss();
+         } else if ("HingeLoss".equalsIgnoreCase(type)) {
+             return new HingeLoss();
+         } else if ("SquaredHingeLoss".equalsIgnoreCase(type)) {
+             return new SquaredHingeLoss();
+         } else if ("QuantileLoss".equalsIgnoreCase(type)) {
+             return new QuantileLoss();
+         } else if ("EpsilonInsensitiveLoss".equalsIgnoreCase(type)) {
+             return new EpsilonInsensitiveLoss();
+         }
+         throw new IllegalArgumentException("Unsupported type: " + type);
+     }
+ 
+     public static LossFunction getLossFunction(LossType type) {
+         switch (type) {
+             case SquaredLoss:
+                 return new SquaredLoss();
+             case LogLoss:
+                 return new LogLoss();
+             case HingeLoss:
+                 return new HingeLoss();
+             case SquaredHingeLoss:
+                 return new SquaredHingeLoss();
+             case QuantileLoss:
+                 return new QuantileLoss();
+             case EpsilonInsensitiveLoss:
+                 return new EpsilonInsensitiveLoss();
+             default:
+                 throw new IllegalArgumentException("Unsupported type: " + type);
+         }
+     }
+ 
+     public interface LossFunction {
+ 
+         /**
+          * Evaluate the loss function.
+          *
+          * @param p The prediction, p = w^T x
+          * @param y The true value (aka target)
+          * @return The loss evaluated at `p` and `y`.
+          */
+         public float loss(float p, float y);
+ 
+         public double loss(double p, double y);
+ 
+         /**
+          * Evaluate the derivative of the loss function with respect to the prediction `p`.
+          *
+          * @param p The prediction, p = w^T x
+          * @param y The true value (aka target)
+          * @return The derivative of the loss function w.r.t. `p`.
+          */
+         public float dloss(float p, float y);
+ 
+         public boolean forBinaryClassification();
+ 
+         public boolean forRegression();
+ 
+     }
+ 
+     public static abstract class BinaryLoss implements LossFunction {
+ 
+         protected static void checkTarget(float y) {
+             if (!(y == 1.f || y == -1.f)) {
+                 throw new IllegalArgumentException("target must be [+1,-1]: " + y);
+             }
+         }
+ 
+         protected static void checkTarget(double y) {
+             if (!(y == 1.d || y == -1.d)) {
+                 throw new IllegalArgumentException("target must be [+1,-1]: " + y);
+             }
+         }
+ 
+         @Override
+         public boolean forBinaryClassification() {
+             return true;
+         }
+ 
+         @Override
+         public boolean forRegression() {
+             return false;
+         }
+     }
+ 
+     public static abstract class RegressionLoss implements LossFunction {
+ 
+         @Override
+         public boolean forBinaryClassification() {
+             return false;
+         }
+ 
+         @Override
+         public boolean forRegression() {
+             return true;
+         }
+ 
+     }
+ 
+     /**
+      * Squared loss for regression problems.
+      *
+      * If you're trying to minimize the mean error, use squared-loss.
+      */
+     public static final class SquaredLoss extends RegressionLoss {
+ 
+         @Override
+         public float loss(float p, float y) {
+             final float z = p - y;
+             return z * z * 0.5f;
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             final double z = p - y;
+             return z * z * 0.5d;
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             return p - y; // 2 (p - y) / 2
+         }
+     }
+ 
+     /**
+      * Logistic regression loss for binary classification with y in {-1, 1}.
+      */
+     public static final class LogLoss extends BinaryLoss {
+ 
+         /**
+          * <code>logloss(p,y) = log(1+exp(-p*y))</code>
+          */
+         @Override
+         public float loss(float p, float y) {
+             checkTarget(y);
+ 
+             final float z = y * p;
+             if (z > 18.f) {
+                 return (float) Math.exp(-z);
+             }
+             if (z < -18.f) {
+                 return -z;
+             }
+             return (float) Math.log(1.d + Math.exp(-z));
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             checkTarget(y);
+ 
+             final double z = y * p;
+             if (z > 18.d) {
+                 return Math.exp(-z);
+             }
+             if (z < -18.d) {
+                 return -z;
+             }
+             return Math.log(1.d + Math.exp(-z));
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             checkTarget(y);
+ 
+             float z = y * p;
+             if (z > 18.f) {
+                 return (float) Math.exp(-z) * -y;
+             }
+             if (z < -18.f) {
+                 return -y;
+             }
+             return -y / ((float) Math.exp(z) + 1.f);
+         }
+     }
+ 
+     /**
+      * Hinge loss for binary classification tasks with y in {-1,1}.
+      */
+     public static final class HingeLoss extends BinaryLoss {
+ 
+         private float threshold;
+ 
+         public HingeLoss() {
+             this(1.f);
+         }
+ 
+         /**
+          * @param threshold Margin threshold. When threshold=1.0, one gets the loss used by SVM.
+          *        When threshold=0.0, one gets the loss used by the Perceptron.
+          */
+         public HingeLoss(float threshold) {
+             this.threshold = threshold;
+         }
+ 
+         public void setThreshold(float threshold) {
+             this.threshold = threshold;
+         }
+ 
+         @Override
+         public float loss(float p, float y) {
+             float loss = hingeLoss(p, y, threshold);
+             return (loss > 0.f) ? loss : 0.f;
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             double loss = hingeLoss(p, y, threshold);
+             return (loss > 0.d) ? loss : 0.d;
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             float loss = hingeLoss(p, y, threshold);
+             return (loss > 0.f) ? -y : 0.f;
+         }
+     }
+ 
+     /**
+      * Squared Hinge loss for binary classification tasks with y in {-1,1}.
+      */
+     public static final class SquaredHingeLoss extends BinaryLoss {
+ 
+         @Override
+         public float loss(float p, float y) {
+             return squaredHingeLoss(p, y);
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             return squaredHingeLoss(p, y);
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             checkTarget(y);
+ 
+             float d = 1 - (y * p);
+             return (d > 0.f) ? -2.f * d * y : 0.f;
+         }
+ 
+     }
+ 
+     /**
+      * Quantile loss is useful to predict rank/order and you do not mind the mean error to increase
+      * as long as you get the relative order correct.
+      *
+      * @link http://en.wikipedia.org/wiki/Quantile_regression
+      */
+     public static final class QuantileLoss extends RegressionLoss {
+ 
+         private float tau;
+ 
+         public QuantileLoss() {
+             this.tau = 0.5f;
+         }
+ 
+         public QuantileLoss(float tau) {
+             setTau(tau);
+         }
+ 
+         public void setTau(float tau) {
+             if (tau <= 0 || tau >= 1.0) {
+                 throw new IllegalArgumentException("tau must be in range (0, 1): " + tau);
+             }
+             this.tau = tau;
+         }
+ 
+         @Override
+         public float loss(float p, float y) {
+             float e = y - p;
+             if (e > 0.f) {
+                 return tau * e;
+             } else {
+                 return -(1.f - tau) * e;
+             }
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             double e = y - p;
+             if (e > 0.d) {
+                 return tau * e;
+             } else {
+                 return -(1.d - tau) * e;
+             }
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             float e = y - p;
+             if (e == 0.f) {
+                 return 0.f;
+             }
+             return (e > 0.f) ? -tau : (1.f - tau);
+         }
+ 
+     }
+ 
+     /**
+      * Epsilon-Insensitive loss used by Support Vector Regression (SVR).
+      * <code>loss = max(0, |y - p| - epsilon)</code>
+      */
+     public static final class EpsilonInsensitiveLoss extends RegressionLoss {
+ 
+         private float epsilon;
+ 
+         public EpsilonInsensitiveLoss() {
+             this(0.1f);
+         }
+ 
+         public EpsilonInsensitiveLoss(float epsilon) {
+             this.epsilon = epsilon;
+         }
+ 
+         public void setEpsilon(float epsilon) {
+             this.epsilon = epsilon;
+         }
+ 
+         @Override
+         public float loss(float p, float y) {
+             float loss = Math.abs(y - p) - epsilon;
+             return (loss > 0.f) ? loss : 0.f;
+         }
+ 
+         @Override
+         public double loss(double p, double y) {
+             double loss = Math.abs(y - p) - epsilon;
+             return (loss > 0.d) ? loss : 0.d;
+         }
+ 
+         @Override
+         public float dloss(float p, float y) {
+             if ((y - p) > epsilon) {// real value > predicted value + epsilon
+                 return -1.f;
+             }
+             if ((p - y) > epsilon) {// real value < predicted value - epsilon
+                 return 1.f;
+             }
+             return 0.f;
+         }
+ 
+     }
+ 
+     public static float logisticLoss(final float target, final float predicted) {
+         if (predicted > -100.d) {
+             return target - (float) MathUtils.sigmoid(predicted);
+         } else {
+             return target;
+         }
+     }
+ 
+     public static float logLoss(final float p, final float y) {
+         BinaryLoss.checkTarget(y);
+ 
+         final float z = y * p;
+         if (z > 18.f) {
+             return (float) Math.exp(-z);
+         }
+         if (z < -18.f) {
+             return -z;
+         }
+         return (float) Math.log(1.d + Math.exp(-z));
+     }
+ 
+     public static double logLoss(final double p, final double y) {
+         BinaryLoss.checkTarget(y);
+ 
+         final double z = y * p;
+         if (z > 18.d) {
+             return Math.exp(-z);
+         }
+         if (z < -18.d) {
+             return -z;
+         }
+         return Math.log(1.d + Math.exp(-z));
+     }
+ 
+     public static float squaredLoss(float p, float y) {
+         final float z = p - y;
+         return z * z * 0.5f;
+     }
+ 
+     public static double squaredLoss(double p, double y) {
+         final double z = p - y;
+         return z * z * 0.5d;
+     }
+ 
+     public static float hingeLoss(final float p, final float y, final float threshold) {
+         BinaryLoss.checkTarget(y);
+ 
+         float z = y * p;
+         return threshold - z;
+     }
+ 
+     public static double hingeLoss(final double p, final double y, final double threshold) {
+         BinaryLoss.checkTarget(y);
+ 
+         double z = y * p;
+         return threshold - z;
+     }
+ 
+     public static float hingeLoss(float p, float y) {
+         return hingeLoss(p, y, 1.f);
+     }
+ 
+     public static double hingeLoss(double p, double y) {
+         return hingeLoss(p, y, 1.d);
+     }
+ 
+     public static float squaredHingeLoss(final float p, final float y) {
+         BinaryLoss.checkTarget(y);
+ 
+         float z = y * p;
+         float d = 1.f - z;
+         return (d > 0.f) ? (d * d) : 0.f;
+     }
+ 
+     public static double squaredHingeLoss(final double p, final double y) {
+         BinaryLoss.checkTarget(y);
+ 
+         double z = y * p;
+         double d = 1.d - z;
+         return (d > 0.d) ? d * d : 0.d;
+     }
+ 
+     /**
+      * Math.abs(target - predicted) - epsilon
+      */
+     public static float epsilonInsensitiveLoss(float predicted, float target, float epsilon) {
+         return Math.abs(target - predicted) - epsilon;
+     }
+ }

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/AROWRegressionUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/AdaDeltaUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/AdaGradUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/LogressUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/PassiveAggressiveRegressionUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/main/java/hivemall/regression/RegressionBaseUDTF.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/core/src/test/java/hivemall/model/NewSpaceEfficientNewDenseModelTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/hivemall/model/NewSpaceEfficientNewDenseModelTest.java
index 0000000,dd9c4ec..c892071
mode 000000,100644..100644
--- a/core/src/test/java/hivemall/model/NewSpaceEfficientNewDenseModelTest.java
+++ b/core/src/test/java/hivemall/model/NewSpaceEfficientNewDenseModelTest.java
@@@ -1,0 -1,60 +1,60 @@@
+ /*
 - * Hivemall: Hive scalable Machine Learning Library
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
+  *
 - * Copyright (C) 2015 Makoto YUI
 - * Copyright (C) 2013-2015 National Institute of Advanced Industrial Science and Technology (AIST)
++ *   http://www.apache.org/licenses/LICENSE-2.0
+  *
 - * Licensed under the Apache License, Version 2.0 (the "License");
 - * you may not use this file except in compliance with the License.
 - * You may obtain a copy of the License at
 - *
 - *         http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software
 - * distributed under the License is distributed on an "AS IS" BASIS,
 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 - * See the License for the specific language governing permissions and
 - * limitations under the License.
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied.  See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
+  */
+ package hivemall.model;
+ 
+ import static org.junit.Assert.assertEquals;
+ import hivemall.utils.collections.IMapIterator;
+ import hivemall.utils.lang.HalfFloat;
+ 
+ import java.util.Random;
+ 
+ import org.junit.Test;
+ 
+ public class NewSpaceEfficientNewDenseModelTest {
+ 
+     @Test
+     public void testGetSet() {
+         final int size = 1 << 12;
+ 
+         final NewSpaceEfficientDenseModel model1 = new NewSpaceEfficientDenseModel(size);
+         //model1.configureClock();
+         final NewDenseModel model2 = new NewDenseModel(size);
+         //model2.configureClock();
+ 
+         final Random rand = new Random();
+         for (int t = 0; t < 1000; t++) {
+             int i = rand.nextInt(size);
+             float f = HalfFloat.MAX_FLOAT * rand.nextFloat();
+             IWeightValue w = new WeightValue(f);
+             model1.set(i, w);
+             model2.set(i, w);
+         }
+ 
+         assertEquals(model2.size(), model1.size());
+ 
+         IMapIterator<Integer, IWeightValue> itor = model1.entries();
+         while (itor.next() != -1) {
+             int k = itor.getKey();
+             float expected = itor.getValue().get();
+             float actual = model2.getWeight(k);
+             assertEquals(expected, actual, 32f);
+         }
+     }
+ 
+ }

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/mixserv/src/test/java/hivemall/mix/server/MixServerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/resources/ddl/define-all-as-permanent.hive
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/resources/ddl/define-all.hive
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-hivemall/blob/05766432/spark/spark-2.0/src/test/scala/hivemall/mix/server/MixServerSuite.scala
----------------------------------------------------------------------
diff --cc spark/spark-2.0/src/test/scala/hivemall/mix/server/MixServerSuite.scala
index dbb818b,c0ee72f..3d53bec
--- a/spark/spark-2.0/src/test/scala/hivemall/mix/server/MixServerSuite.scala
+++ b/spark/spark-2.0/src/test/scala/hivemall/mix/server/MixServerSuite.scala
@@@ -19,13 -17,16 +19,15 @@@
  package hivemall.mix.server
  
  import java.util.Random
 -import java.util.concurrent.{TimeUnit, ExecutorService, Executors}
 +import java.util.concurrent.{Executors, ExecutorService, TimeUnit}
  import java.util.logging.Logger
  
+ import org.scalatest.{BeforeAndAfter, FunSuite}
+ 
 -import hivemall.model.{NewDenseModel, PredictionModel, WeightValue}
  import hivemall.mix.MixMessage.MixEventName
  import hivemall.mix.client.MixClient
  import hivemall.mix.server.MixServer.ServerState
- import hivemall.model.{DenseModel, PredictionModel, WeightValue}
++import hivemall.model.{NewDenseModel, PredictionModel, WeightValue}
  import hivemall.utils.io.IOUtils
  import hivemall.utils.lang.CommandLineUtils
  import hivemall.utils.net.NetUtils