Posted to commits@hama.apache.org by ed...@apache.org on 2015/11/24 00:47:59 UTC

[3/6] hama git commit: HAMA-961: Remove ann package

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/AbstractLayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/AbstractLayeredNeuralNetwork.java b/ml/src/main/java/org/apache/hama/ml/ann/AbstractLayeredNeuralNetwork.java
deleted file mode 100644
index eaa1c72..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/AbstractLayeredNeuralNetwork.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hama.commons.math.DoubleDoubleFunction;
-import org.apache.hama.commons.math.DoubleFunction;
-import org.apache.hama.commons.math.DoubleMatrix;
-import org.apache.hama.commons.math.DoubleVector;
-import org.apache.hama.commons.math.FunctionFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * AbstractLayeredNeuralNetwork defines the general operations for derived
- * layered models, including Linear Regression, Logistic Regression, Multilayer
- * Perceptron, Autoencoder, Restricted Boltzmann Machine, etc.
- * 
- * In general, these models consist of neurons which are aligned in layers.
- * Between any two adjacent layers, the neurons are connected to form a
- * bipartite weighted graph.
- * 
- */
-abstract class AbstractLayeredNeuralNetwork extends NeuralNetwork {
-
-  private static final double DEFAULT_REGULARIZATION_WEIGHT = 0;
-  private static final double DEFAULT_MOMENTUM_WEIGHT = 0.1;
-
-  double trainingError;
-
-  /* The weight of regularization */
-  protected double regularizationWeight;
-
-  /* The momentumWeight */
-  protected double momentumWeight;
-
-  /* The cost function of the model */
-  protected DoubleDoubleFunction costFunction;
-
-  /* Record the size of each layer */
-  protected List<Integer> layerSizeList;
-
-  protected TrainingMethod trainingMethod;
-  
-  protected LearningStyle learningStyle;
-
-  public static enum TrainingMethod {
-    GRADIENT_DESCENT
-  }
-  
-  public static enum LearningStyle {
-    UNSUPERVISED,
-    SUPERVISED
-  }
-  
-  public AbstractLayeredNeuralNetwork() {
-    this.regularizationWeight = DEFAULT_REGULARIZATION_WEIGHT;
-    this.momentumWeight = DEFAULT_MOMENTUM_WEIGHT;
-    this.trainingMethod = TrainingMethod.GRADIENT_DESCENT;
-    this.learningStyle = LearningStyle.SUPERVISED;
-  }
-
-  public AbstractLayeredNeuralNetwork(String modelPath) {
-    super(modelPath);
-  }
-
-  /**
-   * Set the regularization weight. A value in the range [0, 0.1) is
-   * recommended; the more complex the model, the smaller the regularization
-   * weight should be.
-   * 
-   * @param regularizationWeight
-   */
-  public void setRegularizationWeight(double regularizationWeight) {
-    Preconditions.checkArgument(regularizationWeight >= 0
-        && regularizationWeight < 1.0,
-        "Regularization weight must be in range [0, 1.0)");
-    this.regularizationWeight = regularizationWeight;
-  }
-
-  public double getRegularizationWeight() {
-    return this.regularizationWeight;
-  }
-
-  /**
-   * Set the momentum weight for the model. A value in the range [0, 0.5] is
-   * recommended.
-   * 
-   * @param momentumWeight
-   */
-  public void setMomemtumWeight(double momentumWeight) {
-    Preconditions.checkArgument(momentumWeight >= 0 && momentumWeight <= 1.0,
-        "Momentum weight must be in range [0, 1.0]");
-    this.momentumWeight = momentumWeight;
-  }
-
-  public double getMomemtumWeight() {
-    return this.momentumWeight;
-  }
-
-  public void setTrainingMethod(TrainingMethod method) {
-    this.trainingMethod = method;
-  }
-
-  public TrainingMethod getTrainingMethod() {
-    return this.trainingMethod;
-  }
-  
-  public void setLearningStyle(LearningStyle style) {
-    this.learningStyle = style;
-  }
-  
-  public LearningStyle getLearningStyle() {
-    return this.learningStyle;
-  }
-
-  /**
-   * Set the cost function for the model.
-   * 
-   * @param costFunction
-   */
-  public void setCostFunction(DoubleDoubleFunction costFunction) {
-    this.costFunction = costFunction;
-  }
-
-  /**
-   * Add a layer of neurons with the specified size. If the added layer is not
-   * the first layer, its neurons are automatically connected with those of the
-   * previous layer.
-   * 
-   * @param size
-   * @param isFinalLayer If false, add a bias neuron.
-   * @param squashingFunction The squashing function for this layer; the input
-   *          layer uses f(x) = x by default.
-   * @return The layer index, starting from 0.
-   */
-  public abstract int addLayer(int size, boolean isFinalLayer,
-      DoubleFunction squashingFunction);
-
-  /**
-   * Get the size of a particular layer.
-   * 
-   * @param layer
-   * @return The layer size.
-   */
-  public int getLayerSize(int layer) {
-    Preconditions.checkArgument(
-        layer >= 0 && layer < this.layerSizeList.size(),
-        String.format("Input must be in range [0, %d]\n",
-            this.layerSizeList.size() - 1));
-    return this.layerSizeList.get(layer);
-  }
-
-  /**
-   * Get the layer size list.
-   * 
-   * @return The layer size list.
-   */
-  protected List<Integer> getLayerSizeList() {
-    return this.layerSizeList;
-  }
-
-  /**
-   * Get the weights between layers layerIdx and layerIdx + 1.
-   * 
-   * @param layerIdx The index of the layer
-   * @return The weights in the form of a {@link DoubleMatrix}
-   */
-  public abstract DoubleMatrix getWeightsByLayer(int layerIdx);
-
-  /**
-   * Get the updated weights using one training instance.
-   * 
-   * @param trainingInstance The concatenation of the feature vector and the
-   *          class label vector.
-   * @return The update of each weight, in the form of a matrix list.
-   * @throws Exception
-   */
-  public abstract DoubleMatrix[] trainByInstance(DoubleVector trainingInstance);
-
-  /**
-   * Get the output calculated by the model.
-   * 
-   * @param instance The feature instance.
-   * @return a new vector with the result of the operation.
-   */
-  public abstract DoubleVector getOutput(DoubleVector instance);
-
-  /**
-   * Calculate the training error based on the labels and outputs.
-   * 
-   * @param labels
-   * @param output
-   */
-  protected abstract void calculateTrainingError(DoubleVector labels,
-      DoubleVector output);
-
-  @Override
-  public void readFields(DataInput input) throws IOException {
-    super.readFields(input);
-    // read regularization weight
-    this.regularizationWeight = input.readDouble();
-    // read momentum weight
-    this.momentumWeight = input.readDouble();
-
-    // read cost function
-    this.costFunction = FunctionFactory
-        .createDoubleDoubleFunction(WritableUtils.readString(input));
-
-    // read layer size list
-    int numLayers = input.readInt();
-    this.layerSizeList = Lists.newArrayList();
-    for (int i = 0; i < numLayers; ++i) {
-      this.layerSizeList.add(input.readInt());
-    }
-
-    this.trainingMethod = WritableUtils.readEnum(input, TrainingMethod.class);
-    this.learningStyle = WritableUtils.readEnum(input, LearningStyle.class);
-  }
-
-  @Override
-  public void write(DataOutput output) throws IOException {
-    super.write(output);
-    // write regularization weight
-    output.writeDouble(this.regularizationWeight);
-    // write momentum weight
-    output.writeDouble(this.momentumWeight);
-
-    // write cost function
-    WritableUtils.writeString(output, costFunction.getFunctionName());
-
-    // write layer size list
-    output.writeInt(this.layerSizeList.size());
-    for (Integer layerSize : this.layerSizeList) {
-      output.writeInt(layerSize);
-    }
-
-    WritableUtils.writeEnum(output, this.trainingMethod);
-    WritableUtils.writeEnum(output, this.learningStyle);
-  }
-
-}
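
For reviewers tracking this removal, a minimal usage sketch of the deleted
abstract API, driven through its concrete subclass SmallLayeredNeuralNetwork
(removed further down in this commit). The class name LayeredAnnConfigSketch,
the layer sizes, and the hyper-parameter values are illustrative assumptions,
not values taken from the diff.

import org.apache.hama.commons.math.FunctionFactory;
import org.apache.hama.ml.ann.SmallLayeredNeuralNetwork;

public class LayeredAnnConfigSketch {
  public static void main(String[] args) {
    // Build a 4-3-1 feed-forward model; addLayer() appends a bias neuron
    // internally for every non-final layer.
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.addLayer(4, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann.setRegularizationWeight(0.01); // recommended range [0, 0.1)
    ann.setMomemtumWeight(0.2);        // spelling follows the original setter
    ann.setLearningRate(0.1);          // recommended range (0, 0.3)
  }
}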

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/AutoEncoder.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/AutoEncoder.java b/ml/src/main/java/org/apache/hama/ml/ann/AutoEncoder.java
deleted file mode 100644
index d591f42..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/AutoEncoder.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hama.ml.ann;
-
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hama.commons.math.DenseDoubleVector;
-import org.apache.hama.commons.math.DoubleFunction;
-import org.apache.hama.commons.math.DoubleMatrix;
-import org.apache.hama.commons.math.DoubleVector;
-import org.apache.hama.commons.math.FunctionFactory;
-import org.apache.hama.ml.ann.AbstractLayeredNeuralNetwork.LearningStyle;
-import org.apache.hama.ml.util.FeatureTransformer;
-
-import com.google.common.base.Preconditions;
-
-/**
- * AutoEncoder is a model used for dimensionality reduction and feature
- * learning. It is a special kind of {@link NeuralNetwork} that consists of
- * three layers of neurons, where the first and third layers contain the same
- * number of neurons.
- * 
- */
-public class AutoEncoder {
-
-  private final SmallLayeredNeuralNetwork model;
-
-  /**
-   * Initialize the autoencoder.
-   * 
-   * @param inputDimensions The number of dimensions for the input feature.
-   * @param compressedDimensions The number of dimensions for the compressed
-   *          information.
-   */
-  public AutoEncoder(int inputDimensions, int compressedDimensions) {
-    model = new SmallLayeredNeuralNetwork();
-    model.addLayer(inputDimensions, false,
-        FunctionFactory.createDoubleFunction("Sigmoid"));
-    model.addLayer(compressedDimensions, false,
-        FunctionFactory.createDoubleFunction("Sigmoid"));
-    model.addLayer(inputDimensions, true,
-        FunctionFactory.createDoubleFunction("Sigmoid"));
-    model.setLearningStyle(LearningStyle.UNSUPERVISED);
-    model.setCostFunction(FunctionFactory
-        .createDoubleDoubleFunction("SquaredError"));
-  }
-
-  public AutoEncoder(String modelPath) {
-    model = new SmallLayeredNeuralNetwork(modelPath);
-  }
-
-  public AutoEncoder setLearningRate(double learningRate) {
-    model.setLearningRate(learningRate);
-    return this;
-  }
-
-  public AutoEncoder setMomemtumWeight(double momentumWeight) {
-    model.setMomemtumWeight(momentumWeight);
-    return this;
-  }
-
-  public AutoEncoder setRegularizationWeight(double regularizationWeight) {
-    model.setRegularizationWeight(regularizationWeight);
-    return this;
-  }
-  
-  public AutoEncoder setModelPath(String modelPath) {
-    model.setModelPath(modelPath);
-    return this;
-  }
-
-  /**
-   * Train the autoencoder with the given data. Note that the training data is
-   * pre-processed by the configured {@link FeatureTransformer} before training.
-   * 
-   * @param dataInputPath
-   * @param trainingParams
-   */
-  public void train(Path dataInputPath, Map<String, String> trainingParams) {
-    model.train(dataInputPath, trainingParams);
-  }
-
-  /**
-   * Train the model with one instance.
-   * 
-   * @param trainingInstance
-   */
-  public void trainOnline(DoubleVector trainingInstance) {
-    model.trainOnline(trainingInstance);
-  }
-
-  /**
-   * Get the matrix M used to encode the input features.
-   * 
-   * @return The matrix used to encode the input.
-   */
-  public DoubleMatrix getEncodeWeightMatrix() {
-    return model.getWeightsByLayer(0);
-  }
-
-  /**
-   * Get the matrix M used to decode the compressed information.
-   * 
-   * @return The matrix used to decode the compressed information.
-   */
-  public DoubleMatrix getDecodeWeightMatrix() {
-    return model.getWeightsByLayer(1);
-  }
-
-  /**
-   * Transform the input features.
-   * 
-   * @param inputInstance
-   * @return The transformed features.
-   */
-  private DoubleVector transform(DoubleVector inputInstance, int inputLayer) {
-    DoubleVector internalInstance = new DenseDoubleVector(inputInstance.getDimension() + 1);
-    internalInstance.set(0, 1);
-    for (int i = 0; i < inputInstance.getDimension(); ++i) {
-      internalInstance.set(i + 1, inputInstance.get(i));
-    }
-    DoubleFunction squashingFunction = model
-        .getSquashingFunction(inputLayer);
-    DoubleMatrix weightMatrix = null;
-    if (inputLayer == 0) {
-      weightMatrix = this.getEncodeWeightMatrix();
-    } else {
-      weightMatrix = this.getDecodeWeightMatrix();
-    }
-    DoubleVector vec = weightMatrix.multiplyVectorUnsafe(internalInstance);
-    vec = vec.applyToElements(squashingFunction);
-    return vec;
-  }
-
-  /**
-   * Encode the input instance.
-   * @param inputInstance
-   * @return a new vector with the encoded input instance.
-   */
-  public DoubleVector encode(DoubleVector inputInstance) {
-    Preconditions
-        .checkArgument(
-            inputInstance.getDimension() == model.getLayerSize(0) - 1,
-            String.format("The dimension of input instance is %d, but the model requires dimension %d.",
-                    inputInstance.getDimension(), model.getLayerSize(0) - 1));
-    return this.transform(inputInstance, 0);
-  }
-
-  /**
-   * Decode the input instance.
-   * @param inputInstance
-   * @return a new vector with the decoded input instance.
-   */
-  public DoubleVector decode(DoubleVector inputInstance) {
-    Preconditions
-        .checkArgument(
-            inputInstance.getDimension() == model.getLayerSize(1) - 1,
-            String.format("The dimension of input instance is %d, but the model requires dimension %d.",
-                    inputInstance.getDimension(), model.getLayerSize(1) - 1));
-    return this.transform(inputInstance, 1);
-  }
-  
-  /**
-   * Get the label(s) according to the given features.
-   * @param inputInstance
-   * @return a new vector with the output of the model for the given feature instance.
-   */
-  public DoubleVector getOutput(DoubleVector inputInstance) {
-    return model.getOutput(inputInstance);
-  }
-  
-  /**
-   * Set the feature transformer.
-   * @param featureTransformer
-   */
-  public void setFeatureTransformer(FeatureTransformer featureTransformer) {
-    this.model.setFeatureTransformer(featureTransformer);
-  }
-
-}
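
A short sketch of the encode/decode round trip the deleted AutoEncoder
supported. The dimensions, the input data, and the class name
AutoEncoderSketch are hypothetical; the method calls match the API removed
above.

import org.apache.hama.commons.math.DenseDoubleVector;
import org.apache.hama.commons.math.DoubleVector;
import org.apache.hama.ml.ann.AutoEncoder;

public class AutoEncoderSketch {
  public static void main(String[] args) {
    // 4 input features compressed to 2 dimensions (illustrative sizes).
    AutoEncoder encoder = new AutoEncoder(4, 2)
        .setLearningRate(0.1)
        .setMomemtumWeight(0.2);

    DoubleVector instance =
        new DenseDoubleVector(new double[] { 0.2, 0.5, 0.1, 0.9 });

    // One online training step; in unsupervised mode the labels are the
    // features themselves.
    encoder.trainOnline(instance);

    // Round trip: compress to 2 dimensions, then reconstruct 4.
    DoubleVector compressed = encoder.encode(instance);
    DoubleVector reconstructed = encoder.decode(compressed);
    System.out.println(reconstructed);
  }
}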

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetwork.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetwork.java b/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetwork.java
deleted file mode 100644
index 64de418..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetwork.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import com.google.common.base.Preconditions;
-import com.google.common.io.Closeables;
-import org.apache.commons.lang.SerializationUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hama.ml.util.DefaultFeatureTransformer;
-import org.apache.hama.ml.util.FeatureTransformer;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Map;
-
-/**
- * NeuralNetwork defines the general operations for all the derived models.
- * Typically, all derivative models such as Linear Regression, Logistic
- * Regression, and Multilayer Perceptron consist of neurons and the weights
- * between neurons.
- * 
- */
-abstract class NeuralNetwork implements Writable {
-
-  private static final double DEFAULT_LEARNING_RATE = 0.5;
-
-  protected double learningRate;
-  protected boolean learningRateDecay = false;
-
-  // the name of the model
-  protected String modelType;
-  // the path to store the model
-  protected String modelPath;
-
-  protected FeatureTransformer featureTransformer;
-
-  public NeuralNetwork() {
-    this.learningRate = DEFAULT_LEARNING_RATE;
-    this.modelType = this.getClass().getSimpleName();
-    this.featureTransformer = new DefaultFeatureTransformer();
-  }
-
-  public NeuralNetwork(String modelPath) {
-    try {
-      this.modelPath = modelPath;
-      this.readFromModel();
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-  }
-
-  /**
-   * Set the degree of aggressiveness during model training. A large learning
-   * rate can increase the training speed, but it also decreases the chance of
-   * model convergence. A value in the range (0, 0.3) is recommended.
-   * 
-   * @param learningRate
-   */
-  public void setLearningRate(double learningRate) {
-    Preconditions.checkArgument(learningRate > 0,
-        "Learning rate must be larger than 0.");
-    this.learningRate = learningRate;
-  }
-
-  public double getLearningRate() {
-    return this.learningRate;
-  }
-
-  public void isLearningRateDecay(boolean decay) {
-    this.learningRateDecay = decay;
-  }
-
-  public String getModelType() {
-    return this.modelType;
-  }
-
-  /**
-   * Train the model with the given training data path and parameters.
-   * 
-   * @param dataInputPath The path of the training data.
-   * @param trainingParams The parameters for training.
-   * @throws IOException
-   */
-  public void train(Path dataInputPath, Map<String, String> trainingParams) {
-    Preconditions.checkArgument(this.modelPath != null,
-        "Please set the model path before training.");
-    // train with BSP job
-    try {
-      trainInternal(dataInputPath, trainingParams);
-      // reload the trained model from the model path
-      this.readFromModel();
-    } catch (IOException e) {
-      e.printStackTrace();
-    } catch (InterruptedException e) {
-      e.printStackTrace();
-    } catch (ClassNotFoundException e) {
-      e.printStackTrace();
-    }
-  }
-
-  /**
-   * Train the model with the given training data path and parameters.
-   * 
-   * @param dataInputPath
-   * @param trainingParams
-   */
-  protected abstract void trainInternal(Path dataInputPath,
-      Map<String, String> trainingParams) throws IOException,
-      InterruptedException, ClassNotFoundException;
-
-  /**
-   * Read the model meta-data from the specified location.
-   * 
-   * @throws IOException
-   */
-  protected void readFromModel() throws IOException {
-    Preconditions.checkArgument(this.modelPath != null,
-        "Model path has not been set.");
-    Configuration conf = new Configuration();
-    FSDataInputStream is = null;
-    try {
-      URI uri = new URI(this.modelPath);
-      FileSystem fs = FileSystem.get(uri, conf);
-      is = new FSDataInputStream(fs.open(new Path(modelPath)));
-      this.readFields(is);
-    } catch (URISyntaxException e) {
-      e.printStackTrace();
-    } finally {
-      Closeables.close(is, false);
-    }
-  }
-
-  /**
-   * Write the model data to the specified location.
-   * 
-   * @throws IOException
-   */
-  public void writeModelToFile() throws IOException {
-    Preconditions.checkArgument(this.modelPath != null,
-        "Model path has not been set.");
-    Configuration conf = new Configuration();
-    FSDataOutputStream os = null;
-    try {
-      URI uri = new URI(this.modelPath);
-      FileSystem fs = FileSystem.get(uri, conf);
-      os = fs.create(new Path(this.modelPath), true);
-      this.write(os);
-    } catch (URISyntaxException e) {
-      e.printStackTrace();
-    }
-
-    Closeables.close(os, false);
-  }
-
-  /**
-   * Set the model path.
-   * 
-   * @param modelPath
-   */
-  public void setModelPath(String modelPath) {
-    this.modelPath = modelPath;
-  }
-
-  /**
-   * Get the model path.
-   * 
-   * @return the path to store the model.
-   */
-  public String getModelPath() {
-    return this.modelPath;
-  }
-
-  @SuppressWarnings({ "rawtypes", "unchecked" })
-  @Override
-  public void readFields(DataInput input) throws IOException {
-    // read model type
-    this.modelType = WritableUtils.readString(input);
-    // read learning rate
-    this.learningRate = input.readDouble();
-    // read model path
-    this.modelPath = WritableUtils.readString(input);
-
-    if (this.modelPath.equals("null")) {
-      this.modelPath = null;
-    }
-
-    // read feature transformer
-    int bytesLen = input.readInt();
-    byte[] featureTransformerBytes = new byte[bytesLen];
-    for (int i = 0; i < featureTransformerBytes.length; ++i) {
-      featureTransformerBytes[i] = input.readByte();
-    }
-
-    Class<? extends FeatureTransformer> featureTransformerCls = (Class<? extends FeatureTransformer>) SerializationUtils
-        .deserialize(featureTransformerBytes);
-
-    Constructor[] constructors = featureTransformerCls
-        .getDeclaredConstructors();
-    Constructor constructor = constructors[0];
-
-    try {
-      this.featureTransformer = (FeatureTransformer) constructor
-          .newInstance(new Object[] {});
-    } catch (InstantiationException e) {
-      e.printStackTrace();
-    } catch (IllegalAccessException e) {
-      e.printStackTrace();
-    } catch (IllegalArgumentException e) {
-      e.printStackTrace();
-    } catch (InvocationTargetException e) {
-      e.printStackTrace();
-    }
-  }
-
-  @Override
-  public void write(DataOutput output) throws IOException {
-    // write model type
-    WritableUtils.writeString(output, modelType);
-    // write learning rate
-    output.writeDouble(learningRate);
-    // write model path
-    if (this.modelPath != null) {
-      WritableUtils.writeString(output, modelPath);
-    } else {
-      WritableUtils.writeString(output, "null");
-    }
-
-    // serialize the class
-    Class<? extends FeatureTransformer> featureTransformerCls = this.featureTransformer
-        .getClass();
-    byte[] featureTransformerBytes = SerializationUtils
-        .serialize(featureTransformerCls);
-    output.writeInt(featureTransformerBytes.length);
-    output.write(featureTransformerBytes);
-  }
-
-  public void setFeatureTransformer(FeatureTransformer featureTransformer) {
-    this.featureTransformer = featureTransformer;
-  }
-
-  public FeatureTransformer getFeatureTransformer() {
-    return this.featureTransformer;
-  }
-
-}
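
The deleted base class persisted models through Hadoop's Writable contract
(write/readFields), wrapped by writeModelToFile() and the path-based
constructor. A minimal save/load sketch, assuming a local file:// path and the
illustrative class name ModelPersistenceSketch:

import java.io.IOException;

import org.apache.hama.commons.math.FunctionFactory;
import org.apache.hama.ml.ann.SmallLayeredNeuralNetwork;

public class ModelPersistenceSketch {
  public static void main(String[] args) throws IOException {
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));

    // writeModelToFile() serializes via write(DataOutput); the path here is
    // a hypothetical local location.
    ann.setModelPath("file:///tmp/ann.model");
    ann.writeModelToFile();

    // The path-based constructor calls readFromModel(), which restores the
    // model via readFields(DataInput).
    SmallLayeredNeuralNetwork restored =
        new SmallLayeredNeuralNetwork("file:///tmp/ann.model");
    System.out.println(restored.getModelType());
  }
}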

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetworkTrainer.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetworkTrainer.java b/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetworkTrainer.java
deleted file mode 100644
index d1e43b9..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/NeuralNetworkTrainer.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hama.bsp.BSP;
-import org.apache.hama.bsp.BSPPeer;
-import org.apache.hama.bsp.sync.SyncException;
-import org.apache.hama.commons.io.VectorWritable;
-import org.apache.hama.ml.perception.MLPMessage;
-import org.apache.hama.ml.util.DefaultFeatureTransformer;
-import org.apache.hama.ml.util.FeatureTransformer;
-
-/**
- * The trainer that is used to train the {@link SmallLayeredNeuralNetwork} with
- * BSP. The trainer reads the training data and obtains the trained parameters
- * of the model.
- * 
- */
-public abstract class NeuralNetworkTrainer extends
-    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> {
-
-  protected static final Log LOG = LogFactory
-      .getLog(NeuralNetworkTrainer.class);
-
-  protected Configuration conf;
-  protected int maxIteration;
-  protected int batchSize;
-  protected String trainingMode;
-  
-  protected FeatureTransformer featureTransformer;
-  
-  @Override
-  final public void setup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException {
-    conf = peer.getConfiguration();
-    featureTransformer = new DefaultFeatureTransformer();
-    this.extraSetup(peer);
-  }
-
-  /**
-   * Handle extra setup for sub-classes.
-   * 
-   * @param peer
-   * @throws IOException
-   * @throws SyncException
-   * @throws InterruptedException
-   */
-  protected void extraSetup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public abstract void bsp(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException;
-
-  @Override
-  public void cleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException {
-    this.extraCleanup(peer);
-    // write model to modelPath
-  }
-
-  /**
-   * Handle cleanup for sub-classes. Write the trained model back.
-   * 
-   * @param peer
-   * @throws IOException
-   * @throws SyncException
-   * @throws InterruptedException
-   */
-  protected void extraCleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException {
-
-  }
-
-}
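
Subclasses of the deleted trainer only had to override bsp() (setup() is final
and delegates to extraSetup()). A skeletal, non-functional subclass sketch
under that assumption; the class name NoOpTrainerSketch is hypothetical:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hama.bsp.BSPPeer;
import org.apache.hama.bsp.sync.SyncException;
import org.apache.hama.commons.io.VectorWritable;
import org.apache.hama.ml.ann.NeuralNetworkTrainer;
import org.apache.hama.ml.perception.MLPMessage;

public class NoOpTrainerSketch extends NeuralNetworkTrainer {
  @Override
  public void bsp(
      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
      throws IOException, SyncException, InterruptedException {
    // Scan the local partition; a real trainer would compute weight updates
    // here, exchange them as MLPMessages, and sync() once per superstep.
    LongWritable key = new LongWritable();
    VectorWritable value = new VectorWritable();
    while (peer.readNext(key, value)) {
      // inspect value.getVector() ...
    }
    peer.sync();
  }
}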

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetwork.java b/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetwork.java
deleted file mode 100644
index fdda61f..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetwork.java
+++ /dev/null
@@ -1,567 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.lang.math.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hama.HamaConfiguration;
-import org.apache.hama.bsp.BSPJob;
-import org.apache.hama.commons.io.MatrixWritable;
-import org.apache.hama.commons.io.VectorWritable;
-import org.apache.hama.commons.math.DenseDoubleMatrix;
-import org.apache.hama.commons.math.DenseDoubleVector;
-import org.apache.hama.commons.math.DoubleFunction;
-import org.apache.hama.commons.math.DoubleMatrix;
-import org.apache.hama.commons.math.DoubleVector;
-import org.apache.hama.commons.math.FunctionFactory;
-import org.mortbay.log.Log;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * SmallLayeredNeuralNetwork defines the general operations for derived
- * layered models, including Linear Regression, Logistic Regression, Multilayer
- * Perceptron, Autoencoder, Restricted Boltzmann Machine, etc. For
- * SmallLayeredNeuralNetwork, the training can be conducted in parallel, but the
- * parameters of the model are assumed to be stored on a single machine.
- * 
- * In general, these models consist of neurons which are aligned in layers.
- * Between any two adjacent layers, the neurons are connected to form a
- * bipartite weighted graph.
- * 
- */
-public class SmallLayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
-
-  /* Weights between neurons at adjacent layers */
-  protected List<DoubleMatrix> weightMatrixList;
-
-  /* Previous weight updates between neurons at adjacent layers */
-  protected List<DoubleMatrix> prevWeightUpdatesList;
-
-  /* Different layers can have different squashing function */
-  protected List<DoubleFunction> squashingFunctionList;
-
-  protected int finalLayerIdx;
-
-  public SmallLayeredNeuralNetwork() {
-    this.layerSizeList = Lists.newArrayList();
-    this.weightMatrixList = Lists.newArrayList();
-    this.prevWeightUpdatesList = Lists.newArrayList();
-    this.squashingFunctionList = Lists.newArrayList();
-  }
-
-  public SmallLayeredNeuralNetwork(String modelPath) {
-    super(modelPath);
-  }
-
-  @Override
-  /**
-   * {@inheritDoc}
-   */
-  public int addLayer(int size, boolean isFinalLayer,
-      DoubleFunction squashingFunction) {
-    Preconditions.checkArgument(size > 0,
-        "Size of layer must be larger than 0.");
-    if (!isFinalLayer) {
-      size += 1;
-    }
-
-    this.layerSizeList.add(size);
-    int layerIdx = this.layerSizeList.size() - 1;
-    if (isFinalLayer) {
-      this.finalLayerIdx = layerIdx;
-    }
-
-    // add weights between the current layer and the previous layer; the input
-    // layer has no squashing function
-    if (layerIdx > 0) {
-      int sizePrevLayer = this.layerSizeList.get(layerIdx - 1);
-      // row count equals the size of the current layer and column count
-      // equals the size of the previous layer
-      int row = isFinalLayer ? size : size - 1;
-      int col = sizePrevLayer;
-      DoubleMatrix weightMatrix = new DenseDoubleMatrix(row, col);
-      // initialize weights
-      weightMatrix.applyToElements(new DoubleFunction() {
-        @Override
-        public double apply(double value) {
-          return RandomUtils.nextDouble() - 0.5;
-        }
-
-        @Override
-        public double applyDerivative(double value) {
-          throw new UnsupportedOperationException("");
-        }
-      });
-      this.weightMatrixList.add(weightMatrix);
-      this.prevWeightUpdatesList.add(new DenseDoubleMatrix(row, col));
-      this.squashingFunctionList.add(squashingFunction);
-    }
-    return layerIdx;
-  }
-
-  /**
-   * Update the weight matrices with given matrices.
-   * 
-   * @param matrices
-   */
-  public void updateWeightMatrices(DoubleMatrix[] matrices) {
-    for (int i = 0; i < matrices.length; ++i) {
-      DoubleMatrix matrix = this.weightMatrixList.get(i);
-      this.weightMatrixList.set(i, matrix.add(matrices[i]));
-    }
-  }
-
-  /**
-   * Set the previous weight update matrices.
-   * @param prevUpdates
-   */
-  void setPrevWeightMatrices(DoubleMatrix[] prevUpdates) {
-    this.prevWeightUpdatesList.clear();
-    Collections.addAll(this.prevWeightUpdatesList, prevUpdates);
-  }
-
-  /**
-   * Add a batch of matrices onto the given destination matrices.
-   * 
-   * @param destMatrices
-   * @param sourceMatrices
-   */
-  static void matricesAdd(DoubleMatrix[] destMatrices,
-      DoubleMatrix[] sourceMatrices) {
-    for (int i = 0; i < destMatrices.length; ++i) {
-      destMatrices[i] = destMatrices[i].add(sourceMatrices[i]);
-    }
-  }
-
-  /**
-   * Get all the weight matrices.
-   * 
-   * @return The matrices in the form of a matrix array.
-   */
-  DoubleMatrix[] getWeightMatrices() {
-    DoubleMatrix[] matrices = new DoubleMatrix[this.weightMatrixList.size()];
-    this.weightMatrixList.toArray(matrices);
-    return matrices;
-  }
-
-  /**
-   * Set the weight matrices.
-   * 
-   * @param matrices
-   */
-  public void setWeightMatrices(DoubleMatrix[] matrices) {
-    this.weightMatrixList = new ArrayList<DoubleMatrix>();
-    Collections.addAll(this.weightMatrixList, matrices);
-  }
-
-  /**
-   * Get the previous matrix updates in the form of an array.
-   * 
-   * @return The matrices in the form of a matrix array.
-   */
-  public DoubleMatrix[] getPrevMatricesUpdates() {
-    DoubleMatrix[] prevMatricesUpdates = new DoubleMatrix[this.prevWeightUpdatesList
-        .size()];
-    for (int i = 0; i < this.prevWeightUpdatesList.size(); ++i) {
-      prevMatricesUpdates[i] = this.prevWeightUpdatesList.get(i);
-    }
-    return prevMatricesUpdates;
-  }
-
-  public void setWeightMatrix(int index, DoubleMatrix matrix) {
-    Preconditions.checkArgument(
-        0 <= index && index < this.weightMatrixList.size(), String.format(
-            "index [%d] should be in range[%d, %d].", index, 0,
-            this.weightMatrixList.size()));
-    this.weightMatrixList.set(index, matrix);
-  }
-
-  @Override
-  public void readFields(DataInput input) throws IOException {
-    super.readFields(input);
-
-    // read squash functions
-    int squashingFunctionSize = input.readInt();
-    this.squashingFunctionList = Lists.newArrayList();
-    for (int i = 0; i < squashingFunctionSize; ++i) {
-      this.squashingFunctionList.add(FunctionFactory
-          .createDoubleFunction(WritableUtils.readString(input)));
-    }
-
-    // read weights and construct matrices of previous updates
-    int numOfMatrices = input.readInt();
-    this.weightMatrixList = Lists.newArrayList();
-    this.prevWeightUpdatesList = Lists.newArrayList();
-    for (int i = 0; i < numOfMatrices; ++i) {
-      DoubleMatrix matrix = MatrixWritable.read(input);
-      this.weightMatrixList.add(matrix);
-      this.prevWeightUpdatesList.add(new DenseDoubleMatrix(
-          matrix.getRowCount(), matrix.getColumnCount()));
-    }
-
-  }
-
-  @Override
-  public void write(DataOutput output) throws IOException {
-    super.write(output);
-
-    // write squashing functions
-    output.writeInt(this.squashingFunctionList.size());
-    for (DoubleFunction squashingFunction : this.squashingFunctionList) {
-      WritableUtils.writeString(output, squashingFunction.getFunctionName());
-    }
-
-    // write weight matrices
-    output.writeInt(this.weightMatrixList.size());
-    for (DoubleMatrix weightMatrix : this.weightMatrixList) {
-      MatrixWritable.write(weightMatrix, output);
-    }
-
-    // DO NOT WRITE WEIGHT UPDATE
-  }
-
-  @Override
-  public DoubleMatrix getWeightsByLayer(int layerIdx) {
-    return this.weightMatrixList.get(layerIdx);
-  }
-
-  /**
-   * Get the output of the model for the given feature instance.
-   */
-  @Override
-  public DoubleVector getOutput(DoubleVector instance) {
-    Preconditions.checkArgument(this.layerSizeList.get(0) - 1 == instance
-        .getDimension(), String.format(
-        "The dimension of input instance should be %d.",
-        this.layerSizeList.get(0) - 1));
-    // transform the features to another space
-    DoubleVector transformedInstance = this.featureTransformer
-        .transform(instance);
-    // add bias feature
-    DoubleVector instanceWithBias = new DenseDoubleVector(
-        transformedInstance.getDimension() + 1);
-    instanceWithBias.set(0, 0.99999); // set bias to be a little bit less than
-                                      // 1.0
-    for (int i = 1; i < instanceWithBias.getDimension(); ++i) {
-      instanceWithBias.set(i, transformedInstance.get(i - 1));
-    }
-
-    List<DoubleVector> outputCache = getOutputInternal(instanceWithBias);
-    // return the output of the last layer
-    DoubleVector result = outputCache.get(outputCache.size() - 1);
-    // remove bias
-    return result.sliceUnsafe(1, result.getDimension() - 1);
-  }
-
-  /**
-   * Calculate the output internally; the intermediate output of each layer is
-   * cached.
-   * 
-   * @param instanceWithBias The instance contains the features.
-   * @return Cached output of each layer.
-   */
-  public List<DoubleVector> getOutputInternal(DoubleVector instanceWithBias) {
-    List<DoubleVector> outputCache = new ArrayList<DoubleVector>();
-    // fill with instance
-    DoubleVector intermediateOutput = instanceWithBias;
-    outputCache.add(intermediateOutput);
-
-    for (int i = 0; i < this.layerSizeList.size() - 1; ++i) {
-      intermediateOutput = forward(i, intermediateOutput);
-      outputCache.add(intermediateOutput);
-    }
-    return outputCache;
-  }
-
-  /**
-   * Forward the calculation for one layer.
-   * 
-   * @param fromLayer The index of the previous layer.
-   * @param intermediateOutput The intermediate output of the previous layer.
-   * @return a new vector with the result of the operation.
-   */
-  protected DoubleVector forward(int fromLayer, DoubleVector intermediateOutput) {
-    DoubleMatrix weightMatrix = this.weightMatrixList.get(fromLayer);
-
-    DoubleVector vec = weightMatrix.multiplyVectorUnsafe(intermediateOutput);
-    vec = vec.applyToElements(this.squashingFunctionList.get(fromLayer));
-
-    // add bias
-    DoubleVector vecWithBias = new DenseDoubleVector(vec.getDimension() + 1);
-    vecWithBias.set(0, 1);
-    for (int i = 0; i < vec.getDimension(); ++i) {
-      vecWithBias.set(i + 1, vec.get(i));
-    }
-    return vecWithBias;
-  }
-
-  /**
-   * Train the model online.
-   * 
-   * @param trainingInstance
-   */
-  public void trainOnline(DoubleVector trainingInstance) {
-    DoubleMatrix[] updateMatrices = this.trainByInstance(trainingInstance);
-    this.updateWeightMatrices(updateMatrices);
-  }
-
-  @Override
-  public DoubleMatrix[] trainByInstance(DoubleVector trainingInstance) {
-    DoubleVector transformedVector = this.featureTransformer
-        .transform(trainingInstance.sliceUnsafe(this.layerSizeList.get(0) - 1));
-
-    int inputDimension = this.layerSizeList.get(0) - 1;
-    int outputDimension;
-    DoubleVector inputInstance = null;
-    DoubleVector labels = null;
-    if (this.learningStyle == LearningStyle.SUPERVISED) {
-      outputDimension = this.layerSizeList.get(this.layerSizeList.size() - 1);
-      // validate training instance
-      Preconditions.checkArgument(
-          inputDimension + outputDimension == trainingInstance.getDimension(),
-          String
-              .format(
-                  "The dimension of training instance is %d, but requires %d.",
-                  trainingInstance.getDimension(), inputDimension
-                      + outputDimension));
-
-      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
-      inputInstance.set(0, 1); // add bias
-      // get the features from the transformed vector
-      for (int i = 0; i < inputDimension; ++i) {
-        inputInstance.set(i + 1, transformedVector.get(i));
-      }
-      // get the labels from the original training instance
-      labels = trainingInstance.sliceUnsafe(inputInstance.getDimension() - 1,
-          trainingInstance.getDimension() - 1);
-    } else if (this.learningStyle == LearningStyle.UNSUPERVISED) {
-      // labels are identical to input features
-      outputDimension = inputDimension;
-      // validate training instance
-      Preconditions.checkArgument(inputDimension == trainingInstance
-          .getDimension(), String.format(
-          "The dimension of training instance is %d, but requires %d.",
-          trainingInstance.getDimension(), inputDimension));
-
-      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
-      inputInstance.set(0, 1); // add bias
-      // get the features from the transformed vector
-      for (int i = 0; i < inputDimension; ++i) {
-        inputInstance.set(i + 1, transformedVector.get(i));
-      }
-      // get the labels by copying the transformed vector
-      labels = transformedVector.deepCopy();
-    }
-
-    List<DoubleVector> internalResults = this.getOutputInternal(inputInstance);
-    DoubleVector output = internalResults.get(internalResults.size() - 1);
-
-    // get the training error
-    calculateTrainingError(labels,
-        output.deepCopy().sliceUnsafe(1, output.getDimension() - 1));
-
-    if (this.trainingMethod.equals(TrainingMethod.GRADIENT_DESCENT)) {
-      return this.trainByInstanceGradientDescent(labels, internalResults);
-    } else {
-      throw new IllegalArgumentException("Training method is not supported.");
-    }
-  }
-
-  /**
-   * Train by gradient descent. Get the updated weights using one training
-   * instance.
-   * 
-   * @param labels The labels of the training instance.
-   * @param internalResults The cached output of each layer.
-   * @return The weight update matrices.
-   */
-  private DoubleMatrix[] trainByInstanceGradientDescent(DoubleVector labels,
-      List<DoubleVector> internalResults) {
-
-    DoubleVector output = internalResults.get(internalResults.size() - 1);
-    // initialize weight update matrices
-    DenseDoubleMatrix[] weightUpdateMatrices = new DenseDoubleMatrix[this.weightMatrixList
-        .size()];
-    for (int m = 0; m < weightUpdateMatrices.length; ++m) {
-      weightUpdateMatrices[m] = new DenseDoubleMatrix(this.weightMatrixList
-          .get(m).getRowCount(), this.weightMatrixList.get(m).getColumnCount());
-    }
-    DoubleVector deltaVec = new DenseDoubleVector(
-        this.layerSizeList.get(this.layerSizeList.size() - 1));
-
-    DoubleFunction squashingFunction = this.squashingFunctionList
-        .get(this.squashingFunctionList.size() - 1);
-
-    DoubleMatrix lastWeightMatrix = this.weightMatrixList
-        .get(this.weightMatrixList.size() - 1);
-    for (int i = 0; i < deltaVec.getDimension(); ++i) {
-      double costFuncDerivative = this.costFunction.applyDerivative(
-          labels.get(i), output.get(i + 1));
-      // add regularization
-      costFuncDerivative += this.regularizationWeight
-          * lastWeightMatrix.getRowVector(i).sum();
-      deltaVec.set(
-          i,
-          costFuncDerivative
-              * squashingFunction.applyDerivative(output.get(i + 1)));
-    }
-
-    // start from previous layer of output layer
-    for (int layer = this.layerSizeList.size() - 2; layer >= 0; --layer) {
-      output = internalResults.get(layer);
-      deltaVec = backpropagate(layer, deltaVec, internalResults,
-          weightUpdateMatrices[layer]);
-    }
-
-    this.setPrevWeightMatrices(weightUpdateMatrices);
-
-    return weightUpdateMatrices;
-  }
-
-  /**
-   * Back-propagate the errors from the next layer to the current layer. The
-   * weight update information is stored in weightUpdateMatrix, and the delta
-   * of the current layer is returned.
-   * 
-   * @param curLayerIdx Index of the current layer.
-   * @param nextLayerDelta Delta of the next layer.
-   * @param outputCache Cached output of each layer.
-   * @param weightUpdateMatrix Matrix in which the weight updates are stored.
-   * @return The delta of the current layer.
-   */
-  private DoubleVector backpropagate(int curLayerIdx,
-      DoubleVector nextLayerDelta, List<DoubleVector> outputCache,
-      DenseDoubleMatrix weightUpdateMatrix) {
-
-    // get layer related information
-    DoubleFunction squashingFunction = this.squashingFunctionList
-        .get(curLayerIdx);
-    DoubleVector curLayerOutput = outputCache.get(curLayerIdx);
-    DoubleMatrix weightMatrix = this.weightMatrixList.get(curLayerIdx);
-    DoubleMatrix prevWeightMatrix = this.prevWeightUpdatesList.get(curLayerIdx);
-
-    // next layer is not output layer, remove the delta of bias neuron
-    if (curLayerIdx != this.layerSizeList.size() - 2) {
-      nextLayerDelta = nextLayerDelta.slice(1,
-          nextLayerDelta.getDimension() - 1);
-    }
-
-    DoubleVector delta = weightMatrix.transpose()
-        .multiplyVector(nextLayerDelta);
-    for (int i = 0; i < delta.getDimension(); ++i) {
-      delta.set(
-          i,
-          delta.get(i)
-              * squashingFunction.applyDerivative(curLayerOutput.get(i)));
-    }
-
-    // update weights
-    for (int i = 0; i < weightUpdateMatrix.getRowCount(); ++i) {
-      for (int j = 0; j < weightUpdateMatrix.getColumnCount(); ++j) {
-        weightUpdateMatrix.set(i, j,
-            -learningRate * nextLayerDelta.get(i) * curLayerOutput.get(j)
-                + this.momentumWeight * prevWeightMatrix.get(i, j));
-      }
-    }
-
-    return delta;
-  }
-
-  @Override
-  protected void trainInternal(Path dataInputPath,
-      Map<String, String> trainingParams) throws IOException,
-      InterruptedException, ClassNotFoundException {
-    // add all training parameters to configuration
-    Configuration conf = new Configuration();
-    for (Map.Entry<String, String> entry : trainingParams.entrySet()) {
-      conf.set(entry.getKey(), entry.getValue());
-    }
-
-    // if training parameters contains the model path, update the model path
-    String modelPath = trainingParams.get("modelPath");
-    if (modelPath != null) {
-      this.modelPath = modelPath;
-    }
-    // modelPath must be set before training
-    if (this.modelPath == null) {
-      throw new IllegalArgumentException(
-          "Please specify the modelPath for model, "
-              + "either through setModelPath() or add 'modelPath' to the training parameters.");
-    }
-
-    conf.set("modelPath", this.modelPath);
-    this.writeModelToFile();
-
-    HamaConfiguration hamaConf = new HamaConfiguration(conf);
-
-    // create job
-    BSPJob job = new BSPJob(hamaConf, SmallLayeredNeuralNetworkTrainer.class);
-    job.setJobName("Small scale Neural Network training");
-    job.setJarByClass(SmallLayeredNeuralNetworkTrainer.class);
-    job.setBspClass(SmallLayeredNeuralNetworkTrainer.class);
-    job.setInputPath(dataInputPath);
-    job.setInputFormat(org.apache.hama.bsp.SequenceFileInputFormat.class);
-    job.setInputKeyClass(LongWritable.class);
-    job.setInputValueClass(VectorWritable.class);
-    job.setOutputKeyClass(NullWritable.class);
-    job.setOutputValueClass(NullWritable.class);
-    job.setOutputFormat(org.apache.hama.bsp.NullOutputFormat.class);
-
-    int numTasks = conf.getInt("tasks", 1);
-    Log.info(String.format("Number of tasks: %d\n", numTasks));
-    job.setNumBspTask(numTasks);
-    job.waitForCompletion(true);
-
-    // reload learned model
-    Log.info(String.format("Reload model from %s.", this.modelPath));
-    this.readFromModel();
-
-  }
-
-  @Override
-  protected void calculateTrainingError(DoubleVector labels, DoubleVector output) {
-    DoubleVector errors = labels.deepCopy().applyToElements(output,
-        this.costFunction);
-    this.trainingError = errors.sum();
-  }
-
-  /**
-   * Get the squashing function of a specified layer.
-   * 
-   * @param idx
-   * @return The squashing function of the specified layer.
-   */
-  public DoubleFunction getSquashingFunction(int idx) {
-    return this.squashingFunctionList.get(idx);
-  }
-
-}
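
Per the trainByInstance() contract above, each supervised training instance is
the concatenation of the feature vector and the label vector. A sketch of
online training on XOR-style data; the topology, the epoch count, and the
class name OnlineTrainingSketch are illustrative assumptions:

import org.apache.hama.commons.math.DenseDoubleVector;
import org.apache.hama.commons.math.DoubleVector;
import org.apache.hama.commons.math.FunctionFactory;
import org.apache.hama.ml.ann.SmallLayeredNeuralNetwork;

public class OnlineTrainingSketch {
  public static void main(String[] args) {
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann.setLearningRate(0.1);

    // Each instance is [feature..., label...]: 2 features + 1 label.
    double[][] data = { { 0, 0, 0 }, { 0, 1, 1 }, { 1, 0, 1 }, { 1, 1, 0 } };
    for (int epoch = 0; epoch < 1000; ++epoch) {
      for (double[] row : data) {
        ann.trainOnline(new DenseDoubleVector(row));
      }
    }

    DoubleVector out =
        ann.getOutput(new DenseDoubleVector(new double[] { 1, 0 }));
    System.out.println(out.get(0));
  }
}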

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkMessage.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkMessage.java b/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkMessage.java
deleted file mode 100644
index f941614..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkMessage.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hama.commons.io.MatrixWritable;
-import org.apache.hama.commons.math.DenseDoubleMatrix;
-import org.apache.hama.commons.math.DoubleMatrix;
-
-/**
- * SmallLayeredNeuralNetworkMessage transmits the messages between peers during
- * the training of neural networks.
- * 
- */
-public class SmallLayeredNeuralNetworkMessage implements Writable {
-
-  protected double trainingError;
-  protected DoubleMatrix[] curMatrices;
-  protected DoubleMatrix[] prevMatrices;
-  protected boolean converge;
-
-  public SmallLayeredNeuralNetworkMessage() {
-  }
-  
-  public SmallLayeredNeuralNetworkMessage(double trainingError,
-      boolean converge, DoubleMatrix[] weightMatrices,
-      DoubleMatrix[] prevMatrices) {
-    this.trainingError = trainingError;
-    this.converge = converge;
-    this.curMatrices = weightMatrices;
-    this.prevMatrices = prevMatrices;
-  }
-
-  @Override
-  public void readFields(DataInput input) throws IOException {
-    trainingError = input.readDouble();
-    converge = input.readBoolean();
-    int numMatrices = input.readInt();
-    boolean hasPrevMatrices = input.readBoolean();
-    curMatrices = new DenseDoubleMatrix[numMatrices];
-    // read matrix updates
-    for (int i = 0; i < curMatrices.length; ++i) {
-      curMatrices[i] = (DenseDoubleMatrix) MatrixWritable.read(input);
-    }
-
-    if (hasPrevMatrices) {
-      prevMatrices = new DenseDoubleMatrix[numMatrices];
-      // read previous matrices updates
-      for (int i = 0; i < prevMatrices.length; ++i) {
-        prevMatrices[i] = (DenseDoubleMatrix) MatrixWritable.read(input);
-      }
-    }
-  }
-
-  @Override
-  public void write(DataOutput output) throws IOException {
-    output.writeDouble(trainingError);
-    output.writeBoolean(converge);
-    output.writeInt(curMatrices.length);
-    if (prevMatrices == null) {
-      output.writeBoolean(false);
-    } else {
-      output.writeBoolean(true);
-    }
-    for (DoubleMatrix matrix : curMatrices) {
-      MatrixWritable.write(matrix, output);
-    }
-    if (prevMatrices != null) {
-      for (DoubleMatrix matrix : prevMatrices) {
-        MatrixWritable.write(matrix, output);
-      }
-    }
-  }
-
-  public double getTrainingError() {
-    return trainingError;
-  }
-
-  public void setTrainingError(double trainingError) {
-    this.trainingError = trainingError;
-  }
-
-  public boolean isConverge() {
-    return converge;
-  }
-
-  public void setConverge(boolean converge) {
-    this.converge = converge;
-  }
-
-  public DoubleMatrix[] getCurMatrices() {
-    return curMatrices;
-  }
-
-  public void setMatrices(DoubleMatrix[] curMatrices) {
-    this.curMatrices = curMatrices;
-  }
-
-  public DoubleMatrix[] getPrevMatrices() {
-    return prevMatrices;
-  }
-
-  public void setPrevMatrices(DoubleMatrix[] prevMatrices) {
-    this.prevMatrices = prevMatrices;
-  }
-
-}
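
The message above is a plain Writable: it serializes the training error, the
convergence flag, the current matrices, and, when present, the previous
matrices (a boolean flag records the null case). A round-trip sketch through
in-memory streams; the matrix shape and the class name MessageRoundTripSketch
are illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hama.commons.math.DenseDoubleMatrix;
import org.apache.hama.commons.math.DoubleMatrix;
import org.apache.hama.ml.ann.SmallLayeredNeuralNetworkMessage;

public class MessageRoundTripSketch {
  public static void main(String[] args) throws IOException {
    DoubleMatrix[] cur = { new DenseDoubleMatrix(2, 3) };
    // prevMatrices may be null; write() records a flag for that case.
    SmallLayeredNeuralNetworkMessage msg =
        new SmallLayeredNeuralNetworkMessage(0.05, false, cur, null);

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    msg.write(new DataOutputStream(bytes));

    SmallLayeredNeuralNetworkMessage restored =
        new SmallLayeredNeuralNetworkMessage();
    restored.readFields(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(restored.getTrainingError()); // 0.05
  }
}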

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkTrainer.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkTrainer.java b/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkTrainer.java
deleted file mode 100644
index 326b7a1..0000000
--- a/ml/src/main/java/org/apache/hama/ml/ann/SmallLayeredNeuralNetworkTrainer.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.ann;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hama.bsp.BSP;
-import org.apache.hama.bsp.BSPPeer;
-import org.apache.hama.bsp.sync.SyncException;
-import org.apache.hama.commons.io.VectorWritable;
-import org.apache.hama.commons.math.DenseDoubleMatrix;
-import org.apache.hama.commons.math.DoubleMatrix;
-import org.apache.hama.commons.math.DoubleVector;
-import org.mortbay.log.Log;
-
-/**
- * The trainer that trains the {@link SmallLayeredNeuralNetwork} based on the
- * BSP framework.
- * 
- */
-public final class SmallLayeredNeuralNetworkTrainer
-    extends
-    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> {
-
-  private SmallLayeredNeuralNetwork inMemoryModel;
-  private Configuration conf;
-  /* The mini-batch size, read from configuration */
-  private int batchSize;
-
-  /* fields used to check convergence between intervals */
-  private double prevAvgTrainingError;
-  private double curAvgTrainingError;
-  private long convergenceCheckInterval;
-  private long iterations;
-  private long maxIterations;
-  private boolean isConverge;
-
-  private String modelPath;
-
-  /**
-   * If the model path is specified, load the existing model from the storage
-   * location.
-   */
-  @Override
-  public void setup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer) {
-    if (peer.getPeerIndex() == 0) {
-      Log.info("Begin to train");
-    }
-    this.isConverge = false;
-    this.conf = peer.getConfiguration();
-    this.iterations = 0;
-    this.modelPath = conf.get("modelPath");
-    this.maxIterations = conf.getLong("training.max.iterations", 100000);
-    this.convergenceCheckInterval = conf.getLong("convergence.check.interval",
-        2000);
-    this.inMemoryModel = new SmallLayeredNeuralNetwork(modelPath);
-    this.prevAvgTrainingError = Double.MAX_VALUE;
-    this.batchSize = conf.getInt("training.batch.size", 50);
-  }
-
-  /**
-   * Write the trained model back to the storage location.
-   */
-  @Override
-  public void cleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer) {
-    // write model to modelPath
-    if (peer.getPeerIndex() == 0) {
-      try {
-        Log.info(String.format("End of training, number of iterations: %d.\n",
-            this.iterations));
-        Log.info(String.format("Write model back to %s\n",
-            inMemoryModel.getModelPath()));
-        this.inMemoryModel.writeModelToFile();
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-    }
-  }
-
-  @Override
-  public void bsp(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer)
-      throws IOException, SyncException, InterruptedException {
-    while (this.iterations++ < maxIterations) {
-      // each groom calculates the matrix updates from its local data
-      calculateUpdates(peer);
-      peer.sync();
-
-      // the master merges the model updates
-      if (peer.getPeerIndex() == 0) {
-        mergeUpdates(peer);
-      }
-      peer.sync();
-      if (this.isConverge) {
-        break;
-      }
-    }
-  }
-
-  /**
-   * Calculate the matrix updates from the local partition of the data.
-   * 
-   * @param peer
-   * @throws IOException
-   */
-  private void calculateUpdates(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer)
-      throws IOException {
-    // receive update information from master
-    if (peer.getNumCurrentMessages() != 0) {
-      SmallLayeredNeuralNetworkMessage inMessage = peer.getCurrentMessage();
-      DoubleMatrix[] newWeights = inMessage.getCurMatrices();
-      DoubleMatrix[] preWeightUpdates = inMessage.getPrevMatrices();
-      this.inMemoryModel.setWeightMatrices(newWeights);
-      this.inMemoryModel.setPrevWeightMatrices(preWeightUpdates);
-      this.isConverge = inMessage.isConverge();
-      // check converge
-      if (isConverge) {
-        return;
-      }
-    }
-
-    DoubleMatrix[] weightUpdates = new DoubleMatrix[this.inMemoryModel.weightMatrixList
-        .size()];
-    for (int i = 0; i < weightUpdates.length; ++i) {
-      int row = this.inMemoryModel.weightMatrixList.get(i).getRowCount();
-      int col = this.inMemoryModel.weightMatrixList.get(i).getColumnCount();
-      weightUpdates[i] = new DenseDoubleMatrix(row, col);
-    }
-
-    // continue to train
-    double avgTrainingError = 0.0;
-    LongWritable key = new LongWritable();
-    VectorWritable value = new VectorWritable();
-    for (int recordsRead = 0; recordsRead < batchSize; ++recordsRead) {
-      if (!peer.readNext(key, value)) {
-        peer.reopenInput();
-        peer.readNext(key, value);
-      }
-      DoubleVector trainingInstance = value.getVector();
-      SmallLayeredNeuralNetwork.matricesAdd(weightUpdates,
-          this.inMemoryModel.trainByInstance(trainingInstance));
-      avgTrainingError += this.inMemoryModel.trainingError;
-    }
-    avgTrainingError /= batchSize;
-
-    // calculate the average of updates
-    for (int i = 0; i < weightUpdates.length; ++i) {
-      weightUpdates[i] = weightUpdates[i].divide(batchSize);
-    }
-
-    DoubleMatrix[] prevWeightUpdates = this.inMemoryModel
-        .getPrevMatricesUpdates();
-    SmallLayeredNeuralNetworkMessage outMessage = new SmallLayeredNeuralNetworkMessage(
-        avgTrainingError, false, weightUpdates, prevWeightUpdates);
-    peer.send(peer.getPeerName(0), outMessage);
-  }
-
-  /**
-   * Merge the updates received from the grooms.
-   * 
-   * @param peer
-   * @throws IOException
-   */
-  private void mergeUpdates(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, SmallLayeredNeuralNetworkMessage> peer)
-      throws IOException {
-    int numMessages = peer.getNumCurrentMessages();
-    boolean isConverge = false;
-    if (numMessages == 0) { // converged; nothing to merge
-      this.isConverge = true;
-      return;
-    }
-
-    double avgTrainingError = 0;
-    DoubleMatrix[] matricesUpdates = null;
-    DoubleMatrix[] prevMatricesUpdates = null;
-
-    while (peer.getNumCurrentMessages() > 0) {
-      SmallLayeredNeuralNetworkMessage message = peer.getCurrentMessage();
-      if (matricesUpdates == null) {
-        matricesUpdates = message.getCurMatrices();
-        prevMatricesUpdates = message.getPrevMatrices();
-      } else {
-        SmallLayeredNeuralNetwork.matricesAdd(matricesUpdates,
-            message.getCurMatrices());
-        SmallLayeredNeuralNetwork.matricesAdd(prevMatricesUpdates,
-            message.getPrevMatrices());
-      }
-      avgTrainingError += message.getTrainingError();
-    }
-
-    if (numMessages != 1) {
-      avgTrainingError /= numMessages;
-      for (int i = 0; i < matricesUpdates.length; ++i) {
-        matricesUpdates[i] = matricesUpdates[i].divide(numMessages);
-        prevMatricesUpdates[i] = prevMatricesUpdates[i].divide(numMessages);
-      }
-    }
-    this.inMemoryModel.updateWeightMatrices(matricesUpdates);
-    this.inMemoryModel.setPrevWeightMatrices(prevMatricesUpdates);
-
-    // check convergence
-    if (iterations % convergenceCheckInterval == 0) {
-      if (prevAvgTrainingError < curAvgTrainingError) {
-        // error cannot decrease any more
-        isConverge = true;
-      }
-      // update
-      prevAvgTrainingError = curAvgTrainingError;
-      curAvgTrainingError = 0;
-    }
-    curAvgTrainingError += avgTrainingError / convergenceCheckInterval;
-
-    // broadcast updated weight matrices
-    for (String peerName : peer.getAllPeerNames()) {
-      SmallLayeredNeuralNetworkMessage msg = new SmallLayeredNeuralNetworkMessage(
-          0, isConverge, this.inMemoryModel.getWeightMatrices(),
-          this.inMemoryModel.getPrevMatricesUpdates());
-      peer.send(peerName, msg);
-    }
-  }
-
-}
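
The merge step above is, at heart, an element-wise average of the per-groom
weight updates. A self-contained sketch of that arithmetic in plain arrays
(no Hama types), mirroring the matricesAdd and divide calls:

    public class MergeSketch {

      /** Average one layer's weight-update matrix across all grooms. */
      static double[][] merge(double[][][] updatesPerGroom) {
        int grooms = updatesPerGroom.length;
        int rows = updatesPerGroom[0].length;
        int cols = updatesPerGroom[0][0].length;
        double[][] merged = new double[rows][cols];
        // sum the updates element-wise (the matricesAdd step)
        for (double[][] update : updatesPerGroom) {
          for (int r = 0; r < rows; ++r) {
            for (int c = 0; c < cols; ++c) {
              merged[r][c] += update[r][c];
            }
          }
        }
        // divide by the number of grooms (the divide step)
        for (int r = 0; r < rows; ++r) {
          for (int c = 0; c < cols; ++c) {
            merged[r][c] /= grooms;
          }
        }
        return merged;
      }
    }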

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/perception/MLPMessage.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/perception/MLPMessage.java b/ml/src/main/java/org/apache/hama/ml/perception/MLPMessage.java
deleted file mode 100644
index a4a1a99..0000000
--- a/ml/src/main/java/org/apache/hama/ml/perception/MLPMessage.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.perception;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * MLPMessage is used to hold the parameters that need to be sent between
- * tasks.
- */
-public abstract class MLPMessage implements Writable {
-  protected boolean terminated;
-
-  public MLPMessage() {
-  }
-  
-  public MLPMessage(boolean terminated) {
-    setTerminated(terminated);
-  }
-
-  public void setTerminated(boolean terminated) {
-    this.terminated = terminated;
-  }
-
-  public boolean isTerminated() {
-    return terminated;
-  }
-
-}
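
Subclasses of MLPMessage supply the Writable serialization themselves. A
minimal, hypothetical subclass that carries nothing beyond the terminated
flag might look like:

    package org.apache.hama.ml.perception;

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    public class TerminationOnlyMessage extends MLPMessage {

      // Writable deserialization requires a no-argument constructor
      public TerminationOnlyMessage() {
        super();
      }

      public TerminationOnlyMessage(boolean terminated) {
        super(terminated);
      }

      @Override
      public void readFields(DataInput in) throws IOException {
        setTerminated(in.readBoolean());
      }

      @Override
      public void write(DataOutput out) throws IOException {
        out.writeBoolean(isTerminated());
      }
    }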

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/perception/MultiLayerPerceptron.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/perception/MultiLayerPerceptron.java b/ml/src/main/java/org/apache/hama/ml/perception/MultiLayerPerceptron.java
deleted file mode 100644
index 8901549..0000000
--- a/ml/src/main/java/org/apache/hama/ml/perception/MultiLayerPerceptron.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.perception;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hama.commons.math.DoubleDoubleFunction;
-import org.apache.hama.commons.math.DoubleFunction;
-import org.apache.hama.commons.math.DoubleVector;
-import org.apache.hama.commons.math.FunctionFactory;
-import org.apache.hama.ml.ann.NeuralNetworkTrainer;
-import org.apache.hama.ml.util.DefaultFeatureTransformer;
-import org.apache.hama.ml.util.FeatureTransformer;
-
-/**
- * MultiLayerPerceptron defines the common behavior of all the concrete
- * perceptrons.
- */
-public abstract class MultiLayerPerceptron {
-
-  /* The trainer for the model */
-  protected NeuralNetworkTrainer trainer;
-  /* The file path that contains the model meta-data */
-  protected String modelPath;
-
-  /* Model meta-data */
-  protected String MLPType;
-  protected double learningRate;
-  protected double regularization;
-  protected double momentum;
-  protected int numberOfLayers;
-  protected String squashingFunctionName;
-  protected String costFunctionName;
-  protected int[] layerSizeArray;
-
-  protected DoubleDoubleFunction costFunction;
-  protected DoubleFunction squashingFunction;
-
-  // transform the original features to new space
-  protected FeatureTransformer featureTransformer;
-
-  /**
-   * Initialize the MLP.
-   * 
-   * @param learningRate A larger learningRate makes the MLP learn more
-   *          aggressively. The learning rate must be positive.
-   * @param regularization Regularization makes the MLP less likely to overfit.
-   *          The value must lie in [0, 0.5); values that are too large hurt
-   *          the precision.
-   * @param momentum Momentum lets historical adjustments influence the current
-   *          adjustment. The momentum weight cannot be negative.
-   * @param squashingFunctionName The name of the squashing function.
-   * @param costFunctionName The name of the cost function.
-   * @param layerSizeArray The number of neurons for each layer. Note that the
-   *          actual size of each layer is one more than the specified size, to
-   *          account for the bias neuron.
-   */
-  public MultiLayerPerceptron(double learningRate, double regularization,
-      double momentum, String squashingFunctionName, String costFunctionName,
-      int[] layerSizeArray) {
-    this.MLPType = getTypeName();
-    if (learningRate <= 0) {
-      throw new IllegalStateException("learning rate must be positive.");
-    }
-    this.learningRate = learningRate;
-    if (regularization < 0 || regularization >= 0.5) {
-      throw new IllegalStateException(
-          "regularization weight must be in range [0, 0.5).");
-    }
-    this.regularization = regularization;
-    if (momentum < 0) {
-      throw new IllegalStateException("momentum weight cannot be negative.");
-    }
-    this.momentum = momentum;
-    this.squashingFunctionName = squashingFunctionName;
-    this.costFunctionName = costFunctionName;
-    this.layerSizeArray = layerSizeArray;
-    this.numberOfLayers = this.layerSizeArray.length;
-
-    this.costFunction = FunctionFactory
-        .createDoubleDoubleFunction(this.costFunctionName);
-    this.squashingFunction = FunctionFactory
-        .createDoubleFunction(this.squashingFunctionName);
-
-    this.featureTransformer = new DefaultFeatureTransformer();
-  }
-
-  /**
-   * Initialize a multi-layer perceptron with existing model.
-   * 
-   * @param modelPath Location of existing model meta-data.
-   */
-  public MultiLayerPerceptron(String modelPath) {
-    this.modelPath = modelPath;
-  }
-
-  /**
-   * Train the model with the given data. This method invokes a perceptron
-   * training BSP task to train the model, then writes the model to modelPath.
-   * 
-   * @param dataInputPath The path of the data.
-   * @param trainingParams Extra parameters for training.
-   */
-  public abstract void train(Path dataInputPath,
-      Map<String, String> trainingParams) throws Exception;
-
-  /**
-   * Get the output based on the input instance and the learned model.
-   * 
-   * @param featureVector The feature vector of an instance to feed to the
-   *          perceptron.
-   * @return The output vector.
-   */
-  public DoubleVector output(DoubleVector featureVector) {
-    return this.outputWrapper(this.featureTransformer.transform(featureVector));
-  }
-
-  public abstract DoubleVector outputWrapper(DoubleVector featureVector);
-
-  /**
-   * Use the class name as the type name.
-   */
-  protected abstract String getTypeName();
-
-  /**
-   * Read the model meta-data from the specified location.
-   * 
-   * @throws IOException
-   */
-  protected abstract void readFromModel() throws IOException;
-
-  /**
-   * Write the model data to the specified location.
-   * 
-   * @param modelPath The location in file system to store the model.
-   * @throws IOException
-   */
-  public abstract void writeModelToFile(String modelPath) throws IOException;
-
-  public String getModelPath() {
-    return modelPath;
-  }
-
-  public String getMLPType() {
-    return MLPType;
-  }
-
-  public double getLearningRate() {
-    return learningRate;
-  }
-
-  public double getRegularization() {
-    return regularization;
-  }
-
-  public double getMomentum() {
-    return momentum;
-  }
-
-  public int getNumberOfLayers() {
-    return numberOfLayers;
-  }
-
-  public String getSquashingFunctionName() {
-    return squashingFunctionName;
-  }
-
-  public String getCostFunctionName() {
-    return costFunctionName;
-  }
-
-  public int[] getLayerSizeArray() {
-    return layerSizeArray;
-  }
-
-  /**
-   * Set the feature transformer.
-   * 
-   * @param featureTransformer
-   */
-  public void setFeatureTransformer(FeatureTransformer featureTransformer) {
-    this.featureTransformer = featureTransformer;
-  }
-  
-  public FeatureTransformer getFeatureTransformer() {
-    return this.featureTransformer;
-  }
-
-}
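
output() funnels every feature vector through the configured
FeatureTransformer before outputWrapper() sees it. A hypothetical transformer
sketch, assuming FeatureTransformer can be subclassed with a single
transform(DoubleVector) method (as the call in output() implies) and that
DoubleVector offers a scalar multiply:

    package org.apache.hama.ml.util;

    import org.apache.hama.commons.math.DoubleVector;

    /** Scales every feature by a constant factor before the MLP sees it. */
    public class ScalingTransformer extends FeatureTransformer {

      private final double scale;

      public ScalingTransformer(double scale) {
        this.scale = scale;
      }

      @Override
      public DoubleVector transform(DoubleVector original) {
        return original.multiply(scale); // element-wise scaling
      }
    }

It would be wired in via setFeatureTransformer(new ScalingTransformer(0.01));
when nothing is set, DefaultFeatureTransformer is used.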

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/perception/PerceptronTrainer.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/perception/PerceptronTrainer.java b/ml/src/main/java/org/apache/hama/ml/perception/PerceptronTrainer.java
deleted file mode 100644
index 0baf132..0000000
--- a/ml/src/main/java/org/apache/hama/ml/perception/PerceptronTrainer.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.perception;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hama.bsp.BSP;
-import org.apache.hama.bsp.BSPPeer;
-import org.apache.hama.bsp.sync.SyncException;
-import org.apache.hama.commons.io.VectorWritable;
-
-/**
- * The trainer that is used to train the perceptron with BSP. The trainer
- * reads the training data and obtains the trained parameters of the model.
- * 
- */
-public abstract class PerceptronTrainer extends
-    BSP<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> {
-
-  protected Configuration conf;
-  protected int maxIteration;
-  protected int batchSize;
-  protected String trainingMode;
-
-  @Override
-  public void setup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException {
-    conf = peer.getConfiguration();
-    trainingMode = conf.get("training.mode");
-    batchSize = conf.getInt("training.batch.size", 100); // mini-batch by
-                                                         // default
-    this.extraSetup(peer);
-  }
-
-  /**
-   * Handle extra setup for sub-classes.
-   * 
-   * @param peer
-   * @throws IOException
-   * @throws SyncException
-   * @throws InterruptedException
-   */
-  protected void extraSetup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException {
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public abstract void bsp(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException, SyncException, InterruptedException;
-
-  @Override
-  public void cleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException {
-
-    this.extraCleanup(peer);
-  }
-
-  /**
-   * Handle extra cleanup for sub-classes.
-   * 
-   * @param peer
-   * @throws IOException
-   * @throws SyncException
-   * @throws InterruptedException
-   */
-  protected void extraCleanup(
-      BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
-      throws IOException {
-  }
-
-}
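
PerceptronTrainer is a template: setup() and cleanup() populate the shared
fields, then hand off to extraSetup() and extraCleanup(). A skeletal subclass
showing where the hooks fire; the "training.max.iteration" key below is
invented purely for illustration:

    package org.apache.hama.ml.perception;

    import java.io.IOException;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hama.bsp.BSPPeer;
    import org.apache.hama.bsp.sync.SyncException;
    import org.apache.hama.commons.io.VectorWritable;

    public class SkeletalPerceptronTrainer extends PerceptronTrainer {

      @Override
      protected void extraSetup(
          BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
          throws IOException, SyncException, InterruptedException {
        // runs after conf, trainingMode and batchSize have been populated
        maxIteration = conf.getInt("training.max.iteration", 1000);
      }

      @Override
      public void bsp(
          BSPPeer<LongWritable, VectorWritable, NullWritable, NullWritable, MLPMessage> peer)
          throws IOException, SyncException, InterruptedException {
        // the concrete training loop would go here
      }
    }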

http://git-wip-us.apache.org/repos/asf/hama/blob/33041c09/ml/src/main/java/org/apache/hama/ml/perception/SmallMLPMessage.java
----------------------------------------------------------------------
diff --git a/ml/src/main/java/org/apache/hama/ml/perception/SmallMLPMessage.java b/ml/src/main/java/org/apache/hama/ml/perception/SmallMLPMessage.java
deleted file mode 100644
index 5504cf9..0000000
--- a/ml/src/main/java/org/apache/hama/ml/perception/SmallMLPMessage.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hama.ml.perception;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hama.commons.io.MatrixWritable;
-import org.apache.hama.commons.math.DenseDoubleMatrix;
-
-/**
- * SmallMLPMessage is used to exchange information for the
- * {@link SmallMultiLayerPerceptron}. It sends the whole parameter matrices
- * from one task to another.
- */
-public class SmallMLPMessage extends MLPMessage {
-
-  private int owner; // the ID of the task who creates the message
-  private int numOfUpdatedMatrices;
-  private DenseDoubleMatrix[] weightUpdatedMatrices;
-  private int numOfPrevUpdatedMatrices;
-  private DenseDoubleMatrix[] prevWeightUpdatedMatrices;
-
-  public SmallMLPMessage() {
-    super();
-  }
-  
-  /**
-   * When a slave sends a message to the master, use this constructor.
-   * 
-   * @param owner The owner task that creates the message
-   * @param terminated Whether the training is terminated for the owner task
-   * @param weightUpdatedMatrices The weight updates
-   */
-  public SmallMLPMessage(int owner, boolean terminated,
-      DenseDoubleMatrix[] weightUpdatedMatrices) {
-    super(terminated);
-    this.owner = owner;
-    this.weightUpdatedMatrices = weightUpdatedMatrices;
-    this.numOfUpdatedMatrices = this.weightUpdatedMatrices == null ? 0
-        : this.weightUpdatedMatrices.length;
-    this.numOfPrevUpdatedMatrices = 0;
-    this.prevWeightUpdatedMatrices = null;
-  }
-
-  /**
-   * When the master sends a message to a slave, use this constructor.
-   * 
-   * @param owner The owner task that creates the message
-   * @param terminated Whether the training is terminated for the owner task
-   * @param weightUpdatedMatrices The weight updates
-   * @param prevWeightUpdatedMatrices The previous weight updates
-   */
-  public SmallMLPMessage(int owner, boolean terminated,
-      DenseDoubleMatrix[] weightUpdatedMatrices,
-      DenseDoubleMatrix[] prevWeightUpdatedMatrices) {
-    this(owner, terminated, weightUpdatedMatrices);
-    this.prevWeightUpdatedMatrices = prevWeightUpdatedMatrices;
-    this.numOfPrevUpdatedMatrices = this.prevWeightUpdatedMatrices == null ? 0
-        : this.prevWeightUpdatedMatrices.length;
-  }
-
-  /**
-   * Get the owner task Id of the message.
-   * 
-   * @return the owner value.
-   */
-  public int getOwner() {
-    return owner;
-  }
-
-  /**
-   * Get the updated weight matrices.
-   * 
-   * @return the array value of dense double matrix object.
-   */
-  public DenseDoubleMatrix[] getWeightUpdatedMatrices() {
-    return this.weightUpdatedMatrices;
-  }
-
-  public DenseDoubleMatrix[] getPrevWeightsUpdatedMatrices() {
-    return this.prevWeightUpdatedMatrices;
-  }
-
-  @Override
-  public void readFields(DataInput input) throws IOException {
-    this.owner = input.readInt();
-    this.terminated = input.readBoolean();
-    this.numOfUpdatedMatrices = input.readInt();
-    this.weightUpdatedMatrices = new DenseDoubleMatrix[this.numOfUpdatedMatrices];
-    for (int i = 0; i < this.numOfUpdatedMatrices; ++i) {
-      this.weightUpdatedMatrices[i] = (DenseDoubleMatrix) MatrixWritable
-          .read(input);
-    }
-    this.numOfPrevUpdatedMatrices = input.readInt();
-    this.prevWeightUpdatedMatrices = new DenseDoubleMatrix[this.numOfPrevUpdatedMatrices];
-    for (int i = 0; i < this.numOfPrevUpdatedMatrices; ++i) {
-      this.prevWeightUpdatedMatrices[i] = (DenseDoubleMatrix) MatrixWritable
-          .read(input);
-    }
-  }
-
-  @Override
-  public void write(DataOutput output) throws IOException {
-    output.writeInt(this.owner);
-    output.writeBoolean(this.terminated);
-    output.writeInt(this.numOfUpdatedMatrices);
-    for (int i = 0; i < this.numOfUpdatedMatrices; ++i) {
-      MatrixWritable.write(this.weightUpdatedMatrices[i], output);
-    }
-    output.writeInt(this.numOfPrevUpdatedMatrices);
-    for (int i = 0; i < this.numOfPrevUpdatedMatrices; ++i) {
-      MatrixWritable.write(this.prevWeightUpdatedMatrices[i], output);
-    }
-  }
-
-}
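
The two constructors above encode the two directions of traffic. A short
usage sketch, relying only on the API visible in this file:

    import org.apache.hama.commons.math.DenseDoubleMatrix;
    import org.apache.hama.ml.perception.SmallMLPMessage;

    public class SmallMLPMessageUsage {
      public static void main(String[] args) {
        DenseDoubleMatrix[] updates = { new DenseDoubleMatrix(2, 2) };
        DenseDoubleMatrix[] prev = { new DenseDoubleMatrix(2, 2) };

        // slave -> master: current weight updates only
        SmallMLPMessage fromSlave = new SmallMLPMessage(1, false, updates);

        // master -> slave: current plus previous weight updates
        SmallMLPMessage fromMaster =
            new SmallMLPMessage(0, false, updates, prev);

        System.out.println(fromSlave.getOwner()); // 1
        System.out.println(
            fromMaster.getPrevWeightsUpdatedMatrices().length); // 1
      }
    }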