Posted to dev@horn.apache.org by ed...@apache.org on 2016/06/26 10:25:02 UTC

incubator-horn git commit: HORN-21: Add dropout neuron

Repository: incubator-horn
Updated Branches:
  refs/heads/master af88df41b -> d88a785b6


HORN-21: Add dropout neuron


Project: http://git-wip-us.apache.org/repos/asf/incubator-horn/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-horn/commit/d88a785b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-horn/tree/d88a785b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-horn/diff/d88a785b

Branch: refs/heads/master
Commit: d88a785b6140594660817ff3c3dccff3181c8e90
Parents: af88df4
Author: Edward J. Yoon <ed...@apache.org>
Authored: Fri May 27 20:03:39 2016 +0900
Committer: Edward J. Yoon <ed...@apache.org>
Committed: Mon Jun 20 20:46:29 2016 +0900

----------------------------------------------------------------------
 README.md                                       |  17 +-
 conf/log4j.properties                           |   3 +-
 .../horn/core/AbstractLayeredNeuralNetwork.java |   2 +-
 src/main/java/org/apache/horn/core/HornJob.java |  47 ++--
 .../apache/horn/core/LayeredNeuralNetwork.java  | 230 +++++++++++++++----
 .../horn/core/LayeredNeuralNetworkTrainer.java  |  13 +-
 src/main/java/org/apache/horn/core/Neuron.java  |  70 ++++--
 src/main/java/org/apache/horn/core/Synapse.java |  31 ++-
 .../org/apache/horn/examples/DropoutNeuron.java |  73 ++++++
 .../horn/examples/MultiLayerPerceptron.java     |  37 +--
 .../horn/funcs/CategoricalCrossEntropy.java     |  10 +-
 src/main/java/org/apache/horn/funcs/ReLU.java   |   6 +-
 .../java/org/apache/horn/funcs/SoftMax.java     |   5 +-
 .../org/apache/horn/utils/MNISTConverter.java   |   7 +-
 .../org/apache/horn/utils/MNISTEvaluator.java   |  44 ++--
 .../java/org/apache/horn/utils/MathUtils.java   |  31 +++
 src/main/resources/log4j.properties             |   2 +-
 .../java/org/apache/horn/core/TestNeuron.java   |   6 +-
 .../horn/examples/MultiLayerPerceptronTest.java |   4 +-
 src/test/resources/log4j.properties             |   2 +-
 20 files changed, 477 insertions(+), 163 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 4c9ec6d..bbad6ab 100644
--- a/README.md
+++ b/README.md
@@ -23,10 +23,10 @@ Then, we measure the margin of error of the output and adjust the weights accord
     public void backward(
         Iterable<Synapse<FloatWritable, FloatWritable>> messages)
         throws IOException {
-      float gradient = 0;
+      float delta = 0;
       for (Synapse<FloatWritable, FloatWritable> m : messages) {
         // Calculates error gradient for each neuron
-        gradient += (m.getDelta() * m.getWeight());
+        delta += (m.getDelta() * m.getWeight());
 
         // Weight corrections
         float weight = -this.getLearningRate() * this.getOutput()
@@ -34,23 +34,22 @@ Then, we measure the margin of error of the output and adjust the weights accord
         this.push(weight);
       }
 
-      this.backpropagate(gradient
+      this.backpropagate(delta
           * this.squashingFunction.applyDerivative(this.getOutput()));
     }
   }
 ```
 The advantage of this programming model is that it is easy and intuitive to use.
 
-Also, Apache Horn provides a simplified and intuitive configuration interface. To create neural network job and submit it to existing Hadoop or Hama cluster, we just add the layer with its properties such as squashing function and neuron class. The below example configures the create 2-layer neural network with 100 neurons in hidden layers for train MNIST dataset:
+Also, Apache Horn provides a simplified and intuitive configuration interface. To create a neural network job and submit it to an existing Hadoop or Hama cluster, we just add each layer with its properties, such as the squashing function and neuron class. The example below configures a 2-layer neural network for training on the MNIST dataset:
 ```Java
   HornJob job = new HornJob(conf, MultiLayerPerceptron.class);
   job.setLearningRate(learningRate);
-  job.setTrainingMethod(TrainingMethod.GRADIENT_DESCENT);
   ..
 
-  job.inputLayer(784, Sigmoid.class, StandardNeuron.class);
-  job.addLayer(100, Sigmoid.class, StandardNeuron.class);
-  job.outputLayer(10, SoftMax.class, StandardNeuron.class);
+  job.inputLayer(features, 0.8f); // droprate
+  job.addLayer(hu, ReLU.class, DropoutNeuron.class);
+  job.outputLayer(labels, SoftMax.class, StandardNeuron.class);
   job.setCostFunction(CrossEntropy.class);
 ```
 
@@ -68,7 +67,7 @@ Then, train it with following command (in this example, we used η 0.01, α 0.9,
    0.01 0.9 0.0005 784 100 10 10 12000
 ```
 
-With this default example, you'll reach over the 95% accuracy. In local mode, 6 tasks will train the model in synchronous parallel fashion and will took around 30 mins. 
+With this default example, you'll reach over 95% accuracy. In local mode, 20 tasks will train the model in a synchronous parallel fashion, which takes around 10 minutes. 
 
 ## High Scalability
 

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/conf/log4j.properties b/conf/log4j.properties
index 8e3b877..f33e9a8 100644
--- a/conf/log4j.properties
+++ b/conf/log4j.properties
@@ -67,7 +67,8 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
 log4j.logger.org.apache.zookeeper=ERROR
 log4j.logger.org.apache.avro=ERROR
-log4j.logger.org.apache.hama=INFO
+log4j.logger.org.apache.hama=ERROR
+log4j.logger.org.apache.hadoop=ERROR
 
 #
 # console

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java b/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
index 4d1ea52..26e815f 100644
--- a/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/AbstractLayeredNeuralNetwork.java
@@ -155,7 +155,7 @@ abstract class AbstractLayeredNeuralNetwork extends AbstractNeuralNetwork {
    * @return The layer index, starts with 0.
    */
   public abstract int addLayer(int size, boolean isFinalLayer,
-      FloatFunction squashingFunction, Class<? extends Neuron> neuronClass);
+      FloatFunction squashingFunction, Class<? extends Neuron<?>> neuronClass);
 
   /**
    * Get the size of a particular layer.

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/HornJob.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/HornJob.java b/src/main/java/org/apache/horn/core/HornJob.java
index d178166..3912b67 100644
--- a/src/main/java/org/apache/horn/core/HornJob.java
+++ b/src/main/java/org/apache/horn/core/HornJob.java
@@ -35,36 +35,39 @@ public class HornJob extends BSPJob {
     super(conf);
     this.setJarByClass(exampleClass);
 
+    // default local file block size 10mb
+    this.getConfiguration().set("fs.local.block.size", "10358951");
     neuralNetwork = new LayeredNeuralNetwork();
   }
 
-  @SuppressWarnings("rawtypes")
-  public void inputLayer(int featureDimension, Class<? extends Function> func,
-      Class<? extends Neuron> neuronClass) {
-    addLayer(featureDimension, func, neuronClass);
+  public void inputLayer(int featureDimension) {
+    addLayer(featureDimension, null, null);
+    neuralNetwork.setDropRateOfInputLayer(1);
+  }
+
+  public void inputLayer(int featureDimension, float dropRate) {
+    addLayer(featureDimension, null, null);
+    neuralNetwork.setDropRateOfInputLayer(dropRate);
   }
 
-  @SuppressWarnings("rawtypes")
   public void addLayer(int featureDimension, Class<? extends Function> func,
-      Class<? extends Neuron> neuronClass) {
-    neuralNetwork
-        .addLayer(featureDimension, false,
-            FunctionFactory.createFloatFunction(func.getSimpleName()),
-            neuronClass);
+      Class<? extends Neuron<?>> neuronClass) {
+    neuralNetwork.addLayer(
+        featureDimension,
+        false,
+        (func != null) ? FunctionFactory.createFloatFunction(func
+            .getSimpleName()) : null, neuronClass);
   }
 
-  @SuppressWarnings("rawtypes")
   public void outputLayer(int labels, Class<? extends Function> func,
-      Class<? extends Neuron> neuronClass) {
-    neuralNetwork
-        .addLayer(labels, true,
-            FunctionFactory.createFloatFunction(func.getSimpleName()),
-            neuronClass);
+      Class<? extends Neuron<?>> neuronClass) {
+    neuralNetwork.addLayer(labels, true,
+        FunctionFactory.createFloatFunction(func.getSimpleName()), neuronClass);
   }
 
   public void setCostFunction(Class<? extends Function> func) {
-    neuralNetwork.setCostFunction(FunctionFactory
-        .createFloatFloatFunction(func.getSimpleName()));
+    neuralNetwork.setCostFunction(FunctionFactory.createFloatFloatFunction(func
+        .getSimpleName()));
   }
 
   public void setDouble(String name, double value) {
@@ -82,15 +85,15 @@ public class HornJob extends BSPJob {
   public void setTrainingMethod(TrainingMethod method) {
     this.neuralNetwork.setTrainingMethod(method);
   }
-  
+
   public void setLearningStyle(LearningStyle style) {
     this.neuralNetwork.setLearningStyle(style);
   }
-  
+
   public void setLearningRate(float learningRate) {
     this.neuralNetwork.setLearningRate(learningRate);
   }
-  
+
   public void setConvergenceCheckInterval(int n) {
     this.conf.setInt("convergence.check.interval", n);
   }
@@ -98,7 +101,7 @@ public class HornJob extends BSPJob {
   public void setMomentumWeight(float momentumWeight) {
     this.neuralNetwork.setMomemtumWeight(momentumWeight);
   }
-  
+
   public void setRegularizationWeight(float regularizationWeight) {
     this.neuralNetwork.setRegularizationWeight(regularizationWeight);
   }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
index aa8e68d..6f7aa70 100644
--- a/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
@@ -22,6 +22,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.commons.lang.math.RandomUtils;
@@ -50,6 +51,7 @@ import org.apache.horn.examples.MultiLayerPerceptron.StandardNeuron;
 import org.apache.horn.funcs.FunctionFactory;
 import org.apache.horn.funcs.IdentityFunction;
 import org.apache.horn.funcs.SoftMax;
+import org.apache.horn.utils.MathUtils;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -79,11 +81,14 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
   /* Different layers can have different squashing function */
   protected List<FloatFunction> squashingFunctionList;
 
-  protected List<Class<? extends Neuron>> neuronClassList;
+  protected List<Class<? extends Neuron<?>>> neuronClassList;
 
   protected int finalLayerIdx;
 
-  private List<Neuron[]> neurons = new ArrayList<Neuron[]>();
+  private List<Neuron<?>[]> neurons = new ArrayList<Neuron<?>[]>();
+
+  private float dropRate;
+  private long iterations;
 
   public LayeredNeuralNetwork() {
     this.layerSizeList = Lists.newArrayList();
@@ -95,19 +100,28 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
 
   public LayeredNeuralNetwork(HamaConfiguration conf, String modelPath) {
     super(conf, modelPath);
+    initializeNeurons(false);
+  }
 
-    // initialize neuron objects
+  public LayeredNeuralNetwork(HamaConfiguration conf, String modelPath,
+      boolean isTraining) {
+    super(conf, modelPath);
+    initializeNeurons(isTraining);
+  }
+
+  // initialize neuron objects
+  private void initializeNeurons(boolean isTraining) {
     for (int i = 0; i < layerSizeList.size(); i++) {
       int numOfNeurons = layerSizeList.get(i);
-      Class neuronClass;
+      Class<? extends Neuron<?>> neuronClass;
       if (i == 0)
-        neuronClass = Neuron.class;
+        neuronClass = StandardNeuron.class; // not actually needed for the input layer
       else
         neuronClass = neuronClassList.get(i - 1);
 
-      Neuron[] tmp = new Neuron[numOfNeurons];
+      Neuron<?>[] tmp = new Neuron[numOfNeurons];
       for (int j = 0; j < numOfNeurons; j++) {
-        Neuron n = newNeuronInstance(StandardNeuron.class);
+        Neuron<?> n = newNeuronInstance(neuronClass);
         if (i > 0)
           n.setSquashingFunction(squashingFunctionList.get(i - 1));
         else
@@ -115,8 +129,10 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
 
         n.setLayerIndex(i);
 
+        n.setNeuronID(j);
         n.setLearningRate(this.learningRate);
         n.setMomentumWeight(this.momentumWeight);
+        n.setTraining(isTraining);
         tmp[j] = n;
       }
 
@@ -129,12 +145,12 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
    * {@inheritDoc}
    */
   public int addLayer(int size, boolean isFinalLayer,
-      FloatFunction squashingFunction, Class<? extends Neuron> neuronClass) {
+      FloatFunction squashingFunction, Class<? extends Neuron<?>> neuronClass) {
     return addLayer(size, isFinalLayer, squashingFunction, neuronClass, null);
   }
 
   public int addLayer(int size, boolean isFinalLayer,
-      FloatFunction squashingFunction, Class<? extends Neuron> neuronClass,
+      FloatFunction squashingFunction, Class<? extends Neuron<?>> neuronClass,
       Class<? extends IntermediateOutput> interlayer) {
     Preconditions.checkArgument(size > 0,
         "Size of layer must be larger than 0.");
@@ -267,13 +283,14 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     super.readFields(input);
 
     this.finalLayerIdx = input.readInt();
+    this.dropRate = input.readFloat();
 
     // read neuron classes
     int neuronClasses = input.readInt();
     this.neuronClassList = Lists.newArrayList();
     for (int i = 0; i < neuronClasses; ++i) {
       try {
-        Class<? extends Neuron> clazz = (Class<? extends Neuron>) Class
+        Class<? extends Neuron<?>> clazz = (Class<? extends Neuron<?>>) Class
             .forName(input.readUTF());
         neuronClassList.add(clazz);
       } catch (ClassNotFoundException e) {
@@ -306,12 +323,12 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
   @Override
   public void write(DataOutput output) throws IOException {
     super.write(output);
-
     output.writeInt(finalLayerIdx);
+    output.writeFloat(dropRate);
 
     // write neuron classes
     output.writeInt(this.neuronClassList.size());
-    for (Class<? extends Neuron> clazz : this.neuronClassList) {
+    for (Class<? extends Neuron<?>> clazz : this.neuronClassList) {
       output.writeUTF(clazz.getName());
     }
 
@@ -361,6 +378,10 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     return getOutputInternal(instanceWithBias);
   }
 
+  public void setDropRateOfInputLayer(float dropRate) {
+    this.dropRate = dropRate;
+  }
+
   /**
    * Calculate output internally, the intermediate output of each layer will be
    * stored.
@@ -370,9 +391,15 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
    */
   public FloatVector getOutputInternal(FloatVector instanceWithBias) {
     // sets the output of input layer
-    Neuron[] inputLayer = neurons.get(0);
+    Neuron<?>[] inputLayer = neurons.get(0);
     for (int i = 0; i < inputLayer.length; i++) {
-      inputLayer[i].setOutput(instanceWithBias.get(i));
+      float m2 = MathUtils.getBinomial(1, dropRate);
+      if(m2 == 0)
+        inputLayer[i].setDrop(true);
+      else
+        inputLayer[i].setDrop(false);
+      
+      inputLayer[i].setOutput(instanceWithBias.get(i) * m2);
     }
 
     for (int i = 0; i < this.layerSizeList.size() - 1; ++i) {
@@ -397,6 +424,60 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     return (Neuron) ReflectionUtils.newInstance(neuronClass);
   }
 
+  public class InputMessageIterable implements
+      Iterable<Synapse<FloatWritable, FloatWritable>> {
+    private int currNeuronID;
+    private int prevNeuronID;
+    private int end;
+    private FloatMatrix weightMat;
+    private Neuron<?>[] layer;
+
+    public InputMessageIterable(int fromLayer, int row) {
+      this.currNeuronID = row;
+      this.prevNeuronID = -1;
+      this.end = weightMatrixList.get(fromLayer).getColumnCount() - 1;
+      this.weightMat = weightMatrixList.get(fromLayer);
+      this.layer = neurons.get(fromLayer);
+    }
+
+    @Override
+    public Iterator<Synapse<FloatWritable, FloatWritable>> iterator() {
+      return new MessageIterator();
+    }
+
+    private class MessageIterator implements
+        Iterator<Synapse<FloatWritable, FloatWritable>> {
+
+      @Override
+      public boolean hasNext() {
+        if (prevNeuronID < end) {
+          return true;
+        } else {
+          return false;
+        }
+      }
+
+      private FloatWritable i = new FloatWritable();
+      private FloatWritable w = new FloatWritable();
+      private Synapse<FloatWritable, FloatWritable> msg = new Synapse<FloatWritable, FloatWritable>();
+      
+      @Override
+      public Synapse<FloatWritable, FloatWritable> next() {
+        prevNeuronID++;
+
+        i.set(layer[prevNeuronID].getOutput());
+        w.set(weightMat.get(currNeuronID, prevNeuronID));
+        msg.set(prevNeuronID, i, w);
+        return new Synapse<FloatWritable, FloatWritable>(prevNeuronID, i, w);
+      }
+
+      @Override
+      public void remove() {
+      }
+
+    }
+  }
+
   /**
    * Forward the calculation for one layer.
    * 
@@ -410,20 +491,15 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     FloatVector vec = new DenseFloatVector(weightMatrix.getRowCount());
 
     for (int row = 0; row < weightMatrix.getRowCount(); row++) {
-      List<Synapse<FloatWritable, FloatWritable>> msgs = new ArrayList<Synapse<FloatWritable, FloatWritable>>();
-      for (int col = 0; col < weightMatrix.getColumnCount(); col++) {
-        msgs.add(new Synapse<FloatWritable, FloatWritable>(new FloatWritable(
-            neurons.get(fromLayer)[col].getOutput()), new FloatWritable(
-            weightMatrix.get(row, col))));
-      }
-
-      Neuron n;
+      Neuron<?> n;
       if (curLayerIdx == finalLayerIdx)
         n = neurons.get(curLayerIdx)[row];
       else
         n = neurons.get(curLayerIdx)[row + 1];
 
       try {
+        Iterable msgs = new InputMessageIterable(fromLayer, row);
+        n.setIterationNumber(iterations);
         n.forward(msgs);
       } catch (IOException e) {
         // TODO Auto-generated catch block
@@ -518,7 +594,8 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     calculateTrainingError(labels, output);
 
     if (this.trainingMethod.equals(TrainingMethod.GRADIENT_DESCENT)) {
-      return this.trainByInstanceGradientDescent(labels);
+      FloatMatrix[] updates = this.trainByInstanceGradientDescent(labels);
+      return updates;
     } else {
       throw new IllegalArgumentException(
           String.format("Training method is not supported."));
@@ -563,7 +640,7 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
         costFuncDerivative *= squashingFunction.applyDerivative(finalOut);
       }
 
-      neurons.get(finalLayerIdx)[i].setDelta(costFuncDerivative);
+      neurons.get(finalLayerIdx)[i].backpropagate(costFuncDerivative);
       deltaVec.set(i, costFuncDerivative);
     }
 
@@ -576,6 +653,75 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     return weightUpdateMatrices;
   }
 
+  public class ErrorMessageIterable implements
+      Iterable<Synapse<FloatWritable, FloatWritable>> {
+    private int row;
+    private int neuronID;
+    private int end;
+    private FloatMatrix weightMat;
+    private FloatMatrix prevWeightMat;
+
+    private float[] nextLayerDelta;
+    
+    public ErrorMessageIterable(int curLayerIdx, int row) {
+      this.row = row;
+      this.neuronID = -1;
+      this.weightMat = weightMatrixList.get(curLayerIdx);
+      this.end = weightMat.getRowCount() - 1;
+      this.prevWeightMat = prevWeightUpdatesList.get(curLayerIdx);
+      
+      Neuron<?>[] nextLayer = neurons.get(curLayerIdx + 1);
+      nextLayerDelta = new float[weightMat.getRowCount()];
+      
+      for(int i = 0; i <= end; ++i) {
+        if (curLayerIdx + 1 == finalLayerIdx) {
+          nextLayerDelta[i] = nextLayer[i].getDelta();
+        } else {
+          nextLayerDelta[i] = nextLayer[i + 1].getDelta();
+        }
+      }
+    }
+
+    @Override
+    public Iterator<Synapse<FloatWritable, FloatWritable>> iterator() {
+      return new MessageIterator();
+    }
+
+    private class MessageIterator implements
+        Iterator<Synapse<FloatWritable, FloatWritable>> {
+
+      @Override
+      public boolean hasNext() {
+        if (neuronID < end) {
+          return true;
+        } else {
+          return false;
+        }
+      }
+
+      private FloatWritable d = new FloatWritable();
+      private FloatWritable w = new FloatWritable();
+      private FloatWritable p = new FloatWritable();
+      private Synapse<FloatWritable, FloatWritable> msg = new Synapse<FloatWritable, FloatWritable>();
+      
+      @Override
+      public Synapse<FloatWritable, FloatWritable> next() {
+        neuronID++;
+        
+        d.set(nextLayerDelta[neuronID]);
+        w.set(weightMat.get(neuronID, row));
+        p.set(prevWeightMat.get(neuronID, row));
+        msg.set(neuronID, d, w, p);
+        return msg;
+      }
+
+      @Override
+      public void remove() {
+      }
+
+    }
+  }
+
   /**
    * Back-propagate the errors to from next layer to current layer. The weight
    * updated information will be stored in the weightUpdateMatrices, and the
@@ -588,31 +734,18 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
       DenseFloatMatrix weightUpdateMatrix) {
 
     // get layer related information
-    FloatMatrix weightMatrix = this.weightMatrixList.get(curLayerIdx);
-    FloatMatrix prevWeightMatrix = this.prevWeightUpdatesList.get(curLayerIdx);
-
-    FloatVector deltaVector = new DenseFloatVector(
-        weightMatrix.getColumnCount());
-
-    for (int row = 0; row < weightMatrix.getColumnCount(); ++row) {
-      Neuron n = neurons.get(curLayerIdx)[row];
-      n.setWeightVector(weightMatrix.getRowCount());
+    int x = this.weightMatrixList.get(curLayerIdx).getColumnCount();
+    int y = this.weightMatrixList.get(curLayerIdx).getRowCount();
 
-      List<Synapse<FloatWritable, FloatWritable>> msgs = new ArrayList<Synapse<FloatWritable, FloatWritable>>();
-
-      for (int col = 0; col < weightMatrix.getRowCount(); ++col) {
-        float deltaOfNextLayer;
-        if (curLayerIdx + 1 == this.finalLayerIdx)
-          deltaOfNextLayer = neurons.get(curLayerIdx + 1)[col].getDelta();
-        else
-          deltaOfNextLayer = neurons.get(curLayerIdx + 1)[col + 1].getDelta();
-
-        msgs.add(new Synapse<FloatWritable, FloatWritable>(new FloatWritable(
-            deltaOfNextLayer), new FloatWritable(weightMatrix.get(col, row)),
-            new FloatWritable(prevWeightMatrix.get(col, row))));
-      }
+    FloatVector deltaVector = new DenseFloatVector(x);
+    Neuron<?>[] ns = neurons.get(curLayerIdx);
+    
+    for (int row = 0; row < x; ++row) {
+      Neuron<?> n = ns[row];
+      n.setWeightVector(y);
 
       try {
+        Iterable msgs = new ErrorMessageIterable(curLayerIdx, row);
         n.backward(msgs);
       } catch (IOException e) {
         // TODO Auto-generated catch block
@@ -652,6 +785,7 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
 
     job.getConfiguration().setInt(Constants.ADDITIONAL_BSP_TASKS, 1);
 
+    job.setBoolean("training.mode", true);
     job.setInputPath(new Path(conf.get("training.input.path")));
     job.setInputFormat(org.apache.hama.bsp.SequenceFileInputFormat.class);
     job.setInputKeyClass(LongWritable.class);
@@ -680,4 +814,8 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
     return this.squashingFunctionList.get(idx);
   }
 
+  public void setIterationNumber(long iterations) {
+    this.iterations = iterations;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
index e0810e2..259c1a9 100644
--- a/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
@@ -78,9 +78,9 @@ public final class LayeredNeuralNetworkTrainer
     this.maxIterations = conf.getLong("training.max.iterations", Long.MAX_VALUE);
     this.convergenceCheckInterval = conf.getLong("convergence.check.interval",
         100);
-    this.inMemoryModel = new LayeredNeuralNetwork(conf, modelPath);
+    this.inMemoryModel = new LayeredNeuralNetwork(conf, modelPath, true);
     this.prevAvgTrainingError = Integer.MAX_VALUE;
-    this.batchSize = conf.getInt("training.batch.size", 50);
+    this.batchSize = conf.getInt("training.batch.size", 5);
   }
 
   @Override
@@ -117,8 +117,14 @@ public final class LayeredNeuralNetworkTrainer
       FloatVector v = value.getVector();
       trainingSet.add(v);
     }
-
+    
+    if (peer.getPeerIndex() != peer.getNumPeers() - 1) {
+      LOG.debug(peer.getPeerName() + ": " + trainingSet.size() + " training instances loaded.");
+    }
+    
     while (this.iterations++ < maxIterations) {
+      this.inMemoryModel.setIterationNumber(iterations);
+      
       // each groom calculate the matrices updates according to local data
       if (peer.getPeerIndex() != peer.getNumPeers() - 1) {
         calculateUpdates(peer);
@@ -188,6 +194,7 @@ public final class LayeredNeuralNetworkTrainer
           this.inMemoryModel.trainByInstance(trainingInstance));
       avgTrainingError += this.inMemoryModel.trainingError;
     }
+    
     avgTrainingError /= batchSize;
 
     // calculate the average of updates

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/Neuron.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/Neuron.java b/src/main/java/org/apache/horn/core/Neuron.java
index 908abf4..7519201 100644
--- a/src/main/java/org/apache/horn/core/Neuron.java
+++ b/src/main/java/org/apache/horn/core/Neuron.java
@@ -24,7 +24,8 @@ import java.io.IOException;
 import org.apache.hadoop.io.Writable;
 import org.apache.hama.commons.math.FloatFunction;
 
-public abstract class Neuron<M extends Writable> implements Writable, NeuronInterface<M> {
+public abstract class Neuron<M extends Writable> implements Writable,
+    NeuronInterface<M> {
   int id;
   float output;
   float weight;
@@ -35,17 +36,20 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
 
   int layerIndex;
   boolean isOutputLayer;
-  
+  boolean isTraining;
+  boolean isDropped;
+  long iterations;
+
   protected FloatFunction squashingFunction;
 
   public void setNeuronID(int id) {
     this.id = id;
   }
-  
-  public int getID() {
+
+  public int getNeuronID() {
     return id;
   }
-  
+
   public int getLayerIndex() {
     return layerIndex;
   }
@@ -53,7 +57,7 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
   public void setLayerIndex(int index) {
     this.layerIndex = index;
   }
-  
+
   public void feedforward(float sum) {
     this.output = sum;
   }
@@ -62,10 +66,6 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
     this.delta = gradient;
   }
 
-  public void setDelta(float delta) {
-    this.delta = delta;
-  }
-  
   public float getDelta() {
     return delta;
   }
@@ -101,6 +101,7 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
   // ////////
 
   private int i;
+  float[] weights;
 
   public void push(float weight) {
     weights[i++] = weight;
@@ -110,8 +111,6 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
     return weight;
   }
 
-  float[] weights;
-
   public void setWeightVector(int rowCount) {
     i = 0;
     weights = new float[rowCount];
@@ -121,19 +120,34 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
     return weights;
   }
 
+  public void setWeights(float[] weights) {
+    this.weights = weights;
+  }
+
   public void setSquashingFunction(FloatFunction squashingFunction) {
     this.squashingFunction = squashingFunction;
   }
 
+  public void setTraining(boolean b) {
+    this.isTraining = b;
+  }
+
+  public boolean isTraining() {
+    return isTraining;
+  }
+
   @Override
   public void readFields(DataInput in) throws IOException {
     id = in.readInt();
     output = in.readFloat();
     weight = in.readFloat();
     delta = in.readFloat();
+    iterations = in.readLong();
 
     momentumWeight = in.readFloat();
     learningRate = in.readFloat();
+    isTraining = in.readBoolean();
+    isDropped = in.readBoolean();
   }
 
   @Override
@@ -142,9 +156,39 @@ public abstract class Neuron<M extends Writable> implements Writable, NeuronInte
     out.writeFloat(output);
     out.writeFloat(weight);
     out.writeFloat(delta);
-    
+    out.writeLong(iterations);
+
     out.writeFloat(momentumWeight);
     out.writeFloat(learningRate);
+    out.writeBoolean(isTraining);
+    out.writeBoolean(isDropped);
+  }
+
+  public void setIterationNumber(long iterations) {
+    this.iterations = iterations;
+  }
+
+  public long getIterationNumber() {
+    return iterations;
+  }
+
+  public boolean isDropped() {
+    return isDropped;
+  }
+
+  public void setDrop(boolean isDropped) {
+    this.isDropped = isDropped;
+  }
+  
+  private float nablaW;
+
+  public void setNablaW(float f) {
+    // TODO Auto-generated method stub
+    nablaW = f;
+  }
+  
+  public float getNablaW() {
+    return nablaW;
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/core/Synapse.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/Synapse.java b/src/main/java/org/apache/horn/core/Synapse.java
index 7e9db2a..ae34400 100644
--- a/src/main/java/org/apache/horn/core/Synapse.java
+++ b/src/main/java/org/apache/horn/core/Synapse.java
@@ -30,21 +30,44 @@ import org.apache.hadoop.io.Writable;
 public class Synapse<M extends Writable, W extends Writable> implements
     Writable {
 
+  int neuronID;
   FloatWritable message;
   FloatWritable weight;
   FloatWritable prevWeight;
 
-  public Synapse(FloatWritable message, FloatWritable weight) {
+  public Synapse() {
+  }
+  
+  public Synapse(int neuronID, FloatWritable message, FloatWritable weight) {
+    this.neuronID = neuronID;
     this.message = message;
     this.weight = weight;
   }
 
-  public Synapse(FloatWritable message, FloatWritable weight, FloatWritable prevWeight) {
+  public Synapse(int neuronID, FloatWritable message, FloatWritable weight, FloatWritable prevWeight) {
+    this.neuronID = neuronID;
     this.message = message;
     this.weight = weight;
     this.prevWeight = prevWeight;
   }
   
+  public void set(int neuronID, FloatWritable message, FloatWritable weight) {
+    this.neuronID = neuronID;
+    this.message = message;
+    this.weight = weight;
+  }
+  
+  public void set(int neuronID, FloatWritable message, FloatWritable weight, FloatWritable prevWeight) {
+    this.neuronID = neuronID;
+    this.message = message;
+    this.weight = weight;
+    this.prevWeight = prevWeight;
+  }
+  
+  public int getSenderID() {
+    return neuronID;
+  }
+  
   /**
    * @return the activation or error message
    */
@@ -74,12 +97,16 @@ public class Synapse<M extends Writable, W extends Writable> implements
   public void readFields(DataInput in) throws IOException {
     message.readFields(in);
     weight.readFields(in);
+    prevWeight.readFields(in);
+    neuronID = in.readInt();
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
     message.write(out);
     weight.write(out);
+    prevWeight.write(out);
+    out.writeInt(neuronID);
   }
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/examples/DropoutNeuron.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/examples/DropoutNeuron.java b/src/main/java/org/apache/horn/examples/DropoutNeuron.java
new file mode 100644
index 0000000..ec02570
--- /dev/null
+++ b/src/main/java/org/apache/horn/examples/DropoutNeuron.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.horn.examples;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.horn.core.Neuron;
+import org.apache.horn.core.Synapse;
+import org.apache.horn.utils.MathUtils;
+
+public class DropoutNeuron extends
+    Neuron<Synapse<FloatWritable, FloatWritable>> {
+
+  private float m2;
+
+  @Override
+  public void forward(Iterable<Synapse<FloatWritable, FloatWritable>> messages)
+      throws IOException {
+    m2 = (isTraining()) ? MathUtils.getBinomial(1, 0.5) : 0.5f;
+
+    if (m2 > 0) {
+      float sum = 0;
+      for (Synapse<FloatWritable, FloatWritable> m : messages) {
+        sum += m.getInput() * m.getWeight();
+      }
+
+      this.setDrop(false);
+      this.feedforward(squashingFunction.apply(sum) * m2);
+    } else {
+      this.setDrop(true);
+      this.feedforward(0);
+    }
+  }
+
+  @Override
+  public void backward(Iterable<Synapse<FloatWritable, FloatWritable>> messages)
+      throws IOException {
+    if (!this.isDropped()) {
+      float delta = 0;
+
+      for (Synapse<FloatWritable, FloatWritable> m : messages) {
+        // Calculates error gradient for each neuron
+        delta += (m.getDelta() * m.getWeight());
+
+        // Weight corrections
+        float weight = -this.getLearningRate() * m.getDelta()
+            * this.getOutput() + this.getMomentumWeight() * m.getPrevWeight();
+        this.push(weight);
+      }
+
+      this.backpropagate(delta * squashingFunction.applyDerivative(getOutput()));
+    } else {
+      this.backpropagate(0);
+    }
+  }
+
+}
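
A quick way to exercise the new neuron outside a cluster (this smoke test is mine, not part of the commit; the class name is hypothetical and the message setup borrows from TestNeuron below) is to drive forward() directly in evaluation mode, where m2 is the fixed 0.5 factor rather than a random mask. It assumes FunctionFactory.createFloatFunction accepts the function's simple name, as HornJob uses it:
```Java
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.FloatWritable;
import org.apache.horn.core.Synapse;
import org.apache.horn.examples.DropoutNeuron;
import org.apache.horn.funcs.FunctionFactory;

public class DropoutNeuronSmokeTest { // hypothetical, not part of the commit
  public static void main(String[] args) throws Exception {
    List<Synapse<FloatWritable, FloatWritable>> x =
        new ArrayList<Synapse<FloatWritable, FloatWritable>>();
    x.add(new Synapse<FloatWritable, FloatWritable>(0, new FloatWritable(1.0f),
        new FloatWritable(0.5f)));

    DropoutNeuron n = new DropoutNeuron();
    n.setSquashingFunction(FunctionFactory.createFloatFunction("ReLU"));
    n.setTraining(false); // evaluation mode: fixed 0.5 scale, no random drop
    n.forward(x);

    // Expected: ReLU(1.0 * 0.5) * 0.5 = 0.25
    System.out.println("output = " + n.getOutput());
  }
}
```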

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
index a787dda..5f3403b 100644
--- a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
+++ b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
@@ -26,7 +26,7 @@ import org.apache.horn.core.HornJob;
 import org.apache.horn.core.Neuron;
 import org.apache.horn.core.Synapse;
 import org.apache.horn.funcs.CrossEntropy;
-import org.apache.horn.funcs.Sigmoid;
+import org.apache.horn.funcs.ReLU;
 import org.apache.horn.funcs.SoftMax;
 
 public class MultiLayerPerceptron {
@@ -35,8 +35,7 @@ public class MultiLayerPerceptron {
       Neuron<Synapse<FloatWritable, FloatWritable>> {
 
     @Override
-    public void forward(
-        Iterable<Synapse<FloatWritable, FloatWritable>> messages)
+    public void forward(Iterable<Synapse<FloatWritable, FloatWritable>> messages)
         throws IOException {
       float sum = 0;
       for (Synapse<FloatWritable, FloatWritable> m : messages) {
@@ -49,19 +48,21 @@ public class MultiLayerPerceptron {
     public void backward(
         Iterable<Synapse<FloatWritable, FloatWritable>> messages)
         throws IOException {
-      float gradient = 0;
-      for (Synapse<FloatWritable, FloatWritable> m : messages) {
-        // Calculates error gradient for each neuron
-        gradient += (m.getDelta() * m.getWeight());
-
-        // Weight corrections
-        float weight = -this.getLearningRate() * this.getOutput()
-            * m.getDelta() + this.getMomentumWeight() * m.getPrevWeight();
-        this.push(weight);
+      float delta = 0;
+
+      if (!this.isDropped()) {
+        for (Synapse<FloatWritable, FloatWritable> m : messages) {
+          // Calculates error gradient for each neuron
+          delta += (m.getDelta() * m.getWeight());
+
+          // Weight corrections
+          float weight = -this.getLearningRate() * m.getDelta()
+              * this.getOutput() + this.getMomentumWeight() * m.getPrevWeight();
+          this.push(weight);
+        }
       }
 
-      this.backpropagate(gradient
-          * squashingFunction.applyDerivative(getOutput()));
+      this.backpropagate(delta * squashingFunction.applyDerivative(getOutput()));
     }
   }
 
@@ -79,13 +80,13 @@ public class MultiLayerPerceptron {
     job.setMomentumWeight(momemtumWeight);
     job.setRegularizationWeight(regularizationWeight);
 
-    job.setConvergenceCheckInterval(1000);
+    job.setConvergenceCheckInterval(100);
     job.setBatchSize(miniBatch);
 
     job.setTrainingMethod(TrainingMethod.GRADIENT_DESCENT);
 
-    job.inputLayer(features, Sigmoid.class, StandardNeuron.class);
-    job.addLayer(hu, Sigmoid.class, StandardNeuron.class);
+    job.inputLayer(features, 0.8f); // droprate
+    job.addLayer(hu, ReLU.class, DropoutNeuron.class);
     job.outputLayer(labels, SoftMax.class, StandardNeuron.class);
 
     job.setCostFunction(CrossEntropy.class);
@@ -111,7 +112,7 @@ public class MultiLayerPerceptron {
 
     long startTime = System.currentTimeMillis();
     if (ann.waitForCompletion(true)) {
-      System.out.println("Job Finished in "
+      System.out.println("Optimization Finished! "
           + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/funcs/CategoricalCrossEntropy.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/funcs/CategoricalCrossEntropy.java b/src/main/java/org/apache/horn/funcs/CategoricalCrossEntropy.java
index 887f24d..3e9bfac 100644
--- a/src/main/java/org/apache/horn/funcs/CategoricalCrossEntropy.java
+++ b/src/main/java/org/apache/horn/funcs/CategoricalCrossEntropy.java
@@ -20,15 +20,15 @@ package org.apache.horn.funcs;
 import org.apache.hama.commons.math.FloatFloatFunction;
 
 /**
- * for softmaxed output 
+ * for softmaxed output
  */
 public class CategoricalCrossEntropy extends FloatFloatFunction {
-  
-  private static final float epsilon = (float) 1e-8;
-  
+
+  private static final float epsilon = 1e-8f;
+
   @Override
   public float apply(float target, float actual) {
-    return -target * (float) Math.log(Math.max(actual, epsilon));
+    return -target * (float) Math.log(Math.min(Math.max(actual, epsilon), 1.0f));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/funcs/ReLU.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/funcs/ReLU.java b/src/main/java/org/apache/horn/funcs/ReLU.java
index 2f14f54..a185d39 100644
--- a/src/main/java/org/apache/horn/funcs/ReLU.java
+++ b/src/main/java/org/apache/horn/funcs/ReLU.java
@@ -30,15 +30,15 @@ public class ReLU extends FloatFunction {
 
   @Override
   public float apply(float value) {
-    return Math.max(0.001f, value);
+    return Math.max(0.0f, value);
   }
 
   @Override
   public float applyDerivative(float value) {
     if (value > 0)
-      return 0.999f;
+      return 1.0f;
     else
-      return 0.001f;
+      return 0.0f;
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/funcs/SoftMax.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/funcs/SoftMax.java b/src/main/java/org/apache/horn/funcs/SoftMax.java
index 710b489..4e42202 100644
--- a/src/main/java/org/apache/horn/funcs/SoftMax.java
+++ b/src/main/java/org/apache/horn/funcs/SoftMax.java
@@ -20,7 +20,6 @@ package org.apache.horn.funcs;
 import java.io.IOException;
 
 import org.apache.hama.commons.math.DenseFloatVector;
-import org.apache.hama.commons.math.DoubleVector;
 import org.apache.hama.commons.math.FloatFunction;
 import org.apache.hama.commons.math.FloatVector;
 import org.apache.horn.core.IntermediateOutput;
@@ -44,11 +43,13 @@ public class SoftMax extends FloatFunction {
     public FloatVector interlayer(FloatVector output) throws IOException {
       FloatVector expVec = new DenseFloatVector(output.getDimension());
       float sum = 0.0f;
+      float max = output.max(); // to avoid infinity
       for(int i = 0; i < output.getDimension(); ++i) {
-        float exp = (float) Math.exp(output.get(i));
+        float exp = (float) Math.exp(output.get(i) - max);
         sum += exp;
         expVec.set(i, exp);
       }
+      
       // divide by the sum of exponential of the whole vector
       FloatVector softmaxed = expVec.divide(sum);
       return softmaxed;

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/utils/MNISTConverter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/utils/MNISTConverter.java b/src/main/java/org/apache/horn/utils/MNISTConverter.java
index 25ea2a0..e13b9ff 100644
--- a/src/main/java/org/apache/horn/utils/MNISTConverter.java
+++ b/src/main/java/org/apache/horn/utils/MNISTConverter.java
@@ -49,6 +49,10 @@ public class MNISTConverter {
     String labels_data = args[1];
     String output = args[2];
 
+    HamaConfiguration conf = new HamaConfiguration();
+    conf.set("dfs.block.size", "11554432");
+    FileSystem fs = FileSystem.get(conf);
+
     DataInputStream imagesIn = new DataInputStream(new FileInputStream(
         new File(training_data)));
     DataInputStream labelsIn = new DataInputStream(new FileInputStream(
@@ -70,9 +74,6 @@ public class MNISTConverter {
       labels[n] = labelsIn.readByte();
     }
 
-    HamaConfiguration conf = new HamaConfiguration();
-    FileSystem fs = FileSystem.get(conf);
-
     @SuppressWarnings("deprecation")
     SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, new Path(
         output), LongWritable.class, FloatVectorWritable.class);

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/utils/MNISTEvaluator.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/utils/MNISTEvaluator.java b/src/main/java/org/apache/horn/utils/MNISTEvaluator.java
index ede0d3e..a41fdb3 100644
--- a/src/main/java/org/apache/horn/utils/MNISTEvaluator.java
+++ b/src/main/java/org/apache/horn/utils/MNISTEvaluator.java
@@ -21,7 +21,6 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.util.Random;
 
 import org.apache.hama.HamaConfiguration;
 import org.apache.hama.commons.math.DenseFloatVector;
@@ -60,41 +59,30 @@ public class MNISTEvaluator {
     imagesIn.readInt(); // Rows
     imagesIn.readInt(); // Cols
 
-    byte[][] images = new byte[count][PIXELS];
-    byte[] labels = new byte[count];
-    for (int n = 0; n < count; n++) {
-      imagesIn.readFully(images[n]);
-      labels[n] = labelsIn.readByte();
-    }
-
     HamaConfiguration conf = new HamaConfiguration();
     LayeredNeuralNetwork ann = new LayeredNeuralNetwork(conf, modelPath);
 
-    Random generator = new Random();
     int correct = 0;
     int total = 0;
-    for (int i = 0; i < count; i++) {
-      if (generator.nextInt(10) == 1) {
-        float[] vals = new float[PIXELS];
-        for (int j = 0; j < PIXELS; j++) {
-          vals[j] = rescale((images[i][j] & 0xff));
-        }
-        int label = (labels[i] & 0xff);
-
-        FloatVector instance = new DenseFloatVector(vals);
-        FloatVector result = ann.getOutput(instance);
-
-        if (getNumber(result) == label) {
-          correct++;
-        }
-        total++;
+
+    for (int n = 0; n < count; n++) {
+      byte[] vector = new byte[PIXELS];
+      imagesIn.readFully(vector);
+      int label = (labelsIn.readByte() & 0xff);
+
+      FloatVector instance = new DenseFloatVector(PIXELS);
+      for (int j = 0; j < PIXELS; j++) {
+        instance.set(j, rescale((vector[j] & 0xff)));
+      }
+      FloatVector result = ann.getOutput(instance);
+
+      if (getNumber(result) == label) {
+        correct++;
       }
+      total++;
     }
 
-    System.out.println(((double) correct / total * 100) + "%");
-    // TODO System.out.println("Precision = " + (tp / (tp + fp)));
-    // System.out.println("Recall = " + (tp / (tp + fn)));
-    // System.out.println("Accuracy = " + ((tp + tn) / (tp + tn + fp + fn)));
+    System.out.println("Accuracy: " + ((double) correct / total));
 
     imagesIn.close();
     labelsIn.close();

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/java/org/apache/horn/utils/MathUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/utils/MathUtils.java b/src/main/java/org/apache/horn/utils/MathUtils.java
new file mode 100644
index 0000000..164e221
--- /dev/null
+++ b/src/main/java/org/apache/horn/utils/MathUtils.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.horn.utils;
+
+public class MathUtils {
+
+  public static int getBinomial(int n, double p) {
+    int x = 0;
+    for (int i = 0; i < n; i++) {
+      if (Math.random() < p)
+        x++;
+    }
+    return x;
+  }
+  
+}
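
As a usage note (my gloss, not part of the commit): getBinomial(n, p) counts how many of n uniform draws fall below p, so getBinomial(1, p) is a Bernoulli draw returning 1 with probability p and 0 otherwise. That is how the input-layer mask in getOutputInternal and the hidden-layer mask in DropoutNeuron are produced, which means the value passed to inputLayer(features, 0.8f) acts as a keep probability (the input is dropped when the draw is 0). A minimal sanity check might look like:
```Java
import org.apache.horn.utils.MathUtils;

public class BinomialCheck { // hypothetical helper, not part of the commit
  public static void main(String[] args) {
    // The empirical keep rate of getBinomial(1, p) should approach p.
    int trials = 100000;
    int kept = 0;
    for (int i = 0; i < trials; i++) {
      kept += MathUtils.getBinomial(1, 0.8); // 0.8 matches the README's inputLayer value
    }
    System.out.println("empirical keep rate ~= " + kept / (double) trials); // ~0.8
  }
}
```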

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/src/main/resources/log4j.properties b/src/main/resources/log4j.properties
index f7bed47..f2a431d 100644
--- a/src/main/resources/log4j.properties
+++ b/src/main/resources/log4j.properties
@@ -81,5 +81,5 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}:
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
 #log4j.logger.org.apache.hadoop.dfs=DEBUG
-#log4j.logger.org.apache.hama=DEBUG
+log4j.logger.org.apache.hama=ERROR
 #log4j.logger.org.apache.zookeeper=DEBUG

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/test/java/org/apache/horn/core/TestNeuron.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/horn/core/TestNeuron.java b/src/test/java/org/apache/horn/core/TestNeuron.java
index c962746..a05e531 100644
--- a/src/test/java/org/apache/horn/core/TestNeuron.java
+++ b/src/test/java/org/apache/horn/core/TestNeuron.java
@@ -71,9 +71,9 @@ public class TestNeuron extends TestCase {
 
   public void testProp() throws IOException {
     List<Synapse<FloatWritable, FloatWritable>> x = new ArrayList<Synapse<FloatWritable, FloatWritable>>();
-    x.add(new Synapse<FloatWritable, FloatWritable>(new FloatWritable(1.0f),
+    x.add(new Synapse<FloatWritable, FloatWritable>(0, new FloatWritable(1.0f),
         new FloatWritable(0.5f)));
-    x.add(new Synapse<FloatWritable, FloatWritable>(new FloatWritable(1.0f),
+    x.add(new Synapse<FloatWritable, FloatWritable>(0, new FloatWritable(1.0f),
         new FloatWritable(0.4f)));
 
     MyNeuron n = new MyNeuron();
@@ -81,7 +81,7 @@ public class TestNeuron extends TestCase {
     assertEquals(0.5249792f, n.getOutput());
 
     x.clear();
-    x.add(new Synapse<FloatWritable, FloatWritable>(new FloatWritable(
+    x.add(new Synapse<FloatWritable, FloatWritable>(0, new FloatWritable(
         -0.1274f), new FloatWritable(-1.2f)));
     n.backward(x);
   }

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
index 2e87659..74ccbb6 100644
--- a/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
+++ b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
@@ -171,7 +171,7 @@ public class MultiLayerPerceptronTest extends HamaCluster {
       job.setTrainingSetPath(SEQTRAIN_DATA);
       job.setModelPath(MODEL_PATH);
 
-      job.setMaxIteration(1000);
+      job.setMaxIteration(1);
       job.setLearningRate(0.4f);
       job.setMomentumWeight(0.2f);
       job.setRegularizationWeight(0.001f);
@@ -181,7 +181,7 @@ public class MultiLayerPerceptronTest extends HamaCluster {
 
       job.setTrainingMethod(TrainingMethod.GRADIENT_DESCENT);
 
-      job.inputLayer(featureDimension, Sigmoid.class, StandardNeuron.class);
+      job.inputLayer(featureDimension, 0.8f);
       job.addLayer(featureDimension, Sigmoid.class, StandardNeuron.class);
       job.outputLayer(labelDimension, Sigmoid.class, StandardNeuron.class);
 

http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/d88a785b/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/src/test/resources/log4j.properties b/src/test/resources/log4j.properties
index f7bed47..6bcc0a4 100644
--- a/src/test/resources/log4j.properties
+++ b/src/test/resources/log4j.properties
@@ -81,5 +81,5 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}:
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
 #log4j.logger.org.apache.hadoop.dfs=DEBUG
-#log4j.logger.org.apache.hama=DEBUG
+#log4j.logger.org.apache.hama=ERROR
 #log4j.logger.org.apache.zookeeper=DEBUG