Posted to dev@horn.apache.org by ed...@apache.org on 2016/04/28 08:25:58 UTC
incubator-horn git commit: HORN-19: fix bugs and add dependencies
Repository: incubator-horn
Updated Branches:
refs/heads/master 56dd8a4b4 -> 1a3500f7a
HORN-19: fix bugs and add dependencies
Project: http://git-wip-us.apache.org/repos/asf/incubator-horn/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-horn/commit/1a3500f7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-horn/tree/1a3500f7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-horn/diff/1a3500f7
Branch: refs/heads/master
Commit: 1a3500f7a89bde3a5b953478bc45fcdf4f8b8e68
Parents: 56dd8a4
Author: Edward J. Yoon <ed...@apache.org>
Authored: Tue Apr 26 18:10:47 2016 +0900
Committer: Edward J. Yoon <ed...@apache.org>
Committed: Thu Apr 28 13:30:31 2016 +0900
----------------------------------------------------------------------
README.md | 4 +-
bin/horn | 28 ++--
pom.xml | 19 ++-
.../apache/horn/core/LayeredNeuralNetwork.java | 13 +-
.../horn/core/LayeredNeuralNetworkTrainer.java | 6 +-
.../horn/examples/MultiLayerPerceptron.java | 2 +-
.../org/apache/horn/utils/MNISTConverter.java | 20 ++-
.../core/TestSmallLayeredNeuralNetwork.java | 132 +++++++++++--------
.../horn/examples/MultiLayerPerceptronTest.java | 2 +-
9 files changed, 138 insertions(+), 88 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 651cfa2..f30ebc6 100644
--- a/README.md
+++ b/README.md
@@ -30,8 +30,8 @@ Then, we measure the margin of error of the output and adjust the weights accord
this.backpropagate(gradient);
// Weight corrections
- double weight = -learningRate * this.getOutput() * m.getDelta()
- + momentum * m.getPrevWeight();
+ double weight = -this.getLearningRate() * this.getOutput()
+ * m.getDelta() + this.getMomentumWeight() * m.getPrevWeight();
this.push(weight);
}
}
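For reference, the README hunk above is the corrected per-neuron weight update: a plain gradient-descent step plus a momentum term that reuses the previous update. The same rule in standalone form (a minimal sketch; the parameters stand in for the neuron getters used in the README):

public class MomentumUpdateSketch {
  // -learningRate * output * delta is the gradient step; momentum *
  // prevWeight carries over a fraction of the previous update.
  static double weightUpdate(double learningRate, double momentum,
                             double output, double delta, double prevWeight) {
    return -learningRate * output * delta + momentum * prevWeight;
  }

  public static void main(String[] args) {
    // Illustrative numbers only.
    System.out.println(weightUpdate(0.1, 0.9, 0.5, 0.2, 0.01)); // ~ -0.001
  }
}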
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/bin/horn
----------------------------------------------------------------------
diff --git a/bin/horn b/bin/horn
index 539e186..e697695 100755
--- a/bin/horn
+++ b/bin/horn
@@ -114,8 +114,8 @@ fi
# so that filenames w/ spaces are handled correctly in loops below
IFS=
-# for releases, add core hama jar to CLASSPATH
-for f in $HORN_HOME/hama-**.jar; do
+# for releases, add core horn jar to CLASSPATH
+for f in $HORN_HOME/horn-**.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
@@ -133,13 +133,13 @@ fi
if [ "$HORN_LOG_DIR" = "" ]; then
HORN_LOG_DIR="$HORN_HOME/logs"
fi
-if [ "$HAMA_LOGFILE" = "" ]; then
- HAMA_LOGFILE='hama.log'
+if [ "$HORN_LOGFILE" = "" ]; then
+ HORN_LOGFILE='horn.log'
fi
# default policy file for service-level authorization
-if [ "$HAMA_POLICYFILE" = "" ]; then
- HAMA_POLICYFILE="hama-policy.xml"
+if [ "$HORN_POLICYFILE" = "" ]; then
+ HORN_POLICYFILE="horn-policy.xml"
fi
# restore ordinary behaviour
@@ -166,17 +166,17 @@ if $cygwin; then
JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
fi
-HAMA_OPTS="$HAMA_OPTS -Dhama.log.dir=$HORN_LOG_DIR"
-HAMA_OPTS="$HAMA_OPTS -Dhama.log.file=$HAMA_LOGFILE"
-HAMA_OPTS="$HAMA_OPTS -Dhama.home.dir=$HORN_HOME"
-HAMA_OPTS="$HAMA_OPTS -Dhama.id.str=$HAMA_IDENT_STRING"
-HAMA_OPTS="$HAMA_OPTS -Dhama.root.logger=${HAMA_ROOT_LOGGER:-INFO,console}"
+HORN_OPTS="$HORN_OPTS -Dhorn.log.dir=$HORN_LOG_DIR"
+HORN_OPTS="$HORN_OPTS -Dhorn.log.file=$HORN_LOGFILE"
+HORN_OPTS="$HORN_OPTS -Dhorn.home.dir=$HORN_HOME"
+HORN_OPTS="$HORN_OPTS -Dhorn.id.str=$HORN_IDENT_STRING"
+HORN_OPTS="$HORN_OPTS -Dhorn.root.logger=${HORN_ROOT_LOGGER:-INFO,console}"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
- HAMA_OPTS="$HAMA_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+ HORN_OPTS="$HORN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
fi
-HAMA_OPTS="$HAMA_OPTS -Dhama.policy.file=$HAMA_POLICYFILE"
+HORN_OPTS="$HORN_OPTS -Dhorn.policy.file=$HORN_POLICYFILE"
# run it
-exec "$JAVA" $JAVA_HEAP_MAX $HAMA_OPTS -classpath "$CLASSPATH" $CLASS "$@"
+exec "$JAVA" $JAVA_HEAP_MAX $HORN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5f83793..e7da3aa 100644
--- a/pom.xml
+++ b/pom.xml
@@ -44,10 +44,17 @@
<commons-httpclient.version>3.0.1</commons-httpclient.version>
<commons-io.version>2.4</commons-io.version>
<commons-collections.version>3.2.1</commons-collections.version>
+ <servlet-api.version>6.0.32</servlet-api.version>
+ <zookeeper.version>3.4.5</zookeeper.version>
</properties>
<dependencies>
<dependency>
+ <groupId>org.apache.htrace</groupId>
+ <artifactId>htrace-core</artifactId>
+ <version>3.1.0-incubating</version>
+ </dependency>
+ <dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
@@ -82,7 +89,17 @@
<artifactId>commons-collections</artifactId>
<version>${commons-collections.version}</version>
</dependency>
-
+ <dependency>
+ <groupId>org.apache.tomcat</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>${servlet-api.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>${zookeeper.version}</version>
+ </dependency>
+
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
index 32d6c64..c858d11 100644
--- a/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetwork.java
@@ -44,7 +44,6 @@ import org.apache.hama.commons.math.DoubleFunction;
import org.apache.hama.commons.math.DoubleMatrix;
import org.apache.hama.commons.math.DoubleVector;
import org.apache.hama.util.ReflectionUtils;
-import org.apache.horn.examples.MultiLayerPerceptron.StandardNeuron;
import org.apache.horn.funcs.FunctionFactory;
import com.google.common.base.Preconditions;
@@ -106,9 +105,19 @@ public class LayeredNeuralNetwork extends AbstractLayeredNeuralNetwork {
size += 1;
}
- LOG.info("Add Layer: " + size);
this.layerSizeList.add(size);
int layerIdx = this.layerSizeList.size() - 1;
+
+ if(layerIdx == 0) {
+ LOG.info("Input Layer: " + (size - 1) + " features");
+ } else {
+ if(!isFinalLayer) {
+ LOG.info("Hidden Layer: " + (size - 1) + " neurons with 1 bias");
+ } else {
+ LOG.info("Output Layer: " + size);
+ }
+ }
+
if (isFinalLayer) {
this.finalLayerIdx = layerIdx;
}
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
index effd5b0..68287ad 100644
--- a/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
+++ b/src/main/java/org/apache/horn/core/LayeredNeuralNetworkTrainer.java
@@ -102,13 +102,13 @@ public final class LayeredNeuralNetworkTrainer
* conf.getInt("convergence.check.interval", 2000);
String master = peer.getPeerName();
String masterAddr = master.substring(0, master.indexOf(':'));
- int port = conf.getInt("sync.server.port", 40052);
+ int port = conf.getInt("sync.server.port", 40089);
if (isMaster(peer)) {
try {
this.merger = RPC.getServer(new ParameterMergerServer(inMemoryModel,
isConverge, slaveCount, mergeLimit, convergenceCheckInterval),
- masterAddr, port, conf);
+ masterAddr, port, slaveCount, false, conf);
merger.start();
} catch (IOException e) {
e.printStackTrace();
@@ -154,7 +154,7 @@ public final class LayeredNeuralNetworkTrainer
calculateUpdates(peer);
}
}
-
+ peer.sync();
if (isMaster(peer)) {
merger.stop();
}
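Three fixes are visible in this file: the default sync port moves from 40052 to 40089, RPC.getServer now also receives a handler count (slaveCount) and a verbose flag, and a peer.sync() barrier is added so every slave finishes training before the master stops the merger server. The port stays overridable through the job configuration; a minimal sketch using Hadoop's Configuration (which HamaConfiguration extends):

import org.apache.hadoop.conf.Configuration;

public class SyncPortDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // New default is 40089 when the job sets nothing...
    System.out.println(conf.getInt("sync.server.port", 40089)); // 40089
    // ...but an explicit setting still wins.
    conf.setInt("sync.server.port", 50000);
    System.out.println(conf.getInt("sync.server.port", 40089)); // 50000
  }
}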
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
index 90c9db4..c24fa16 100644
--- a/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
+++ b/src/main/java/org/apache/horn/examples/MultiLayerPerceptron.java
@@ -81,7 +81,7 @@ public class MultiLayerPerceptron {
job.setBatchSize(300);
job.inputLayer(features, Sigmoid.class, StandardNeuron.class);
- job.addLayer(features, Sigmoid.class, StandardNeuron.class);
+ job.addLayer(15, Sigmoid.class, StandardNeuron.class);
job.outputLayer(labels, Sigmoid.class, StandardNeuron.class);
job.setCostFunction(CrossEntropy.class);
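The one-line fix above matters more than it looks: the example's hidden layer was accidentally as wide as the input layer (784 units for MNIST) instead of 15. A back-of-the-envelope weight count for MNIST-sized input (784 features, 10 labels), assuming one bias unit per non-final layer as in the addLayer() change earlier in this commit:

public class MlpParamCount {
  static int count(int in, int hidden, int out) {
    // (n + 1) accounts for the bias unit feeding the next layer.
    return (in + 1) * hidden + (hidden + 1) * out;
  }

  public static void main(String[] args) {
    int features = 28 * 28, labels = 10;
    System.out.println(count(features, features, labels)); // before: 623290
    System.out.println(count(features, 15, labels));       // after:   11935
  }
}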
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/main/java/org/apache/horn/utils/MNISTConverter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/horn/utils/MNISTConverter.java b/src/main/java/org/apache/horn/utils/MNISTConverter.java
index 224fc4b..3a2f8b8 100644
--- a/src/main/java/org/apache/horn/utils/MNISTConverter.java
+++ b/src/main/java/org/apache/horn/utils/MNISTConverter.java
@@ -34,12 +34,13 @@ public class MNISTConverter {
private static int PIXELS = 28 * 28;
public static void main(String[] args) throws Exception {
- if(args.length < 3) {
+ if (args.length < 3) {
System.out.println("Usage: TRAINING_DATA LABELS_DATA OUTPUT_PATH");
- System.out.println("ex) train-images.idx3-ubyte train-labels.idx1-ubyte /tmp/mnist.seq");
+ System.out
+ .println("ex) train-images.idx3-ubyte train-labels.idx1-ubyte /tmp/mnist.seq");
System.exit(1);
}
-
+
String training_data = args[0];
String labels_data = args[1];
String output = args[2];
@@ -73,15 +74,22 @@ public class MNISTConverter {
output), LongWritable.class, VectorWritable.class);
for (int i = 0; i < count; i++) {
- double[] vals = new double[PIXELS + 1];
+ double[] vals = new double[PIXELS + 10];
for (int j = 0; j < PIXELS; j++) {
vals[j] = (images[i][j] & 0xff);
}
- vals[PIXELS] = (labels[i] & 0xff);
+ int label = (labels[i] & 0xff);
+ for (int j = 0; j < 10; j++) {
+ if (j == label)
+ vals[PIXELS + j] = 1;
+ else
+ vals[PIXELS + j] = 0;
+ }
+
writer.append(new LongWritable(), new VectorWritable(
new DenseDoubleVector(vals)));
}
-
+
imagesIn.close();
labelsIn.close();
writer.close();
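The converter change above replaces the single trailing label value with a 10-element one-hot vector, matching an output layer of 10 sigmoid units. The encoding in isolation (a minimal sketch of the same logic):

public class OneHotDemo {
  static final int PIXELS = 28 * 28;

  static double[] encode(int[] pixels, int label) {
    double[] vals = new double[PIXELS + 10];
    for (int j = 0; j < PIXELS; j++) {
      vals[j] = pixels[j] & 0xff;   // raw grayscale intensity
    }
    vals[PIXELS + label] = 1;       // one-hot slot for the digit 0-9
    return vals;                    // remaining slots stay 0
  }

  public static void main(String[] args) {
    double[] v = encode(new int[PIXELS], 7);
    System.out.println(v[PIXELS + 7]); // 1.0
  }
}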
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
index 7e4328f..7e0cac9 100644
--- a/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
+++ b/src/test/java/org/apache/horn/core/TestSmallLayeredNeuralNetwork.java
@@ -62,11 +62,11 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
public void testReadWrite() {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
ann.addLayer(2, false,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.addLayer(5, false,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.addLayer(1, true,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
double learningRate = 0.2;
@@ -74,17 +74,16 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
double momentumWeight = 0.5;
// ann.setMomemtumWeight(momentumWeight);
double regularizationWeight = 0.05;
- //ann.setRegularizationWeight(regularizationWeight);
+ // ann.setRegularizationWeight(regularizationWeight);
// intentionally initialize all weights to 0.5
DoubleMatrix[] matrices = new DenseDoubleMatrix[2];
matrices[0] = new DenseDoubleMatrix(5, 3, 0.2);
matrices[1] = new DenseDoubleMatrix(1, 6, 0.8);
ann.setWeightMatrices(matrices);
ann.setLearningStyle(LearningStyle.UNSUPERVISED);
-
+
FeatureTransformer defaultFeatureTransformer = new DefaultFeatureTransformer();
ann.setFeatureTransformer(defaultFeatureTransformer);
-
// write to file
String modelPath = "/tmp/testSmallLayeredNeuralNetworkReadWrite";
@@ -96,13 +95,14 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
}
// read from file
- LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+ LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(
+ new HamaConfiguration(), modelPath);
assertEquals(annCopy.getClass().getSimpleName(), annCopy.getModelType());
assertEquals(modelPath, annCopy.getModelPath());
// assertEquals(learningRate, annCopy.getLearningRate(), 0.000001);
// assertEquals(momentumWeight, annCopy.getMomemtumWeight(), 0.000001);
- //assertEquals(regularizationWeight, annCopy.getRegularizationWeight(),
- // 0.000001);
+ // assertEquals(regularizationWeight, annCopy.getRegularizationWeight(),
+ // 0.000001);
assertEquals(TrainingMethod.GRADIENT_DESCENT, annCopy.getTrainingMethod());
assertEquals(LearningStyle.UNSUPERVISED, annCopy.getLearningStyle());
@@ -117,9 +117,10 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
}
}
}
-
+
FeatureTransformer copyTransformer = annCopy.getFeatureTransformer();
- assertEquals(defaultFeatureTransformer.getClass().getName(), copyTransformer.getClass().getName());
+ assertEquals(defaultFeatureTransformer.getClass().getName(),
+ copyTransformer.getClass().getName());
}
@Test
@@ -130,11 +131,11 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
// first network
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
ann.addLayer(2, false,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.addLayer(5, false,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.addLayer(1, true,
- FunctionFactory.createDoubleFunction("IdentityFunction"));
+ FunctionFactory.createDoubleFunction("IdentityFunction"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann.setLearningRate(0.1);
@@ -152,9 +153,12 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
// second network
LayeredNeuralNetwork ann2 = new LayeredNeuralNetwork();
- ann2.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann2.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann2.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ ann2.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann2.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann2.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
ann2.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann2.setLearningRate(0.3);
@@ -171,9 +175,12 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
assertArrayEquals(result2, vec.toArray(), 0.000001);
LayeredNeuralNetwork ann3 = new LayeredNeuralNetwork();
- ann3.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann3.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann3.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ ann3.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann3.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann3.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
ann3.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann3.setLearningRate(0.3);
@@ -191,9 +198,11 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
@Test
public void testXORlocal() {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
- ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann.setLearningRate(0.5);
@@ -228,7 +237,8 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
} catch (IOException e) {
e.printStackTrace();
}
- LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+ LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(
+ new HamaConfiguration(), modelPath);
// test on instances
for (int i = 0; i < instances.length; ++i) {
DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -244,9 +254,11 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
@Test
public void testXORWithMomentum() {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
- ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann.setLearningRate(0.6);
@@ -278,7 +290,8 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
} catch (IOException e) {
e.printStackTrace();
}
- LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+ LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(
+ new HamaConfiguration(), modelPath);
// test on instances
for (int i = 0; i < instances.length; ++i) {
DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -294,14 +307,16 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
@Test
public void testXORLocalWithRegularization() {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
- ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"),
+ null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("SquaredError"));
// ann.setLearningRate(0.7);
// ann.setMomemtumWeight(0.5);
- //ann.setRegularizationWeight(0.002);
+ // ann.setRegularizationWeight(0.002);
int iterations = 5000; // iteration should be set to a very large number
double[][] instances = { { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 1 }, { 1, 1, 0 } };
@@ -329,7 +344,8 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
} catch (IOException e) {
e.printStackTrace();
}
- LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(new HamaConfiguration(), modelPath);
+ LayeredNeuralNetwork annCopy = new LayeredNeuralNetwork(
+ new HamaConfiguration(), modelPath);
// test on instances
for (int i = 0; i < instances.length; ++i) {
DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
@@ -367,7 +383,7 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
}
zeroOneNormalization(instanceList, instanceList.get(0).length - 1);
-
+
int dimension = instanceList.get(0).length - 1;
// divide dataset into training and testing
@@ -380,14 +396,14 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
// ann.setLearningRate(0.001);
// ann.setMomemtumWeight(0.1);
- //ann.setRegularizationWeight(0.01);
+ // ann.setRegularizationWeight(0.01);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("CrossEntropy"));
@@ -417,7 +433,7 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
Log.info(String.format("Relative error: %f%%\n", errorRate * 100));
}
-
+
@Test
public void testLogisticRegression() {
this.testLogisticRegressionDistributedVersion();
@@ -455,9 +471,9 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
instanceList.add(instance);
}
br.close();
-
+
zeroOneNormalization(instanceList, instanceList.get(0).length - 1);
-
+
// write training data to temporal sequence file
SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
tmpDatasetPath, LongWritable.class, VectorWritable.class);
@@ -488,14 +504,14 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
// ann.setLearningRate(0.7);
// ann.setMomemtumWeight(0.5);
- //ann.setRegularizationWeight(0.1);
+ // ann.setRegularizationWeight(0.1);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("CrossEntropy"));
ann.setModelPath(modelPath);
@@ -506,7 +522,7 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
trainingParameters.put("training.max.iterations", "2000");
trainingParameters.put("training.batch.size", "300");
trainingParameters.put("convergence.check.interval", "1000");
- //ann.train(new HamaConfiguration(), tmpDatasetPath, trainingParameters);
+ // ann.train(new HamaConfiguration(), tmpDatasetPath, trainingParameters);
long end = new Date().getTime();
@@ -560,9 +576,9 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
instanceList.add(instance);
}
br.close();
-
+
zeroOneNormalization(instanceList, instanceList.get(0).length - 1);
-
+
// write training data to temporal sequence file
SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
tmpDatasetPath, LongWritable.class, VectorWritable.class);
@@ -593,20 +609,20 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
// ann.setLearningRate(0.7);
// ann.setMomemtumWeight(0.5);
- //ann.setRegularizationWeight(0.1);
+ // ann.setRegularizationWeight(0.1);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(dimension, false,
- FunctionFactory.createDoubleFunction("Sigmoid"));
- ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
+ FunctionFactory.createDoubleFunction("Sigmoid"), null);
+ ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.setCostFunction(FunctionFactory
.createDoubleDoubleFunction("CrossEntropy"));
ann.setModelPath(modelPath);
-
+
FeatureTransformer featureTransformer = new DefaultFeatureTransformer();
-
+
ann.setFeatureTransformer(featureTransformer);
long start = new Date().getTime();
@@ -615,7 +631,7 @@ public class TestSmallLayeredNeuralNetwork extends MLTestBase {
trainingParameters.put("training.max.iterations", "2000");
trainingParameters.put("training.batch.size", "300");
trainingParameters.put("convergence.check.interval", "1000");
- //ann.train(new HamaConfiguration(), tmpDatasetPath, trainingParameters);
+ // ann.train(new HamaConfiguration(), tmpDatasetPath, trainingParameters);
long end = new Date().getTime();
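The test changes above are mostly mechanical: addLayer() gained a fourth parameter for the neuron class, and every call site passes null, which presumably selects a default neuron implementation (the diff itself does not show the fallback). The resulting call shape, inside a test method with the same imports as the file above:

LayeredNeuralNetwork ann = new LayeredNeuralNetwork();
ann.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"), null);
ann.addLayer(1, true,  FunctionFactory.createDoubleFunction("Sigmoid"), null);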
http://git-wip-us.apache.org/repos/asf/incubator-horn/blob/1a3500f7/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
index fd24c4f..80a08f2 100644
--- a/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
+++ b/src/test/java/org/apache/horn/examples/MultiLayerPerceptronTest.java
@@ -126,7 +126,7 @@ public class MultiLayerPerceptronTest extends HamaCluster {
} finally {
fs.delete(new Path(RESULT_PATH), true);
fs.delete(new Path(MODEL_PATH), true);
- fs.delete(new Path(SEQTRAIN_DATA), true);
+ //fs.delete(new Path(SEQTRAIN_DATA), true);
}
}