You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@systemml.apache.org by mb...@apache.org on 2018/05/21 04:57:30 UTC
systemml git commit: [SYSTEMML-2338] Fix DNN bias_multiply
correctness over sparse inputs
Repository: systemml
Updated Branches:
refs/heads/master 55ce4853c -> 73f9d417d
[SYSTEMML-2338] Fix DNN bias_multiply correctness over sparse inputs
This patch fixes incorrect index computation within the DNN-specific
bias_multiply operation over sparse inputs. Specifically, the bias lookup
accessed the wrong bias terms (the column index was reduced with modulo PQ
instead of divided by PQ to obtain the channel index).
Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/73f9d417
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/73f9d417
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/73f9d417
Branch: refs/heads/master
Commit: 73f9d417d8c36de009b4d2071c5e5cd6ae4fcfc7
Parents: 55ce485
Author: Matthias Boehm <mb...@gmail.com>
Authored: Sun May 20 21:58:57 2018 -0700
Committer: Matthias Boehm <mb...@gmail.com>
Committed: Sun May 20 21:58:57 2018 -0700
----------------------------------------------------------------------
.../sysml/runtime/matrix/data/LibMatrixDNN.java | 25 ++++++++++----------
1 file changed, 12 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/systemml/blob/73f9d417/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java
index e980246..2d2cf63 100644
--- a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java
+++ b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java
@@ -377,28 +377,27 @@ public class LibMatrixDNN {
}
}
else {
+ SparseBlock sblock = outputBlock.sparseBlock;
// First delete those elements which will become zero
for(int k = 0; k < K; k++) {
if(biasArr[k] == 0) {
for(int n = 0; n < N; n++) {
- outputBlock.sparseBlock.deleteIndexRange(n, k*PQ, (k+1)*PQ);
+ if( sblock.isEmpty(n) ) continue;
+ sblock.deleteIndexRange(n, k*PQ, (k+1)*PQ);
}
}
}
// Then perform bias_multiply for non-zero bias entries
for(int n = 0; n < N; n++) {
- if( !outputBlock.sparseBlock.isEmpty(n) ) {
- int apos = outputBlock.sparseBlock.pos(n);
- int alen = outputBlock.sparseBlock.size(n);
- int[] aix = outputBlock.sparseBlock.indexes(n);
- double[] avals = outputBlock.sparseBlock.values(n);
-
- for(int j=apos; j<apos+alen; j++) {
- // Since aix[j] => KPQ
- int k = aix[j] % PQ;
- if(biasArr[k] != 0)
- avals[j] *= biasArr[k];
- }
+ if( sblock.isEmpty(n) ) continue;
+ int apos = sblock.pos(n);
+ int alen = sblock.size(n);
+ int[] aix = sblock.indexes(n);
+ double[] avals = sblock.values(n);
+ for(int j=apos; j<apos+alen; j++) {
+ int k = aix[j] / PQ; //aix[j] KPQ
+ if(biasArr[k] != 0)
+ avals[j] *= biasArr[k];
}
}
}