Posted to commits@impala.apache.org by ta...@apache.org on 2017/05/25 15:48:40 UTC

[3/4] incubator-impala git commit: IMPALA-5358: Fix repeatable table sample.

IMPALA-5358: Fix repeatable table sample.

The bug was a simple oversight: the sampling code iterated over an
unordered collection of partitions, so the seeded file selection could
visit candidates in a different order from run to run, making the
sample non-repeatable across otherwise identical queries.
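
For context, a minimal sketch of why iteration order matters (this is
illustrative, not Impala's actual code; the Partition class and
pickSample helper below are hypothetical): a sample driven by a seeded
random generator is only repeatable if the candidates are visited in
the same order every time.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

public class SampleOrderDemo {
  // Hypothetical stand-in for a partition; not Impala's HdfsPartition.
  static class Partition {
    final long id;
    Partition(long id) { this.id = id; }
  }

  // Selects roughly 'percent' of the partitions using a fixed seed.
  // Repeatable ONLY if 'parts' is visited in the same order on every
  // call: the i-th draw from 'rng' is paired with the i-th element, so
  // a different iteration order can yield a different sample.
  static List<Partition> pickSample(List<Partition> parts, double percent,
      long seed) {
    Random rng = new Random(seed);
    List<Partition> sample = new ArrayList<>();
    for (Partition p : parts) {
      if (rng.nextDouble() < percent) sample.add(p);
    }
    return sample;
  }

  public static void main(String[] args) {
    List<Partition> a = Arrays.asList(
        new Partition(1), new Partition(2), new Partition(3));
    List<Partition> b = Arrays.asList(
        new Partition(3), new Partition(1), new Partition(2));
    // Same seed, same elements, different order => possibly different
    // samples.
    System.out.println(pickSample(a, 0.5, 42).size() + " vs "
        + pickSample(b, 0.5, 42).size());
  }
}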

Change-Id: I9110751b075430b068b52d7441e5845f86d1b6af
Reviewed-on: http://gerrit.cloudera.org:8080/6985
Reviewed-by: Henry Robinson <he...@cloudera.com>
Tested-by: Impala Public Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/25290a00
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/25290a00
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/25290a00

Branch: refs/heads/master
Commit: 25290a00caf28cca42f74848ca0fbeaeff03489b
Parents: 3e58bf4
Author: Alex Behm <al...@cloudera.com>
Authored: Wed May 24 18:06:44 2017 -0700
Committer: Impala Public Jenkins <im...@gerrit.cloudera.org>
Committed: Thu May 25 08:24:16 2017 +0000

----------------------------------------------------------------------
 fe/src/main/java/org/apache/impala/catalog/HdfsTable.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/25290a00/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index b7ba44f..55e4197 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -1956,7 +1956,7 @@ public class HdfsTable extends Table {
     HdfsPartition[] parts = new HdfsPartition[totalNumFiles];
     int idx = 0;
     long totalBytes = 0;
-    for (HdfsPartition part: inputParts) {
+    for (HdfsPartition part: orderedParts) {
       totalBytes += part.getSize();
       int numFds = part.getNumFileDescriptors();
       for (int fileIdx = 0; fileIdx < numFds; ++fileIdx) {
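
The one-line fix switches the loop from inputParts to orderedParts so
the parts array is filled in a deterministic order before the seeded
sampling pass. As a hedged sketch of the general technique (the generic
orderBy helper below is illustrative, not Impala's code), an unordered
collection can be given a fixed iteration order by sorting on a stable,
unique key:

import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.function.ToLongFunction;

public class DeterministicOrder {
  // Sorts an unordered collection on a stable, unique long key so that
  // iteration order is identical on every call.
  static <T> List<T> orderBy(Collection<T> unordered, ToLongFunction<T> key) {
    List<T> ordered = new ArrayList<>(unordered);
    ordered.sort(Comparator.comparingLong(key));
    return ordered;
  }
}

Assuming the partition type exposes a stable numeric id (an assumption
here, not shown in this diff), a call site would look like:
List<HdfsPartition> orderedParts = orderBy(inputParts, HdfsPartition::getId);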