You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by se...@apache.org on 2017/02/07 20:59:22 UTC

[54/70] [abbrv] hive git commit: HIVE-15745: TestMiniLlapLocalCliDriver vector_varchar_simple, vector_char_simple (Matt McCline, reviewed by Wei Zheng)

HIVE-15745: TestMiniLlapLocalCliDriver vector_varchar_simple, vector_char_simple (Matt McCline, reviewed by Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ea9e851d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ea9e851d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ea9e851d

Branch: refs/heads/hive-14535
Commit: ea9e851dec916736b28843771fe56dc3a583f126
Parents: def0cde
Author: Matt McCline <mm...@hortonworks.com>
Authored: Mon Feb 6 18:19:47 2017 -0600
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Mon Feb 6 18:19:47 2017 -0600

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/tez/ReduceRecordSource.java      | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ea9e851d/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
index d7264c2..8cd49c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
@@ -60,6 +60,8 @@ import org.apache.tez.runtime.api.Reader;
 import org.apache.tez.runtime.library.api.KeyValueReader;
 import org.apache.tez.runtime.library.api.KeyValuesReader;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Process input from tez LogicalInput and write output - for a map plan
  * Just pump the records through the query plan.
@@ -100,7 +102,6 @@ public class ReduceRecordSource implements RecordSource {
 
   // number of columns pertaining to keys in a vectorized row batch
   private int firstValueColumnOffset;
-  private final int BATCH_SIZE = VectorizedRowBatch.DEFAULT_SIZE;
 
   private StructObjectInspector keyStructInspector;
   private StructObjectInspector valueStructInspectors;
@@ -428,6 +429,8 @@ public class ReduceRecordSource implements RecordSource {
       VectorizedBatchUtil.setRepeatingColumn(batch, i);
     }
 
+    final int maxSize = batch.getMaxSize();
+    Preconditions.checkState(maxSize > 0);
     int rowIdx = 0;
     try {
       for (Object value : values) {
@@ -444,8 +447,10 @@ public class ReduceRecordSource implements RecordSource {
           valueLazyBinaryDeserializeToRow.deserialize(batch, rowIdx);
         }
         rowIdx++;
-        if (rowIdx >= BATCH_SIZE) {
-          VectorizedBatchUtil.setBatchSize(batch, rowIdx);
+        if (rowIdx >= maxSize) {
+
+          // Batch is full.
+          batch.size = rowIdx;
           reducer.process(batch, tag);
 
           // Reset just the value columns and value buffer.