Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 00:28:22 UTC
svn commit: r1629545 [1/12] - in /hive/branches/spark-new:
itests/qtest-spark/ ql/src/java/org/apache/hadoop/hive/ql/exec/
ql/src/java/org/apache/hadoop/hive/ql/exec/mr/
ql/src/java/org/apache/hadoop/hive/ql/exec/spark/
ql/src/java/org/apache/hadoop/hi...
Author: brock
Date: Sun Oct 5 22:28:19 2014
New Revision: 1629545
URL: http://svn.apache.org/r1629545
Log:
HIVE-8353 - Merge trunk into spark 10/4/2014 (post-merge fixes)
Modified:
hive/branches/spark-new/itests/qtest-spark/pom.xml
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/count.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ctas.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_position.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/having.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/innerjoin.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input12.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input13.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input14.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input17.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input18.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input1_limit.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/input_part2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/insert1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/insert_into1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/insert_into2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/insert_into3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join0.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join10.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join11.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join12.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join13.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join14.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join15.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join16.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join17.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join18.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join19.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join20.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join21.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join22.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join23.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join25.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join26.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join27.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join5.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join6.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join7.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join8.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/merge1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/merge2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/order.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/order2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/parallel.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/parallel_join0.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample5.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample6.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample7.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample8.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sample9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/script_pipe.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/sort.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/temp_table.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/transform1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union10.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union11.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union14.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union15.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union16.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union18.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union19.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union23.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union25.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union28.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union3.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union30.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union33.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union5.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union6.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union7.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_ppr.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_6.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
hive/branches/spark-new/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
Modified: hive/branches/spark-new/itests/qtest-spark/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/itests/qtest-spark/pom.xml?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/itests/qtest-spark/pom.xml (original)
+++ hive/branches/spark-new/itests/qtest-spark/pom.xml Sun Oct 5 22:28:19 2014
@@ -53,6 +53,38 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-api</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-client</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>${spark.jetty.version}</version>
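[Editor's note: the tez-api dependency added above is marked optional and excludes Hadoop's transitive artifacts (hadoop-common, the three mapreduce-client modules, hadoop-hdfs, hadoop-yarn-client). Presumably the trunk merge left Tez class references reachable from the Spark integration tests, so qtest-spark needs tez-api on its test classpath, while the exclusions keep it from dragging a second, potentially conflicting, set of Hadoop jars into the Spark test environment.]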
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java Sun Oct 5 22:28:19 2014
@@ -76,7 +76,7 @@ public class FilterOperator extends Oper
statsMap.put(Counter.FILTERED, filtered_count);
statsMap.put(Counter.PASSED, passed_count);
conditionInspector = null;
- ioContext = IOContext.get(hconf.get(Utilities.INPUT_NAME));
+ ioContext = IOContext.get(hconf);
} catch (Throwable e) {
throw new HiveException(e);
}
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Sun Oct 5 22:28:19 2014
@@ -339,7 +339,7 @@ public class MapOperator extends Operato
}
public void setChildren(Configuration hconf) throws HiveException {
- Path fpath = IOContext.get(hconf.get(Utilities.INPUT_NAME)).getInputPath();
+ Path fpath = IOContext.get(hconf).getInputPath();
boolean schemeless = fpath.toUri().getScheme() == null;
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java Sun Oct 5 22:28:19 2014
@@ -63,7 +63,7 @@ public class ExecMapperContext {
public ExecMapperContext(JobConf jc) {
this.jc = jc;
- ioCxt = IOContext.get(jc.get(Utilities.INPUT_NAME));
+ ioCxt = IOContext.get(jc);
}
public void clear() {
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java Sun Oct 5 22:28:19 2014
@@ -63,8 +63,7 @@ public class SparkMapRecordHandler exten
private MapredLocalWork localWork = null;
private boolean isLogInfoEnabled = false;
-
- private final ExecMapperContext execContext = new ExecMapperContext();
+ private ExecMapperContext execContext;
public void init(JobConf job, OutputCollector output, Reporter reporter) {
super.init(job, output, reporter);
@@ -74,7 +73,7 @@ public class SparkMapRecordHandler exten
try {
jc = job;
- execContext.setJc(jc);
+ execContext = new ExecMapperContext(jc);
// create map and fetch operators
MapWork mrwork = (MapWork) cache.retrieve(PLAN_KEY);
if (mrwork == null) {
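[Editor's note: this hunk is a consequence of the ExecMapperContext change above. Its constructor now takes the JobConf (so it can resolve an IOContext from the configuration), which means SparkMapRecordHandler can no longer build the context in a final field initializer and must defer creation to init(). A minimal sketch of the resulting pattern, assuming the simplified signatures shown in these hunks; the surrounding class is illustrative only:

    import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
    import org.apache.hadoop.mapred.JobConf;

    public class RecordHandlerInitSketch {
      private ExecMapperContext execContext; // was: final, eagerly constructed
      private JobConf jc;

      // The JobConf only becomes available here, so the context -- whose
      // constructor now calls IOContext.get(jc) -- is created lazily.
      public void init(JobConf job) {
        jc = job;
        execContext = new ExecMapperContext(jc);
      }
    }
]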
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java Sun Oct 5 22:28:19 2014
@@ -186,7 +186,7 @@ public class SparkReduceRecordHandler ex
} else {
ois.add(keyObjectInspector);
ois.add(valueObjectInspector[tag]);
- reducer.setGroupKeyObjectInspector(keyObjectInspector);
+ //reducer.setGroupKeyObjectInspector(keyObjectInspector);
rowObjectInspector[tag] = ObjectInspectorFactory.getStandardStructObjectInspector(
Utilities.reduceFieldNameList, ois);
}
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java Sun Oct 5 22:28:19 2014
@@ -161,7 +161,7 @@ public abstract class HiveContextAwareRe
}
public IOContext getIOContext() {
- return IOContext.get(jobConf.get(Utilities.INPUT_NAME));
+ return IOContext.get(jobConf);
}
private void initIOContext(long startPos, boolean isBlockPointer,
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java Sun Oct 5 22:28:19 2014
@@ -23,7 +23,10 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -50,7 +53,15 @@ public class IOContext {
return inputNameIOContextMap;
}
- public static IOContext get(String inputName) {
+ public static IOContext get() {
+ return IOContext.threadLocal.get();
+ }
+
+ public static IOContext get(Configuration conf) {
+ if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+ return get();
+ }
+ String inputName = conf.get(Utilities.INPUT_NAME);
if (inputNameIOContextMap.containsKey(inputName) == false) {
IOContext ioContext = new IOContext();
inputNameIOContextMap.put(inputName, ioContext);
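[Editor's note: this hunk is the heart of the post-merge fix. IOContext.get(String) is replaced by a no-argument get() backed by the class's thread-local, plus a get(Configuration) overload that dispatches on hive.execution.engine: "spark" returns the thread-local instance (presumably so concurrent tasks sharing one executor JVM do not collide in the static map), while MR and Tez fall through to the existing map keyed by Utilities.INPUT_NAME. The call sites rewritten earlier in this commit (FilterOperator, MapOperator, ExecMapperContext, HiveContextAwareRecordReader, and the two tests) all switch to passing the Configuration and letting IOContext choose. A minimal usage sketch, assuming this branch on the classpath; the input name and the standalone main are illustrative only:

    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.io.IOContext;
    import org.apache.hadoop.mapred.JobConf;

    public class IOContextDispatchSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();

        // Engine "spark": the overload returns the thread-local instance,
        // per the added get() method above.
        conf.set("hive.execution.engine", "spark");
        IOContext sparkCtx = IOContext.get(conf);

        // Any other engine: same per-input-name lookup as before the patch.
        conf.set("hive.execution.engine", "mr");
        conf.set(Utilities.INPUT_NAME, "map-input-1");
        IOContext mrCtx = IOContext.get(conf);

        System.out.println(sparkCtx == mrCtx); // distinct instances
      }
    }
]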
Modified: hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (original)
+++ hive/branches/spark-new/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java Sun Oct 5 22:28:19 2014
@@ -53,6 +53,7 @@ public class Optimizer {
public void initialize(HiveConf hiveConf) {
boolean isTezExecEngine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
+ boolean isSparkExecEngine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark");
boolean bucketMapJoinOptimizer = false;
transformations = new ArrayList<Transform>();
@@ -134,7 +135,7 @@ public class Optimizer {
if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) {
transformations.add(new StatsOptimizer());
}
- if (pctx.getContext().getExplain() && !isTezExecEngine) {
+ if (pctx.getContext().getExplain() && !isSparkExecEngine && !isTezExecEngine) {
transformations.add(new AnnotateWithStatistics());
transformations.add(new AnnotateWithOpTraits());
}
Modified: hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Sun Oct 5 22:28:19 2014
@@ -331,7 +331,7 @@ public class TestOperators extends TestC
Configuration hconf = new JobConf(TestOperators.class);
HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
"hdfs:///testDir/testFile");
- IOContext.get(hconf.get(Utilities.INPUT_NAME)).setInputPath(
+ IOContext.get(hconf).setInputPath(
new Path("hdfs:///testDir/testFile"));
// initialize pathToAliases
Modified: hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java (original)
+++ hive/branches/spark-new/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java Sun Oct 5 22:28:19 2014
@@ -116,7 +116,7 @@ public class TestHiveBinarySearchRecordR
private void resetIOContext() {
conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader");
- ioContext = IOContext.get(conf.get(Utilities.INPUT_NAME));
+ ioContext = IOContext.get(conf);
ioContext.setUseSorted(false);
ioContext.setIsBinarySearching(false);
ioContext.setEndBinarySearch(false);
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out Sun Oct 5 22:28:19 2014
@@ -68,9 +68,11 @@ value string
#### A masked pattern was here ####
PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
PREHOOK: Output: default@src_orc_merge_test_stat
POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
POSTHOOK: Output: default@src_orc_merge_test_stat
PREHOOK: query: desc formatted src_orc_merge_test_stat
PREHOOK: type: DESCTABLE
@@ -118,9 +120,11 @@ POSTHOOK: Input: default@src_orc_merge_t
POSTHOOK: Output: default@src_orc_merge_test_stat
PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
PREHOOK: Output: default@src_orc_merge_test_stat
POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
POSTHOOK: Output: default@src_orc_merge_test_stat
PREHOOK: query: desc formatted src_orc_merge_test_stat
PREHOOK: type: DESCTABLE
@@ -249,8 +253,6 @@ Protect Mode: None
Partition Parameters:
COLUMN_STATS_ACCURATE true
numFiles 3
- numRows -1
- rawDataSize -1
totalSize 7488
#### A masked pattern was here ####
@@ -266,10 +268,12 @@ Storage Desc Params:
serialization.format 1
PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
PREHOOK: Output: default@src_orc_merge_test_part_stat
PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
POSTHOOK: Output: default@src_orc_merge_test_part_stat
POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
@@ -323,10 +327,12 @@ POSTHOOK: Input: default@src_orc_merge_t
POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
PREHOOK: Output: default@src_orc_merge_test_part_stat
PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
POSTHOOK: Output: default@src_orc_merge_test_part_stat
POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket2.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket2.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket2.q.out Sun Oct 5 22:28:19 2014 differ
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket3.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket3.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket3.q.out Sun Oct 5 22:28:19 2014 differ
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket4.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket4.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/spark/bucket4.q.out Sun Oct 5 22:28:19 2014 differ
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/column_access_stats.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/column_access_stats.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/column_access_stats.q.out Sun Oct 5 22:28:19 2014
@@ -83,36 +83,19 @@ PREHOOK: query: -- More complicated sele
EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1
PREHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Spark
-#### A masked pattern was here ####
- Vertices:
- Map 1
- Map Operator Tree:
- TableScan
- alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: t1
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ ListSink
PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
PREHOOK: type: QUERY
@@ -130,36 +113,19 @@ Columns:key
PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
PREHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Spark
-#### A masked pattern was here ####
- Vertices:
- Map 1
- Map Operator Tree:
- TableScan
- alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: t1
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ ListSink
PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
PREHOOK: type: QUERY
@@ -418,28 +384,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: t2
- Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: key (type: string)
sort order: +
Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Map 3
Map Operator Tree:
TableScan
alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: key (type: string)
sort order: +
Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Join Operator
@@ -449,14 +409,11 @@ STAGE PLANS:
0 {KEY.reducesinkkey0}
1
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,28 +498,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: t2
- Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((val = 3) and key is not null) (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: key (type: string)
sort order: +
Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Map 3
Map Operator Tree:
TableScan
alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((val = 3) and key is not null) (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: key (type: string)
sort order: +
Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Join Operator
@@ -572,14 +523,11 @@ STAGE PLANS:
0 {KEY.reducesinkkey0}
1 {KEY.reducesinkkey0}
outputColumnNames: _col0, _col5
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: _col0 (type: string), '3' (type: string), _col5 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -631,36 +579,28 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: t2
- Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((key = 6) and val is not null) (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: val (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Map 3
Map Operator Tree:
TableScan
alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((key = 5) and val is not null) (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: val (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Join Operator
@@ -670,14 +610,11 @@ STAGE PLANS:
0 {KEY.reducesinkkey0}
1
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -744,50 +681,39 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: t3
- Statistics: Num rows: 0 Data size: 35 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: key (type: string)
sort order: +
Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
value expressions: val (type: string)
Map 3
Map Operator Tree:
TableScan
alias: t2
- Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Map 5
Map Operator Tree:
TableScan
alias: t1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Join Operator
@@ -797,14 +723,11 @@ STAGE PLANS:
0 {KEY.reducesinkkey0}
1 {KEY.reducesinkkey0} {VALUE._col0}
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -818,16 +741,13 @@ STAGE PLANS:
0 {KEY.reducesinkkey0}
1
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
Stage: Stage-0
Fetch Operator
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/count.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/count.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/count.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/count.q.out Sun Oct 5 22:28:19 2014
@@ -48,22 +48,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: abcd
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int), b (type: int), c (type: int), d (type: int)
outputColumnNames: a, b, c, d
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
keys: a (type: int), b (type: int), c (type: int)
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
sort order: +++
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
value expressions: _col5 (type: bigint)
Reducer 2
Reduce Operator Tree:
@@ -72,14 +68,11 @@ STAGE PLANS:
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -122,21 +115,17 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: abcd
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int), b (type: int), c (type: int), d (type: int)
outputColumnNames: a, b, c, d
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(1), count(), count(a), count(b), count(c), count(d), count(DISTINCT a), count(DISTINCT b), count(DISTINCT c), count(DISTINCT d), count(DISTINCT a, b), count(DISTINCT b, c), count(DISTINCT c, d), count(DISTINCT a, d), count(DISTINCT a, c), count(DISTINCT b, d), count(DISTINCT a, b, c), count(DISTINCT b, c, d), count(DISTINCT a, c, d), count(DISTINCT a, b, d), count(DISTINCT a, b, c, d)
keys: a (type: int), b (type: int), c (type: int), d (type: int)
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
sort order: ++++
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
Reducer 2
Reduce Operator Tree:
@@ -144,14 +133,11 @@ STAGE PLANS:
aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -191,16 +177,13 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: abcd
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int), b (type: int), c (type: int), d (type: int)
outputColumnNames: a, b, c, d
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: a (type: int), b (type: int), c (type: int)
sort order: +++
Map-reduce partition columns: a (type: int)
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
value expressions: d (type: int)
Reducer 2
Reduce Operator Tree:
@@ -209,14 +192,11 @@ STAGE PLANS:
keys: KEY._col0 (type: int)
mode: complete
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -259,29 +239,23 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: abcd
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int), b (type: int), c (type: int), d (type: int)
outputColumnNames: a, b, c, d
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: a (type: int), b (type: int), c (type: int), d (type: int)
sort order: ++++
- Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Group By Operator
aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
mode: complete
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
- Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
- Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ctas.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ctas.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ctas.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/ctas.q.out Sun Oct 5 22:28:19 2014
@@ -41,40 +41,31 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reducer 3
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -190,40 +181,31 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reducer 3
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -339,40 +321,31 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: (key / 2) (type: double), concat(value, '_con') (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: double), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: double), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reducer 3
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -553,40 +526,31 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reducer 3
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -745,16 +709,13 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
tag: -1
auto parallelism: true
Path -> Alias:
@@ -812,14 +773,11 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
tag: -1
auto parallelism: false
Reducer 3
@@ -828,16 +786,13 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 10
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
GlobalTableId: 1
#### A masked pattern was here ####
NumFilesPerFileSink: 1
- Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
table:
input format: org.apache.hadoop.mapred.TextInputFormat
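[Context: the ctas.q.out hunks above drop the per-operator Statistics annotations (row counts and data sizes) from the CTAS plans. Plans of this shape come from CREATE TABLE AS SELECT statements roughly like the following -- a sketch with assumed table names, not the exact statements in ctas.q:

    -- text-format CTAS; the twin sort/limit stages above match the SORT BY ... LIMIT
    CREATE TABLE ctas_text AS
    SELECT key, value FROM src SORT BY key, value LIMIT 10;

    -- RCFile CTAS matching the (key / 2) and concat(value, '_con') expressions above
    CREATE TABLE ctas_rcfile STORED AS RCFILE AS
    SELECT key / 2 AS half_key, concat(value, '_con') AS conb
    FROM src SORT BY half_key, conb LIMIT 10;
]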
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out Sun Oct 5 22:28:19 2014 differ
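[Context: no inline diff is shown for disable_merge_for_bucketing.q.out, only a notice that the files differ. That test covers writing a bucketed table with map-file merging switched off; a minimal sketch of such a setup, with an assumed table name:

    SET hive.enforce.bucketing = true;
    SET hive.merge.mapfiles = false;
    CREATE TABLE bucket_demo (key INT, value STRING)
    CLUSTERED BY (key) INTO 2 BUCKETS;
    INSERT OVERWRITE TABLE bucket_demo SELECT * FROM src;
]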
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out Sun Oct 5 22:28:19 2014
@@ -21,25 +21,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -72,25 +67,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
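[Context: the escape_clusterby1 plans above -- a Reduce Output Operator carrying both a sort order and Map-reduce partition columns on _col0/_col1 -- correspond to a CLUSTER BY query of roughly this form. Illustrative only; the actual test also exercises backquote-escaped identifiers:

    EXPLAIN SELECT key, value FROM src CLUSTER BY key, value;
]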
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out Sun Oct 5 22:28:19 2014
@@ -21,25 +21,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: string), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -72,25 +67,20 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
sort order:
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: string), _col1 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
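[Context: the escape_distributeby1 plans above -- an empty sort order with partition columns only -- match a DISTRIBUTE BY query along these lines. Again a sketch, with the escaped-identifier variant omitted:

    EXPLAIN SELECT key, value FROM src DISTRIBUTE BY key, value;
]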
Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out?rev=1629545&r1=1629544&r2=1629545&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out Sun Oct 5 22:28:19 2014
@@ -21,24 +21,19 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -71,24 +66,19 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reducer 2
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
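[Context: the escape_orderby1 plans above -- sort order ++ with no partition columns, feeding a single reducer -- match an ORDER BY query of this shape. A sketch; the test itself also covers backquoted column names:

    EXPLAIN SELECT key, value FROM src ORDER BY key, value;
]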