You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by kh...@apache.org on 2014/08/20 00:41:13 UTC
svn commit: r1619005 [8/9] - in /hive/trunk: ./ accumulo-handler/
accumulo-handler/src/ accumulo-handler/src/java/
accumulo-handler/src/java/org/ accumulo-handler/src/java/org/apache/
accumulo-handler/src/java/org/apache/hadoop/ accumulo-handler/src/ja...
Added: hive/trunk/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java (added)
+++ hive/trunk/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java Tue Aug 19 22:41:10 2014
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.accumulo.serde;
+
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
+import org.apache.hadoop.hive.accumulo.columns.ColumnMapper;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters;
+import org.apache.hadoop.hive.serde2.lazy.LazyString;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyIntObjectInspector;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests that an initialized {@code AccumuloSerDe} together with its
+ * {@code AccumuloRowIdFactory} produces the expected lazy ObjectInspectors
+ * for primitive and complex Hive column types, and that
+ * {@code DefaultAccumuloRowIdFactory#createRowId} yields a {@code LazyString}
+ * for a string row id even when the default storage type is binary.
+ */
+public class TestDefaultAccumuloRowIdFactory {
+
+ @Test
+ public void testCorrectPrimitiveInspectors() throws SerDeException {
+ AccumuloSerDe accumuloSerDe = new AccumuloSerDe();
+
+ // Two Hive columns (string row id, int value) mapped to :rowID and cf:cq.
+ Properties properties = new Properties();
+ Configuration conf = new Configuration();
+ properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq");
+ properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col");
+ properties.setProperty(serdeConstants.LIST_COLUMN_TYPES,
+ "string,int");
+
+ accumuloSerDe.initialize(conf, properties);
+
+ AccumuloRowIdFactory factory = accumuloSerDe.getParams().getRowIdFactory();
+ List<TypeInfo> columnTypes = accumuloSerDe.getParams().getHiveColumnTypes();
+ ColumnMapper mapper = accumuloSerDe.getParams().getColumnMapper();
+ SerDeParameters serDeParams = accumuloSerDe.getParams().getSerDeParameters();
+
+ List<ObjectInspector> OIs = accumuloSerDe.getColumnObjectInspectors(columnTypes, serDeParams, mapper.getColumnMappings(), factory);
+
+ // One inspector per declared column: lazy string for the row id,
+ // lazy int for the single Accumulo column.
+ Assert.assertEquals(2, OIs.size());
+ Assert.assertEquals(LazyStringObjectInspector.class, OIs.get(0).getClass());
+ Assert.assertEquals(LazyIntObjectInspector.class, OIs.get(1).getClass());
+ }
+
+ @Test
+ public void testCorrectComplexInspectors() throws SerDeException {
+ AccumuloSerDe accumuloSerDe = new AccumuloSerDe();
+
+ // Same mapping as above, but with a struct row id and a map value column.
+ Properties properties = new Properties();
+ Configuration conf = new Configuration();
+ properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq");
+ properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col");
+ properties.setProperty(serdeConstants.LIST_COLUMN_TYPES,
+ "struct<col1:int,col2:int>,map<string,string>");
+
+ accumuloSerDe.initialize(conf, properties);
+
+ AccumuloRowIdFactory factory = accumuloSerDe.getParams().getRowIdFactory();
+ List<TypeInfo> columnTypes = accumuloSerDe.getParams().getHiveColumnTypes();
+ ColumnMapper mapper = accumuloSerDe.getParams().getColumnMapper();
+ SerDeParameters serDeParams = accumuloSerDe.getParams().getSerDeParameters();
+
+ List<ObjectInspector> OIs = accumuloSerDe.getColumnObjectInspectors(columnTypes, serDeParams, mapper.getColumnMappings(), factory);
+
+ // Expect the correct OIs
+ Assert.assertEquals(2, OIs.size());
+ Assert.assertEquals(LazySimpleStructObjectInspector.class, OIs.get(0).getClass());
+ Assert.assertEquals(LazyMapObjectInspector.class, OIs.get(1).getClass());
+
+ // Separator bytes 2 and 3 are asserted here as the expected nested-level
+ // delimiters for the struct and map inspectors — presumably the
+ // LazySimpleSerDe defaults for secondary separators (verify against
+ // SerDeParameters if this test starts failing after a serde change).
+ LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) OIs.get(0);
+ Assert.assertEquals(2, (int) structOI.getSeparator());
+
+ LazyMapObjectInspector mapOI = (LazyMapObjectInspector) OIs.get(1);
+ Assert.assertEquals(2, (int) mapOI.getItemSeparator());
+ Assert.assertEquals(3, (int) mapOI.getKeyValueSeparator());
+ }
+
+ @Test
+ public void testBinaryStringRowId() throws SerDeException {
+ AccumuloSerDe accumuloSerDe = new AccumuloSerDe();
+
+ // Force the binary default storage type; a string row id should still be
+ // materialized as a LazyString by the default factory.
+ Properties properties = new Properties();
+ Configuration conf = new Configuration();
+ properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq");
+ properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col");
+ properties.setProperty(serdeConstants.LIST_COLUMN_TYPES,
+ "string,string");
+ properties.setProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName());
+
+ accumuloSerDe.initialize(conf, properties);
+
+ // Exercise the factory directly rather than through the serde.
+ DefaultAccumuloRowIdFactory rowIdFactory = new DefaultAccumuloRowIdFactory();
+ rowIdFactory.init(accumuloSerDe.getParams(), properties);
+
+ LazyStringObjectInspector oi = LazyPrimitiveObjectInspectorFactory.getLazyStringObjectInspector(false, (byte) '\\');
+ LazyObjectBase lazyObj = rowIdFactory.createRowId(oi);
+ Assert.assertNotNull(lazyObj);
+ Assert.assertTrue(LazyString.class.isAssignableFrom(lazyObj.getClass()));
+ }
+
+}
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,22 @@
+CREATE TABLE accumulo_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory",
+ "accumulo.composite.delimiter" = "$");
+
+CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string");
+
+insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value'
+from src where key = 100;
+
+select * from accumulo_ck_1;
+select * from accumulo_ck_2;
+
+DROP TABLE accumulo_ck_1;
+DROP TABLE accumulo_ck_2;
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,13 @@
+CREATE TABLE accumulo_ck_3(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom2",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId");
+
+insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value'
+from src where key = 100;
+
+select * from accumulo_ck_3;
+
+DROP TABLE accumulo_ck_3;
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_joins.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_joins.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_joins.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_joins.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,82 @@
+DROP TABLE users;
+DROP TABLE states;
+DROP TABLE countries;
+DROP TABLE users_level;
+
+-- From HIVE-1257
+
+CREATE TABLE users(key string, state string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id"
+);
+
+CREATE TABLE states(key string, name string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,state:name"
+);
+
+CREATE TABLE countries(key string, name string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id"
+);
+
+INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0
+FROM src WHERE key=100;
+
+INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa'
+FROM src WHERE key=100;
+
+INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1
+FROM src WHERE key=100;
+
+set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key);
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country);
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id);
+
+SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key);
+
+set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key);
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country);
+
+SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id);
+
+SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key);
+
+DROP TABLE users;
+DROP TABLE states;
+DROP TABLE countries;
+
+CREATE TABLE users(key int, userid int, username string, created int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created");
+
+CREATE TABLE users_level(key int, userid int, level int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level");
+
+-- HIVE-1903: the problem fixed here showed up even without any data,
+-- so no need to load any to test it
+SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num
+ FROM users JOIN users_level ON (users.userid = users_level.userid)
+ GROUP BY year(from_unixtime(users.created)), level;
+
+DROP TABLE users;
+DROP TABLE users_level;
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,70 @@
+CREATE TABLE accumulo_pushdown(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string");
+
+INSERT OVERWRITE TABLE accumulo_pushdown
+SELECT cast(key as string), value
+FROM src;
+
+-- with full pushdown
+explain select * from accumulo_pushdown where key>'90';
+
+select * from accumulo_pushdown where key>'90';
+select * from accumulo_pushdown where key<'1';
+select * from accumulo_pushdown where key<='2';
+select * from accumulo_pushdown where key>='90';
+
+-- with constant expression
+explain select * from accumulo_pushdown where key>=cast(40 + 50 as string);
+select * from accumulo_pushdown where key>=cast(40 + 50 as string);
+
+-- with partial pushdown
+
+explain select * from accumulo_pushdown where key>'90' and value like '%9%';
+
+select * from accumulo_pushdown where key>'90' and value like '%9%';
+
+-- with two residuals
+
+explain select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int);
+
+select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int);
+
+
+-- with contradictory pushdowns
+
+explain select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%';
+
+select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%';
+
+-- with nothing to push down
+
+explain select * from accumulo_pushdown;
+
+-- with a predicate which is not actually part of the filter, so
+-- it should be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where (case when key<'90' then 2 else 4 end) > 3;
+
+-- with a predicate which is under an OR, so it should
+-- be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where key<='80' or value like '%90%';
+
+explain select * from accumulo_pushdown where key > '281'
+and key < '287';
+
+select * from accumulo_pushdown where key > '281'
+and key < '287';
+
+set hive.optimize.ppd.storage=false;
+
+-- with pushdown disabled
+
+explain select * from accumulo_pushdown where key<='90';
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_queries.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_queries.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_queries.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_queries.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,158 @@
+DROP TABLE accumulo_table_1;
+CREATE TABLE accumulo_table_1(key int, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0");
+
+DESCRIBE EXTENDED accumulo_table_1;
+
+select * from accumulo_table_1;
+
+EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0;
+FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0;
+
+DROP TABLE accumulo_table_2;
+CREATE EXTERNAL TABLE accumulo_table_2(key int, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0");
+
+EXPLAIN
+SELECT Y.*
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20;
+
+SELECT Y.*
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20;
+
+EXPLAIN
+SELECT Y.*
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key, value;
+
+SELECT Y.*
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key,value;
+
+DROP TABLE empty_accumulo_table;
+CREATE TABLE empty_accumulo_table(key int, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string");
+
+DROP TABLE empty_normal_table;
+CREATE TABLE empty_normal_table(key int, value string);
+
+select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c;
+select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c;
+select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c;
+select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c;
+
+CREATE TABLE accumulo_table_3(key int, value string, count int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:val,cf2:count"
+);
+
+EXPLAIN
+INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key);
+
+INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count
+FROM
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key);
+
+select count(1) from accumulo_table_3;
+select * from accumulo_table_3 order by key, value limit 5;
+select key, count from accumulo_table_3 order by key, count desc limit 5;
+
+DROP TABLE accumulo_table_4;
+CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+);
+
+INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2
+FROM src WHERE key=98 OR key=100;
+
+SELECT * FROM accumulo_table_4 ORDER BY key;
+
+DROP TABLE accumulo_table_5;
+CREATE EXTERNAL TABLE accumulo_table_5(key int, value map<string,string>)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_4");
+
+SELECT * FROM accumulo_table_5 ORDER BY key;
+
+DROP TABLE accumulo_table_6;
+CREATE TABLE accumulo_table_6(key int, value map<string,string>)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:*"
+);
+INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src
+WHERE key=98 OR key=100;
+
+SELECT * FROM accumulo_table_6 ORDER BY key;
+
+DROP TABLE accumulo_table_7;
+CREATE TABLE accumulo_table_7(value map<string,string>, key int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = "cf:*,:rowID"
+);
+INSERT OVERWRITE TABLE accumulo_table_7
+SELECT map(value, key, upper(value), key+1), key FROM src
+WHERE key=98 OR key=100;
+
+SELECT * FROM accumulo_table_7 ORDER BY key;
+
+DROP TABLE accumulo_table_8;
+CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+);
+
+INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2
+FROM src WHERE key=98 OR key=100;
+
+SELECT * FROM accumulo_table_8 ORDER BY key;
+
+DROP TABLE accumulo_table_1;
+DROP TABLE accumulo_table_2;
+DROP TABLE accumulo_table_3;
+DROP TABLE accumulo_table_4;
+DROP TABLE accumulo_table_5;
+DROP TABLE accumulo_table_6;
+DROP TABLE accumulo_table_7;
+DROP TABLE accumulo_table_8;
+DROP TABLE empty_accumulo_table;
+DROP TABLE empty_normal_table;
Added: hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q (added)
+++ hive/trunk/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q Tue Aug 19 22:41:10 2014
@@ -0,0 +1,24 @@
+-- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE
+CREATE TABLE src_x1(key string, value string);
+CREATE TABLE src_x2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value");
+
+explain
+from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100;
+
+from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100;
+
+select * from src_x1 order by key;
+select * from src_x2 order by key;
+
+DROP TABLE src_x1;
+DROP TABLE src_x2;
Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,80 @@
+PREHOOK: query: CREATE TABLE accumulo_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory",
+ "accumulo.composite.delimiter" = "$")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_ck_1
+POSTHOOK: query: CREATE TABLE accumulo_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory",
+ "accumulo.composite.delimiter" = "$")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_ck_1
+PREHOOK: query: CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_ck_2
+POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom",
+ "accumulo.columns.mapping" = ":rowid,cf:string")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_ck_2
+PREHOOK: query: insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value'
+from src where key = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_ck_1
+POSTHOOK: query: insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value'
+from src where key = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_ck_1
+PREHOOK: query: select * from accumulo_ck_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_ck_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_ck_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_ck_1
+#### A masked pattern was here ####
+{"col1":"1000","col2":"2000","col3":"3000"} value
+PREHOOK: query: select * from accumulo_ck_2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_ck_2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_ck_2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_ck_2
+#### A masked pattern was here ####
+1000$2000$3000 value
+PREHOOK: query: DROP TABLE accumulo_ck_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_ck_1
+PREHOOK: Output: default@accumulo_ck_1
+POSTHOOK: query: DROP TABLE accumulo_ck_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_ck_1
+POSTHOOK: Output: default@accumulo_ck_1
+PREHOOK: query: DROP TABLE accumulo_ck_2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_ck_2
+PREHOOK: Output: default@accumulo_ck_2
+POSTHOOK: query: DROP TABLE accumulo_ck_2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_ck_2
+POSTHOOK: Output: default@accumulo_ck_2
Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,45 @@
+PREHOOK: query: CREATE TABLE accumulo_ck_3(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom2",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_ck_3
+POSTHOOK: query: CREATE TABLE accumulo_ck_3(key struct<col1:string,col2:string,col3:string>, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+ "accumulo.table.name" = "accumulo_custom2",
+ "accumulo.columns.mapping" = ":rowid,cf:string",
+ "accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_ck_3
+PREHOOK: query: insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value'
+from src where key = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_ck_3
+POSTHOOK: query: insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value'
+from src where key = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_ck_3
+PREHOOK: query: select * from accumulo_ck_3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_ck_3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_ck_3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_ck_3
+#### A masked pattern was here ####
+{"col1":"a","col2":"m","col3":"w"} value
+PREHOOK: query: DROP TABLE accumulo_ck_3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_ck_3
+PREHOOK: Output: default@accumulo_ck_3
+POSTHOOK: query: DROP TABLE accumulo_ck_3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_ck_3
+POSTHOOK: Output: default@accumulo_ck_3
Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_joins.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_joins.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_joins.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_joins.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,282 @@
+PREHOOK: query: DROP TABLE users
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE users
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE states
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE states
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE countries
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE countries
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE users_level
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE users_level
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: -- From HIVE-1257
+
+CREATE TABLE users(key string, state string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@users
+POSTHOOK: query: -- From HIVE-1257
+
+CREATE TABLE users(key string, state string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@users
+PREHOOK: query: CREATE TABLE states(key string, name string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,state:name"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@states
+POSTHOOK: query: CREATE TABLE states(key string, name string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,state:name"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@states
+PREHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@countries
+POSTHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@countries
+PREHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0
+FROM src WHERE key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@users
+POSTHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0
+FROM src WHERE key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@users
+PREHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa'
+FROM src WHERE key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@states
+POSTHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa'
+FROM src WHERE key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@states
+PREHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1
+FROM src WHERE key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@countries
+POSTHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1
+FROM src WHERE key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@countries
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 USA United States USA
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 USA United States USA
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@states
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@states
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 IA Iowa
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 USA United States USA
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country = c.country)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 USA United States USA
+PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@countries
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c
+ON (u.country_id = c.country_id)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@countries
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@states
+PREHOOK: Input: default@users
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s
+ON (u.state = s.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@states
+POSTHOOK: Input: default@users
+#### A masked pattern was here ####
+user1 IA Iowa
+PREHOOK: query: DROP TABLE users
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@users
+PREHOOK: Output: default@users
+POSTHOOK: query: DROP TABLE users
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@users
+POSTHOOK: Output: default@users
+PREHOOK: query: DROP TABLE states
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@states
+PREHOOK: Output: default@states
+POSTHOOK: query: DROP TABLE states
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@states
+POSTHOOK: Output: default@states
+PREHOOK: query: DROP TABLE countries
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@countries
+PREHOOK: Output: default@countries
+POSTHOOK: query: DROP TABLE countries
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@countries
+POSTHOOK: Output: default@countries
+PREHOOK: query: CREATE TABLE users(key int, userid int, username string, created int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@users
+POSTHOOK: query: CREATE TABLE users(key int, userid int, username string, created int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@users
+PREHOOK: query: CREATE TABLE users_level(key int, userid int, level int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@users_level
+POSTHOOK: query: CREATE TABLE users_level(key int, userid int, level int)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@users_level
+PREHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data,
+-- so no need to load any to test it
+SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num
+ FROM users JOIN users_level ON (users.userid = users_level.userid)
+ GROUP BY year(from_unixtime(users.created)), level
+PREHOOK: type: QUERY
+PREHOOK: Input: default@users
+PREHOOK: Input: default@users_level
+#### A masked pattern was here ####
+POSTHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data,
+-- so no need to load any to test it
+SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num
+ FROM users JOIN users_level ON (users.userid = users_level.userid)
+ GROUP BY year(from_unixtime(users.created)), level
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@users
+POSTHOOK: Input: default@users_level
+#### A masked pattern was here ####
+PREHOOK: query: DROP TABLE users
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@users
+PREHOOK: Output: default@users
+POSTHOOK: query: DROP TABLE users
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@users
+POSTHOOK: Output: default@users
+PREHOOK: query: DROP TABLE users_level
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@users_level
+PREHOOK: Output: default@users_level
+POSTHOOK: query: DROP TABLE users_level
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@users_level
+POSTHOOK: Output: default@users_level
Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,600 @@
+PREHOOK: query: CREATE TABLE accumulo_pushdown(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_pushdown
+POSTHOOK: query: CREATE TABLE accumulo_pushdown(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_pushdown
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_pushdown
+SELECT cast(key as string), value
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_pushdown
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_pushdown
+SELECT cast(key as string), value
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_pushdown
+PREHOOK: query: -- with full pushdown
+explain select * from accumulo_pushdown where key>'90'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with full pushdown
+explain select * from accumulo_pushdown where key>'90'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: (key > '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (key > '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown where key>'90'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key>'90'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: select * from accumulo_pushdown where key<'1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key<'1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+0 val_0
+PREHOOK: query: select * from accumulo_pushdown where key<='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key<='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+PREHOOK: query: select * from accumulo_pushdown where key>='90'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key>='90'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: -- with constant expression
+explain select * from accumulo_pushdown where key>=cast(40 + 50 as string)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with constant expression
+explain select * from accumulo_pushdown where key>=cast(40 + 50 as string)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: (key >= '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (key >= '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown where key>=cast(40 + 50 as string)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key>=cast(40 + 50 as string)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: -- with partial pushdown
+
+explain select * from accumulo_pushdown where key>'90' and value like '%9%'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with partial pushdown
+
+explain select * from accumulo_pushdown where key>'90' and value like '%9%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: (key > '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (value like '%9%') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown where key>'90' and value like '%9%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key>'90' and value like '%9%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: -- with two residuals
+
+explain select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with two residuals
+
+explain select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: (key >= '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: ((value like '%9%') and (key = UDFToInteger(value))) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown
+where key>='90' and value like '%9%' and key=cast(value as int)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+PREHOOK: query: -- with contradictory pushdowns
+
+explain select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with contradictory pushdowns
+
+explain select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: ((key < '80') and (key > '90')) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (value like '%90%') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown
+where key<'80' and key>'90' and value like '%90%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+PREHOOK: query: -- with nothing to push down
+
+explain select * from accumulo_pushdown
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with nothing to push down
+
+explain select * from accumulo_pushdown
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: accumulo_pushdown
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ ListSink
+
+PREHOOK: query: -- with a predicate which is not actually part of the filter, so
+-- it should be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where (case when key<'90' then 2 else 4 end) > 3
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with a predicate which is not actually part of the filter, so
+-- it should be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where (case when key<'90' then 2 else 4 end) > 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (CASE WHEN ((key < '90')) THEN (2) ELSE (4) END > 3) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- with a predicate which is under an OR, so it should
+-- be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where key<='80' or value like '%90%'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with a predicate which is under an OR, so it should
+-- be ignored by pushdown
+
+explain select * from accumulo_pushdown
+where key<='80' or value like '%90%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: ((key <= '80') or (value like '%90%')) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select * from accumulo_pushdown where key > '281'
+and key < '287'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from accumulo_pushdown where key > '281'
+and key < '287'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ filterExpr: ((key > '281') and (key < '287')) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: ((key > '281') and (key < '287')) (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from accumulo_pushdown where key > '281'
+and key < '287'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_pushdown where key > '281'
+and key < '287'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_pushdown
+#### A masked pattern was here ####
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+PREHOOK: query: -- with pushdown disabled
+
+explain select * from accumulo_pushdown where key<='90'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- with pushdown disabled
+
+explain select * from accumulo_pushdown where key<='90'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: accumulo_pushdown
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Filter Operator
+ predicate: (key <= '90') (type: boolean)
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+