Posted to commits@hive.apache.org by br...@apache.org on 2014/08/10 03:33:55 UTC
svn commit: r1617040 [2/13] - in /hive/branches/spark: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/ beeline/src/java/org/apache/hive/beeline/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/conf/ data...
Modified: hive/branches/spark/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/qtest/pom.xml?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/qtest/pom.xml (original)
+++ hive/branches/spark/itests/qtest/pom.xml Sun Aug 10 01:33:50 2014
@@ -420,7 +420,9 @@
resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/compiler/" className="TestParse"
logFile="${project.build.directory}/testparsegen.log"
hadoopVersion="${active.hadoop.version}"
- logDirectory="${project.build.directory}/qfile-results/positive/"/>
+ logDirectory="${project.build.directory}/qfile-results/positive/"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Negative Parse -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -433,7 +435,9 @@
resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/compiler/errors/" className="TestParseNegative"
logFile="${project.build.directory}/testparseneggen.log"
hadoopVersion="${active.hadoop.version}"
- logDirectory="${project.build.directory}/qfile-results/negative/"/>
+ logDirectory="${project.build.directory}/qfile-results/negative/"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Cli -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -448,7 +452,9 @@
resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/clientpositive/" className="TestCliDriver"
logFile="${project.build.directory}/testclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientpositive/"
- hadoopVersion="${active.hadoop.version}"/>
+ hadoopVersion="${active.hadoop.version}"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Negative Cli -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -463,7 +469,9 @@
resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/clientnegative/" className="TestNegativeCliDriver"
logFile="${project.build.directory}/testnegativeclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientnegative/"
- hadoopVersion="${active.hadoop.version}"/>
+ hadoopVersion="${active.hadoop.version}"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Compare Cli -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -477,7 +485,9 @@
className="TestCompareCliDriver"
logFile="${project.build.directory}/testcompareclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientcompare/"
- hadoopVersion="${active.hadoop.version}"/>
+ hadoopVersion="${active.hadoop.version}"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Minimr -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -493,7 +503,8 @@
logFile="${project.build.directory}/testminimrclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientpositive/"
hadoopVersion="${active.hadoop.version}"
- />
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<if>
<equals arg1="${active.hadoop.version}" arg2="${hadoop-23.version}"/>
@@ -513,7 +524,8 @@
logFile="${project.build.directory}/testminitezclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientpositive/"
hadoopVersion="${active.hadoop.version}"
- />
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
</then>
<else>
</else>
@@ -533,7 +545,8 @@
logFile="${project.build.directory}/testnegativeminimrclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/clientnegative/"
hadoopVersion="${hadoopVersion}"
- />
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- HBase Positive -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -545,7 +558,9 @@
clusterMode="${clustermode}"
resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseCliDriver"
logFile="${project.build.directory}/testhbaseclidrivergen.log"
- logDirectory="${project.build.directory}/qfile-results/hbase-handler/positive/"/>
+ logDirectory="${project.build.directory}/qfile-results/hbase-handler/positive/"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- HBase Minimr -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -557,7 +572,9 @@
clusterMode="miniMR"
resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseMinimrCliDriver"
logFile="${project.build.directory}/testhbaseminimrclidrivergen.log"
- logDirectory="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/"/>
+ logDirectory="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- HBase Negative -->
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -569,7 +586,9 @@
clusterMode="${clustermode}"
resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/negative/" className="TestHBaseNegativeCliDriver"
logFile="${project.build.directory}/testhbasenegativeclidrivergen.log"
- logDirectory="${project.build.directory}/qfile-results/hbase-handler/negative"/>
+ logDirectory="${project.build.directory}/qfile-results/hbase-handler/negative"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<!-- Beeline -->
@@ -588,7 +607,7 @@
resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/clientpositive/" className="TestBeeLineDriver"
logFile="${project.build.directory}/testbeelinedrivergen.log"
logDirectory="${project.build.directory}/qfile-results/beelinepositive/"
- hadoopVersion="${hadoopVersion}" />
+ hadoopVersion="${hadoopVersion}"/>
</then>
</if>
@@ -606,7 +625,8 @@
logFile="${project.build.directory}/testcontribclidrivergen.log"
logDirectory="${project.build.directory}/qfile-results/contribclientpositive"
hadoopVersion="${hadoopVersion}"
- />
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
<qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
outputDirectory="${project.build.directory}/generated-test-sources/java/org/apache/hadoop/hive/cli"
@@ -617,7 +637,9 @@
runDisabled="${run_disabled}"
resultsDirectory="${basedir}/${hive.path.to.root}/contrib/src/test/results/clientnegative/" className="TestContribNegativeCliDriver"
logFile="${project.build.directory}/testcontribnegclidrivergen.log"
- logDirectory="${project.build.directory}/qfile-results/contribclientnegative"/>
+ logDirectory="${project.build.directory}/qfile-results/contribclientnegative"
+ initScript="q_test_init.sql"
+ cleanupScript="q_test_cleanup.sql"/>
</target>
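
For reference, each qtestgen invocation above now threads the two script names through to the generated driver's QTestUtil instance. A minimal sketch of the resulting wiring, assuming a generated TestCliDriver with illustrative paths (the real output comes from the qtestgen Ant task's templates, which this diff does not show):

    // Hypothetical sketch of a qtestgen-generated driver; the directory paths
    // are placeholders, and only the constructor signature is taken from the
    // QTestUtil changes later in this commit.
    public class TestCliDriver {
      private static QTestUtil qt;
      static {
        try {
          qt = new QTestUtil(
              "ql/src/test/results/clientpositive",   // resultsDirectory (assumed)
              "target/qfile-results/clientpositive",  // logDirectory (assumed)
              QTestUtil.MiniClusterType.none, null, "0.20",
              "q_test_init.sql",                      // initScript attribute
              "q_test_cleanup.sql");                  // cleanupScript attribute
        } catch (Exception e) {
          throw new RuntimeException("failed to set up QTestUtil", e);
        }
      }
    }
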
Modified: hive/branches/spark/itests/qtest/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/qtest/testconfiguration.properties?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/qtest/testconfiguration.properties (original)
+++ hive/branches/spark/itests/qtest/testconfiguration.properties Sun Aug 10 01:33:50 2014
@@ -1,5 +1,5 @@
minimr.query.files=stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q,empty_dir_in_table.q,temp_table_external.q
minimr.query.negative.files=cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q
minitez.query.files=tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q,tez_join_hash.q
-minitez.query.files.shared=orc_merge1.q,orc_merge2.q,orc_merge3.q,orc_merge4.q,alter_merge_orc.q,alter_merge_2_orc.q,alter_merge_stats_orc.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,metadataonly1.q,temp_table.q,vectorized_ptf.q,optimize_nullscan.q,vector_cast_constant.q,vector_string_concat.q
+minitez.query.files.shared=orc_merge1.q,orc_merge2.q,orc_merge3.q,orc_merge4.q,alter_merge_orc.q,alter_merge_2_orc.q,alter_merge_stats_orc.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,metadataonly1.q,temp_table.q,vectorized_ptf.q,optimize_nullscan.q,vector_cast_constant.q,vector_string_concat.q,vector_decimal_aggregate.q,vector_left_outer_join.q,vectorization_12.q,vectorization_13.q,vectorization_14.q,vectorization_9.q,vectorization_part_project.q,vectorization_short_regress.q,vectorized_mapjoin.q,vectorized_nested_mapjoin.q,vectorized_shufflejoin.q,vectorized_timestamp_funcs.q,vector_data_types.q
beeline.positive.exclude=add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q
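
These comma-separated lists are consumed by the test generators; a rough sketch of reading them, assuming the file is loaded as an ordinary java.util.Properties object (the actual parsing lives in the qtestgen task, not in this diff):

    // Sketch only: load the property file and split a list into .q file names.
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public class TestConfReader {
      public static String[] minimrQueryFiles() throws IOException {
        Properties props = new Properties();
        try (InputStream in =
            new FileInputStream("itests/qtest/testconfiguration.properties")) {
          props.load(in);
        }
        return props.getProperty("minimr.query.files", "").split(",");
      }
    }
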
Modified: hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java (original)
+++ hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java Sun Aug 10 01:33:50 2014
@@ -17,24 +17,99 @@
*/
package org.apache.hadoop.hive.hbase;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.ql.QTestUtil;
-import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
+
+import java.util.List;
/**
* HBaseQTestUtil initializes HBase-specific test fixtures.
*/
public class HBaseQTestUtil extends QTestUtil {
+
+ /** Name of the HBase table, in both Hive and HBase. */
+ public static String HBASE_SRC_NAME = "src_hbase";
+
+ /** Name of the table snapshot. */
+ public static String HBASE_SRC_SNAPSHOT_NAME = "src_hbase_snapshot";
+
+ /** A handle to this harness's cluster */
+ private final HConnection conn;
+
public HBaseQTestUtil(
- String outDir, String logDir, MiniClusterType miniMr, HBaseTestSetup setup)
+ String outDir, String logDir, MiniClusterType miniMr, HBaseTestSetup setup,
+ String initScript, String cleanupScript)
throws Exception {
- super(outDir, logDir, miniMr, null);
+ super(outDir, logDir, miniMr, null, initScript, cleanupScript);
setup.preTest(conf);
+ this.conn = setup.getConnection();
super.init();
}
+ /** Returns true if the HBase table snapshot exists, false otherwise. */
+ private static boolean hbaseTableSnapshotExists(HBaseAdmin admin, String snapshotName) throws
+ Exception {
+ List<HBaseProtos.SnapshotDescription> snapshots =
+ admin.listSnapshots(".*" + snapshotName + ".*");
+ for (HBaseProtos.SnapshotDescription sn : snapshots) {
+ if (sn.getName().equals(snapshotName)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
@Override
public void init() throws Exception {
// defer
}
+
+ @Override
+ public void createSources() throws Exception {
+ super.createSources();
+
+ conf.setBoolean("hive.test.init.phase", true);
+
+ // create and load the input data into the hbase table
+ runCreateTableCmd(
+ "CREATE TABLE " + HBASE_SRC_NAME + "(key INT, value STRING)"
+ + " STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'"
+ + " WITH SERDEPROPERTIES ('hbase.columns.mapping' = ':key,cf:val')"
+ + " TBLPROPERTIES ('hbase.table.name' = '" + HBASE_SRC_NAME + "')"
+ );
+ runCmd("INSERT OVERWRITE TABLE " + HBASE_SRC_NAME + " SELECT * FROM src");
+
+ // create a snapshot
+ HBaseAdmin admin = null;
+ try {
+ admin = new HBaseAdmin(conn.getConfiguration());
+ admin.snapshot(HBASE_SRC_SNAPSHOT_NAME, HBASE_SRC_NAME);
+ } finally {
+ if (admin != null) admin.close();
+ }
+
+ conf.setBoolean("hive.test.init.phase", false);
+ }
+
+ @Override
+ public void cleanUp() throws Exception {
+ super.cleanUp();
+
+ // drop the table in case it was left over from an unsuccessful run
+ db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, HBASE_SRC_NAME);
+
+ HBaseAdmin admin = null;
+ try {
+ admin = new HBaseAdmin(conn.getConfiguration());
+ if (hbaseTableSnapshotExists(admin, HBASE_SRC_SNAPSHOT_NAME)) {
+ admin.deleteSnapshot(HBASE_SRC_SNAPSHOT_NAME);
+ }
+ } finally {
+ if (admin != null) admin.close();
+ }
+ }
}
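
A hypothetical caller of the widened HBaseQTestUtil constructor; outDir, logDir, and the wrapped JUnit test are placeholders, and the script names mirror the qtestgen attributes added earlier in this commit:

    // Sketch of constructing the harness with the new arguments; HBaseTestSetup
    // wraps a JUnit Test per its existing constructor (super(test)).
    HBaseTestSetup setup = new HBaseTestSetup(wrappedTest); // wrappedTest: assumed
    HBaseQTestUtil qt = new HBaseQTestUtil(
        outDir, logDir, QTestUtil.MiniClusterType.none, setup,
        "q_test_init.sql", "q_test_cleanup.sql");
    qt.createSources();  // also builds src_hbase and its snapshot, per the override above
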
Modified: hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java (original)
+++ hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java Sun Aug 10 01:33:50 2014
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.hbase;
-import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.Arrays;
@@ -29,12 +28,13 @@ import junit.framework.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -50,6 +50,7 @@ public class HBaseTestSetup extends Test
private MiniHBaseCluster hbaseCluster;
private int zooKeeperPort;
private String hbaseRoot;
+ private HConnection hbaseConn;
private static final int NUM_REGIONSERVERS = 1;
@@ -57,6 +58,10 @@ public class HBaseTestSetup extends Test
super(test);
}
+ public HConnection getConnection() {
+ return this.hbaseConn;
+ }
+
void preTest(HiveConf conf) throws Exception {
setUpFixtures(conf);
@@ -97,27 +102,23 @@ public class HBaseTestSetup extends Test
hbaseConf.setInt("hbase.regionserver.info.port", -1);
hbaseCluster = new MiniHBaseCluster(hbaseConf, NUM_REGIONSERVERS);
conf.set("hbase.master", hbaseCluster.getMaster().getServerName().getHostAndPort());
+ hbaseConn = HConnectionManager.createConnection(hbaseConf);
+
// opening the META table ensures that cluster is running
- new HTable(hbaseConf, HConstants.META_TABLE_NAME);
- createHBaseTable(hbaseConf);
+ HTableInterface meta = null;
+ try {
+ meta = hbaseConn.getTable(TableName.META_TABLE_NAME);
+ } finally {
+ if (meta != null) meta.close();
+ }
+ createHBaseTable();
}
- private void createHBaseTable(Configuration hbaseConf) throws IOException {
+ private void createHBaseTable() throws IOException {
final String HBASE_TABLE_NAME = "HiveExternalTable";
HTableDescriptor htableDesc = new HTableDescriptor(HBASE_TABLE_NAME.getBytes());
HColumnDescriptor hcolDesc = new HColumnDescriptor("cf".getBytes());
htableDesc.addFamily(hcolDesc);
- HBaseAdmin hbaseAdmin = new HBaseAdmin(hbaseConf);
- if(Arrays.asList(hbaseAdmin.listTables()).contains(htableDesc)){
- // if table is already in there, don't recreate.
- return;
- }
- hbaseAdmin.createTable(htableDesc);
- HTable htable = new HTable(hbaseConf, HBASE_TABLE_NAME);
-
- // data
- Put [] puts = new Put [] {
- new Put("key-1".getBytes()), new Put("key-2".getBytes()), new Put("key-3".getBytes()) };
boolean [] booleans = new boolean [] { true, false, true };
byte [] bytes = new byte [] { Byte.MIN_VALUE, -1, Byte.MAX_VALUE };
@@ -128,18 +129,37 @@ public class HBaseTestSetup extends Test
float [] floats = new float [] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE };
double [] doubles = new double [] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE };
- // store data
- for (int i = 0; i < puts.length; i++) {
- puts[i].add("cf".getBytes(), "cq-boolean".getBytes(), Bytes.toBytes(booleans[i]));
- puts[i].add("cf".getBytes(), "cq-byte".getBytes(), new byte [] { bytes[i] });
- puts[i].add("cf".getBytes(), "cq-short".getBytes(), Bytes.toBytes(shorts[i]));
- puts[i].add("cf".getBytes(), "cq-int".getBytes(), Bytes.toBytes(ints[i]));
- puts[i].add("cf".getBytes(), "cq-long".getBytes(), Bytes.toBytes(longs[i]));
- puts[i].add("cf".getBytes(), "cq-string".getBytes(), Bytes.toBytes(strings[i]));
- puts[i].add("cf".getBytes(), "cq-float".getBytes(), Bytes.toBytes(floats[i]));
- puts[i].add("cf".getBytes(), "cq-double".getBytes(), Bytes.toBytes(doubles[i]));
-
- htable.put(puts[i]);
+ HBaseAdmin hbaseAdmin = null;
+ HTableInterface htable = null;
+ try {
+ hbaseAdmin = new HBaseAdmin(hbaseConn.getConfiguration());
+ if (Arrays.asList(hbaseAdmin.listTables()).contains(htableDesc)) {
+ // if the table already exists, don't recreate it.
+ return;
+ }
+ hbaseAdmin.createTable(htableDesc);
+ htable = hbaseConn.getTable(HBASE_TABLE_NAME);
+
+ // data
+ Put[] puts = new Put[]{
+ new Put("key-1".getBytes()), new Put("key-2".getBytes()), new Put("key-3".getBytes())};
+
+ // store data
+ for (int i = 0; i < puts.length; i++) {
+ puts[i].add("cf".getBytes(), "cq-boolean".getBytes(), Bytes.toBytes(booleans[i]));
+ puts[i].add("cf".getBytes(), "cq-byte".getBytes(), new byte[]{bytes[i]});
+ puts[i].add("cf".getBytes(), "cq-short".getBytes(), Bytes.toBytes(shorts[i]));
+ puts[i].add("cf".getBytes(), "cq-int".getBytes(), Bytes.toBytes(ints[i]));
+ puts[i].add("cf".getBytes(), "cq-long".getBytes(), Bytes.toBytes(longs[i]));
+ puts[i].add("cf".getBytes(), "cq-string".getBytes(), Bytes.toBytes(strings[i]));
+ puts[i].add("cf".getBytes(), "cq-float".getBytes(), Bytes.toBytes(floats[i]));
+ puts[i].add("cf".getBytes(), "cq-double".getBytes(), Bytes.toBytes(doubles[i]));
+
+ htable.put(puts[i]);
+ }
+ } finally {
+ if (htable != null) htable.close();
+ if (hbaseAdmin != null) hbaseAdmin.close();
}
}
@@ -152,6 +172,10 @@ public class HBaseTestSetup extends Test
@Override
protected void tearDown() throws Exception {
+ if (hbaseConn != null) {
+ hbaseConn.close();
+ hbaseConn = null;
+ }
if (hbaseCluster != null) {
HConnectionManager.deleteAllConnections(true);
hbaseCluster.shutdown();
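
The close-in-finally blocks above are the pre-Java-7 idiom; since HBaseAdmin and HTableInterface are Closeable in the HBase client this branch builds against, the same cleanup could be written with try-with-resources. Worth noting, too, that checking listTables() for a freshly built HTableDescriptor relies on descriptor equality, whereas HBaseAdmin.tableExists(...) is the more direct check. A sketch under those assumptions, not part of the commit:

    // Equivalent resource handling with try-with-resources, using tableExists
    // instead of descriptor comparison; behavior matches the patch (data is
    // only loaded when the table is newly created).
    try (HBaseAdmin admin = new HBaseAdmin(hbaseConn.getConfiguration())) {
      if (!admin.tableExists(HBASE_TABLE_NAME)) {
        admin.createTable(htableDesc);
        try (HTableInterface htable = hbaseConn.getTable(HBASE_TABLE_NAME)) {
          // ... same Put loop as above ...
        }
      }
    }
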
Modified: hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Sun Aug 10 01:33:50 2014
@@ -38,7 +38,6 @@ import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.Serializable;
import java.io.StringWriter;
-import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
@@ -115,6 +114,8 @@ public class QTestUtil {
public static final String UTF_8 = "UTF-8";
private static final Log LOG = LogFactory.getLog("QTestUtil");
+ private final String defaultInitScript = "q_test_init.sql";
+ private final String defaultCleanupScript = "q_test_cleanup.sql";
private String testWarehouse;
private final String testFiles;
@@ -130,7 +131,7 @@ public class QTestUtil {
public static final HashSet<String> srcTables = new HashSet<String>();
private static MiniClusterType clusterType = MiniClusterType.none;
private ParseDriver pd;
- private Hive db;
+ protected Hive db;
protected HiveConf conf;
private Driver drv;
private BaseSemanticAnalyzer sem;
@@ -142,6 +143,10 @@ public class QTestUtil {
private boolean miniMr = false;
private String hadoopVer = null;
private QTestSetup setup = null;
+ private boolean isSessionStateStarted = false;
+
+ private String initScript;
+ private String cleanupScript;
static {
for (String srcTable : System.getProperty("test.src.tables", "").trim().split(",")) {
@@ -225,8 +230,9 @@ public class QTestUtil {
}
}
- public QTestUtil(String outDir, String logDir) throws Exception {
- this(outDir, logDir, MiniClusterType.none, null, "0.20");
+ public QTestUtil(String outDir, String logDir, String initScript, String cleanupScript) throws
+ Exception {
+ this(outDir, logDir, MiniClusterType.none, null, "0.20", initScript, cleanupScript);
}
public String getOutputDirectory() {
@@ -297,13 +303,14 @@ public class QTestUtil {
}
}
- public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String hadoopVer)
+ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String hadoopVer,
+ String initScript, String cleanupScript)
throws Exception {
- this(outDir, logDir, clusterType, null, hadoopVer);
+ this(outDir, logDir, clusterType, null, hadoopVer, initScript, cleanupScript);
}
public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
- String confDir, String hadoopVer)
+ String confDir, String hadoopVer, String initScript, String cleanupScript)
throws Exception {
this.outDir = outDir;
this.logDir = logDir;
@@ -354,6 +361,20 @@ public class QTestUtil {
testFiles = dataDir;
+ // Fall back to data/scripts under the current directory if test.data.scripts is not set
+ String scriptsDir = conf.get("test.data.scripts");
+ if (scriptsDir == null) {
+ scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
+ }
+ if (initScript.isEmpty()) {
+ initScript = defaultInitScript;
+ }
+ if (cleanupScript.isEmpty()) {
+ cleanupScript = defaultCleanupScript;
+ }
+ this.initScript = scriptsDir + "/" + initScript;
+ this.cleanupScript = scriptsDir + "/" + cleanupScript;
+
overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
setup = new QTestSetup();
@@ -593,14 +614,15 @@ public class QTestUtil {
}
public void cleanUp() throws Exception {
- // Drop any tables that remain due to unsuccessful runs
- for (String s : new String[] {"src", "src1", "src_json", "src_thrift",
- "src_sequencefile", "srcpart", "srcbucket", "srcbucket2", "dest1",
- "dest2", "dest3", "dest4", "dest4_sequencefile", "dest_j1", "dest_j2",
- "dest_g1", "dest_g2", "fetchtask_ioexception",
- AllVectorTypesRecord.TABLE_NAME}) {
- db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, s);
+ if(!isSessionStateStarted) {
+ startSessionState();
}
+ String cleanupCommands = readEntireFileIntoString(new File(cleanupScript));
+ LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
+ if(cliDriver == null) {
+ cliDriver = new CliDriver();
+ }
+ cliDriver.processLine(cleanupCommands);
// delete any contents in the warehouse dir
Path p = new Path(testWarehouse);
@@ -630,7 +652,7 @@ public class QTestUtil {
return;
}
- private void runCreateTableCmd(String createTableCmd) throws Exception {
+ protected void runCreateTableCmd(String createTableCmd) throws Exception {
int ecode = 0;
ecode = drv.run(createTableCmd).getResponseCode();
if (ecode != 0) {
@@ -641,7 +663,7 @@ public class QTestUtil {
return;
}
- private void runCmd(String cmd) throws Exception {
+ protected void runCmd(String cmd) throws Exception {
int ecode = 0;
ecode = drv.run(cmd).getResponseCode();
drv.close();
@@ -653,119 +675,20 @@ public class QTestUtil {
}
public void createSources() throws Exception {
-
- startSessionState();
+ if(!isSessionStateStarted) {
+ startSessionState();
+ }
conf.setBoolean("hive.test.init.phase", true);
- // Create a bunch of tables with columns key and value
- LinkedList<String> cols = new LinkedList<String>();
- cols.add("key");
- cols.add("value");
-
- LinkedList<String> part_cols = new LinkedList<String>();
- part_cols.add("ds");
- part_cols.add("hr");
- db.createTable("srcpart", cols, part_cols, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
-
- Path fpath;
- HashMap<String, String> part_spec = new HashMap<String, String>();
- for (String ds : new String[] {"2008-04-08", "2008-04-09"}) {
- for (String hr : new String[] {"11", "12"}) {
- part_spec.clear();
- part_spec.put("ds", ds);
- part_spec.put("hr", hr);
- // System.out.println("Loading partition with spec: " + part_spec);
- // db.createPartition(srcpart, part_spec);
- fpath = new Path(testFiles, "kv1.txt");
- // db.loadPartition(fpath, srcpart.getName(), part_spec, true);
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' OVERWRITE INTO TABLE srcpart PARTITION (ds='" + ds + "',hr='"
- + hr + "')");
- }
- }
- ArrayList<String> bucketCols = new ArrayList<String>();
- bucketCols.add("key");
- runCreateTableCmd("CREATE TABLE srcbucket(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE");
- // db.createTable("srcbucket", cols, null, TextInputFormat.class,
- // IgnoreKeyTextOutputFormat.class, 2, bucketCols);
- for (String fname : new String[] {"srcbucket0.txt", "srcbucket1.txt"}) {
- fpath = new Path(testFiles, fname);
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE srcbucket");
- }
-
- runCreateTableCmd("CREATE TABLE srcbucket2(key int, value string) "
- + "CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE");
- // db.createTable("srcbucket", cols, null, TextInputFormat.class,
- // IgnoreKeyTextOutputFormat.class, 2, bucketCols);
- for (String fname : new String[] {"srcbucket20.txt", "srcbucket21.txt",
- "srcbucket22.txt", "srcbucket23.txt"}) {
- fpath = new Path(testFiles, fname);
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE srcbucket2");
- }
-
- for (String tname : new String[] {"src", "src1"}) {
- db.createTable(tname, cols, null, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
- }
- db.createTable("src_sequencefile", cols, null,
- SequenceFileInputFormat.class, SequenceFileOutputFormat.class);
-
- Table srcThrift =
- new Table(SessionState.get().getCurrentDatabase(), "src_thrift");
- srcThrift.setInputFormatClass(SequenceFileInputFormat.class.getName());
- srcThrift.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
- srcThrift.setSerializationLib(ThriftDeserializer.class.getName());
- srcThrift.setSerdeParam(serdeConstants.SERIALIZATION_CLASS, Complex.class
- .getName());
- srcThrift.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT,
- TBinaryProtocol.class.getName());
- db.createTable(srcThrift);
-
- LinkedList<String> json_cols = new LinkedList<String>();
- json_cols.add("json");
- db.createTable("src_json", json_cols, null, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
-
- // load the input data into the src table
- fpath = new Path(testFiles, "kv1.txt");
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath() + "' INTO TABLE src");
-
- // load the input data into the src table
- fpath = new Path(testFiles, "kv3.txt");
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath() + "' INTO TABLE src1");
-
- // load the input data into the src_sequencefile table
- fpath = new Path(testFiles, "kv1.seq");
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE src_sequencefile");
-
- // load the input data into the src_thrift table
- fpath = new Path(testFiles, "complex.seq");
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE src_thrift");
-
- // load the json data into the src_json table
- fpath = new Path(testFiles, "json.txt");
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE src_json");
-
- FileSystem localFs = FileSystem.getLocal(conf);
- // create and load data into orc table
- fpath = new Path(testFiles, AllVectorTypesRecord.TABLE_NAME);
-
- runCreateTableCmd(AllVectorTypesRecord.TABLE_CREATE_COMMAND);
- runLoadCmd("LOAD DATA LOCAL INPATH '" + fpath.toUri().getPath()
- + "' INTO TABLE "+AllVectorTypesRecord.TABLE_NAME);
-
- runCmd("DROP FUNCTION IF EXISTS qtest_get_java_boolean ");
- runCmd("CREATE FUNCTION qtest_get_java_boolean "
- + " AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaBoolean'");
+ String initCommands = readEntireFileIntoString(new File(this.initScript));
+ LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
+ if(cliDriver == null) {
+ cliDriver = new CliDriver();
+ }
+ cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+ cliDriver.processLine(initCommands);
conf.setBoolean("hive.test.init.phase", false);
-
}
public void init() throws Exception {
@@ -786,33 +709,7 @@ public class QTestUtil {
public void init(String tname) throws Exception {
cleanUp();
createSources();
-
- LinkedList<String> cols = new LinkedList<String>();
- cols.add("key");
- cols.add("value");
-
- LinkedList<String> part_cols = new LinkedList<String>();
- part_cols.add("ds");
- part_cols.add("hr");
-
- db.createTable("dest1", cols, null, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
- db.createTable("dest2", cols, null, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
-
- db.createTable("dest3", cols, part_cols, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
- Table dest3 = db.getTable("dest3");
-
- HashMap<String, String> part_spec = new HashMap<String, String>();
- part_spec.put("ds", "2008-04-08");
- part_spec.put("hr", "12");
- db.createPartition(dest3, part_spec);
-
- db.createTable("dest4", cols, null, TextInputFormat.class,
- IgnoreKeyTextOutputFormat.class);
- db.createTable("dest4_sequencefile", cols, null,
- SequenceFileInputFormat.class, SequenceFileOutputFormat.class);
+ cliDriver.processCmd("set hive.cli.print.header=true;");
}
public void cliInit(String tname) throws Exception {
@@ -866,23 +763,38 @@ public class QTestUtil {
SessionState.start(ss);
cliDriver = new CliDriver();
+
if (tname.equals("init_file.q")) {
ss.initFiles.add("../../data/scripts/test_init_file.sql");
}
cliDriver.processInitFiles(ss);
+
return outf.getAbsolutePath();
}
private CliSessionState startSessionState()
- throws FileNotFoundException, UnsupportedEncodingException {
+ throws IOException {
HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
"org.apache.hadoop.hive.ql.security.DummyAuthenticator");
CliSessionState ss = new CliSessionState(conf);
assert ss != null;
+ ss.in = System.in;
+ ss.out = System.out;
+ ss.err = System.out;
+ SessionState oldSs = SessionState.get();
+ if (oldSs != null && clusterType == MiniClusterType.tez) {
+ oldSs.close();
+ }
+ if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
+ oldSs.out.close();
+ }
SessionState.start(ss);
+
+ isSessionStateStarted = true;
+
return ss;
}
@@ -1571,7 +1483,7 @@ public class QTestUtil {
{
QTestUtil[] qt = new QTestUtil[qfiles.length];
for (int i = 0; i < qfiles.length; i++) {
- qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20");
+ qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", "", "");
qt[i].addFile(qfiles[i]);
qt[i].clearTestSideEffects();
}
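
The net effect in QTestUtil: table setup and teardown move out of hand-written Java and into the two SQL scripts, resolved against test.data.scripts (falling back to ./data/scripts) and executed through CliDriver. Callers pick scripts per driver, or pass empty strings to get the defaults; a sketch, with outDir/logDir as placeholders:

    // Sketch of the two calling styles enabled by this patch.
    QTestUtil custom   = new QTestUtil(outDir, logDir,
        "my_init.sql", "my_cleanup.sql");       // hypothetical custom scripts
    QTestUtil defaults = new QTestUtil(outDir, logDir,
        "", "");  // empty -> q_test_init.sql / q_test_cleanup.sql defaults
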
Modified: hive/branches/spark/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/if/hive_metastore.thrift?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/spark/metastore/if/hive_metastore.thrift Sun Aug 10 01:33:50 2014
@@ -361,6 +361,11 @@ struct ColumnStatistics {
2: required list<ColumnStatisticsObj> statsObj;
}
+struct AggrStats {
+1: required list<ColumnStatisticsObj> colStats,
+2: required i64 partsFound // number of partitions for which stats were found
+}
+
// schema of the table/query results etc.
struct Schema {
// column names, types, comments
@@ -950,6 +955,9 @@ service ThriftHiveMetastore extends fb30
(1:NoSuchObjectException o1, 2:MetaException o2)
PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
(1:NoSuchObjectException o1, 2:MetaException o2)
+ AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
+ (1:NoSuchObjectException o1, 2:MetaException o2)
+
// delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]
// and col_name. If the delete API doesn't find the statistics record in the metastore, throws NoSuchObjectException
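
On the Thrift side, a hedged sketch of exercising the new aggregate-stats call through the generated Java client; PartitionsStatsRequest's setters are assumed from its existing definition, which this hunk does not include:

    // Sketch: fetch column stats aggregated across a set of partitions.
    // (uses java.util.Arrays; 'client' is a connected ThriftHiveMetastore.Client)
    PartitionsStatsRequest req = new PartitionsStatsRequest();
    req.setDbName("default");                    // assumed field
    req.setTblName("srcpart");                   // assumed field
    req.setColNames(Arrays.asList("key"));       // assumed field
    req.setPartNames(Arrays.asList("ds=2008-04-08/hr=11", "ds=2008-04-08/hr=12")); // assumed
    AggrStats stats = client.get_aggr_stats_for(req);
    long found = stats.getPartsFound();  // partitions for which stats were actually found
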