Posted to commits@hive.apache.org by br...@apache.org on 2014/08/10 03:33:55 UTC
svn commit: r1617040 [1/13] - in /hive/branches/spark: ./
ant/src/org/apache/hadoop/hive/ant/ beeline/
beeline/src/java/org/apache/hive/beeline/
common/src/java/org/apache/hadoop/hive/common/
common/src/java/org/apache/hadoop/hive/conf/ data/conf/ data...
Author: brock
Date: Sun Aug 10 01:33:50 2014
New Revision: 1617040
URL: http://svn.apache.org/r1617040
Log:
Merge from trunk to spark
Added:
hive/branches/spark/data/files/parquet_mixed_case
- copied unchanged from r1617007, hive/trunk/data/files/parquet_mixed_case
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseTableSnapshotInputFormatUtil.java
- copied unchanged from r1617007, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseTableSnapshotInputFormatUtil.java
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
- copied unchanged from r1617007, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
- copied unchanged from r1617007, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
hive/branches/spark/hbase-handler/src/test/queries/positive/hbase_handler_snapshot.q
- copied unchanged from r1617007, hive/trunk/hbase-handler/src/test/queries/positive/hbase_handler_snapshot.q
hive/branches/spark/hbase-handler/src/test/results/positive/hbase_handler_snapshot.q.out
- copied unchanged from r1617007, hive/trunk/hbase-handler/src/test/results/positive/hbase_handler_snapshot.q.out
hive/branches/spark/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithCredentialProvider.java
- copied unchanged from r1617007, hive/trunk/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithCredentialProvider.java
hive/branches/spark/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
- copied unchanged from r1617007, hive/trunk/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java
hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithConfig.java
- copied unchanged from r1617007, hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestPasswordWithConfig.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
- copied unchanged from r1617007, hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java
- copied unchanged from r1617007, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java
hive/branches/spark/ql/src/test/queries/clientnegative/authorization_drop_admin_role.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientnegative/authorization_drop_admin_role.q
hive/branches/spark/ql/src/test/queries/clientnegative/char_pad_convert_fail0.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientnegative/char_pad_convert_fail0.q
hive/branches/spark/ql/src/test/queries/clientnegative/char_pad_convert_fail1.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientnegative/char_pad_convert_fail1.q
hive/branches/spark/ql/src/test/queries/clientnegative/char_pad_convert_fail2.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientnegative/char_pad_convert_fail2.q
hive/branches/spark/ql/src/test/queries/clientnegative/char_pad_convert_fail3.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientnegative/char_pad_convert_fail3.q
hive/branches/spark/ql/src/test/queries/clientpositive/alter_merge_3.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_3.q
hive/branches/spark/ql/src/test/queries/clientpositive/char_pad_convert.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientpositive/char_pad_convert.q
hive/branches/spark/ql/src/test/queries/clientpositive/parquet_mixed_case.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientpositive/parquet_mixed_case.q
hive/branches/spark/ql/src/test/queries/clientpositive/vector_data_types.q
- copied unchanged from r1617007, hive/trunk/ql/src/test/queries/clientpositive/vector_data_types.q
hive/branches/spark/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
hive/branches/spark/ql/src/test/results/clientnegative/char_pad_convert_fail0.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientnegative/char_pad_convert_fail0.q.out
hive/branches/spark/ql/src/test/results/clientnegative/char_pad_convert_fail1.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientnegative/char_pad_convert_fail1.q.out
hive/branches/spark/ql/src/test/results/clientnegative/char_pad_convert_fail2.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientnegative/char_pad_convert_fail2.q.out
hive/branches/spark/ql/src/test/results/clientnegative/char_pad_convert_fail3.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientnegative/char_pad_convert_fail3.q.out
hive/branches/spark/ql/src/test/results/clientpositive/alter_merge_3.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/alter_merge_3.q.out
hive/branches/spark/ql/src/test/results/clientpositive/char_pad_convert.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/char_pad_convert.q.out
hive/branches/spark/ql/src/test/results/clientpositive/parquet_mixed_case.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/parquet_mixed_case.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_12.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_12.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vector_data_types.q.out
- copied unchanged from r1617007, hive/trunk/ql/src/test/results/clientpositive/vector_data_types.q.out
hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
- copied unchanged from r1617007, hive/trunk/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
Modified:
hive/branches/spark/ (props changed)
hive/branches/spark/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
hive/branches/spark/beeline/pom.xml
hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
hive/branches/spark/data/conf/hive-site.xml
hive/branches/spark/data/conf/tez/hive-site.xml
hive/branches/spark/data/files/dept.txt
hive/branches/spark/data/files/emp.txt
hive/branches/spark/data/files/loc.txt
hive/branches/spark/data/scripts/q_test_cleanup.sql
hive/branches/spark/data/scripts/q_test_init.sql
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
hive/branches/spark/hbase-handler/src/test/results/positive/external_table_ppd.q.out
hive/branches/spark/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
hive/branches/spark/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
hive/branches/spark/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java
hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java
hive/branches/spark/itests/qtest/pom.xml
hive/branches/spark/itests/qtest/testconfiguration.properties
hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
hive/branches/spark/metastore/if/hive_metastore.thrift
hive/branches/spark/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
hive/branches/spark/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
hive/branches/spark/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
hive/branches/spark/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
hive/branches/spark/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
hive/branches/spark/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
hive/branches/spark/metastore/src/gen/thrift/gen-php/metastore/Types.php
hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
hive/branches/spark/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
hive/branches/spark/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
hive/branches/spark/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
hive/branches/spark/pom.xml
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedBatchUtil.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedColumnarSerDe.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/AnnotateWithStatistics.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java
hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatchCtx.java
hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_filter.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_join.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_limit.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_part.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_select.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_table.q
hive/branches/spark/ql/src/test/queries/clientpositive/annotate_stats_union.q
hive/branches/spark/ql/src/test/queries/clientpositive/columnstats_partlvl.q
hive/branches/spark/ql/src/test/queries/clientpositive/parquet_columnar.q
hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_14.q
hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_15.q
hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_16.q
hive/branches/spark/ql/src/test/queries/clientpositive/vectorization_9.q
hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
hive/branches/spark/ql/src/test/results/clientnegative/authorization_public_drop.q.out
hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
hive/branches/spark/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_join.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_part.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_select.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_table.q.out
hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_union.q.out
hive/branches/spark/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
hive/branches/spark/ql/src/test/results/clientpositive/combine2.q.out
hive/branches/spark/ql/src/test/results/clientpositive/groupby_sort_11.q.out
hive/branches/spark/ql/src/test/results/clientpositive/input24.q.out
hive/branches/spark/ql/src/test/results/clientpositive/input25.q.out
hive/branches/spark/ql/src/test/results/clientpositive/metadataonly1.q.out
hive/branches/spark/ql/src/test/results/clientpositive/nullgroup3.q.out
hive/branches/spark/ql/src/test/results/clientpositive/parquet_columnar.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/union5.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/union7.q.out
hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
hive/branches/spark/ql/src/test/results/clientpositive/udf_explode.q.out
hive/branches/spark/ql/src/test/results/clientpositive/udtf_explode.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union11.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union14.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union15.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union17.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union20.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union21.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union5.q.out
hive/branches/spark/ql/src/test/results/clientpositive/union7.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vectorization_14.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vectorization_15.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vectorization_16.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vectorization_9.q.out
hive/branches/spark/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
hive/branches/spark/ql/src/test/templates/TestCliDriver.vm
hive/branches/spark/ql/src/test/templates/TestCompareCliDriver.vm
hive/branches/spark/ql/src/test/templates/TestNegativeCliDriver.vm
hive/branches/spark/ql/src/test/templates/TestParse.vm
hive/branches/spark/ql/src/test/templates/TestParseNegative.vm
hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
hive/branches/spark/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java
hive/branches/spark/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
hive/branches/spark/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
Propchange: hive/branches/spark/
------------------------------------------------------------------------------
Merged /hive/trunk:r1615452-1617007
Modified: hive/branches/spark/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java (original)
+++ hive/branches/spark/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java Sun Aug 10 01:33:50 2014
@@ -141,6 +141,10 @@ public class QTestGenTask extends Task {
private String hadoopVersion;
+ private String initScript;
+
+ private String cleanupScript;
+
public void setHadoopVersion(String ver) {
this.hadoopVersion = ver;
}
@@ -197,6 +201,22 @@ public class QTestGenTask extends Task {
return template;
}
+ public String getInitScript() {
+ return initScript;
+ }
+
+ public void setInitScript(String initScript) {
+ this.initScript = initScript;
+ }
+
+ public String getCleanupScript() {
+ return cleanupScript;
+ }
+
+ public void setCleanupScript(String cleanupScript) {
+ this.cleanupScript = cleanupScript;
+ }
+
public void setHiveRootDirectory(File hiveRootDirectory) {
try {
this.hiveRootDirectory = hiveRootDirectory.getCanonicalPath();
@@ -444,6 +464,8 @@ public class QTestGenTask extends Task {
ctx.put("clusterMode", clusterMode);
ctx.put("hiveConfDir", escapePath(hiveConfDir));
ctx.put("hadoopVersion", hadoopVersion);
+ ctx.put("initScript", initScript);
+ ctx.put("cleanupScript", cleanupScript);
File outFile = new File(outDir, className + ".java");
FileWriter writer = new FileWriter(outFile);
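For context, the two properties added above flow into the Velocity context that QTestGenTask uses to render the Test*CliDriver classes, so the templates (TestCliDriver.vm and friends, modified below) can reference them as $initScript and $cleanupScript. A minimal sketch of that evaluation step, assuming Velocity on the classpath (the class name and the inline one-line template are illustrative, not part of this commit):

    import java.io.StringWriter;
    import org.apache.velocity.VelocityContext;
    import org.apache.velocity.app.Velocity;

    public class VelocityContextSketch {
      public static void main(String[] args) {
        Velocity.init();
        VelocityContext ctx = new VelocityContext();
        // Mirrors the ctx.put() calls added to QTestGenTask above.
        ctx.put("initScript", "q_test_init.sql");
        ctx.put("cleanupScript", "q_test_cleanup.sql");
        StringWriter out = new StringWriter();
        // Stand-in for merging the real .vm template file.
        Velocity.evaluate(ctx, out, "sketch", "init=$initScript cleanup=$cleanupScript");
        System.out.println(out); // init=q_test_init.sql cleanup=q_test_cleanup.sql
      }
    }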
Modified: hive/branches/spark/beeline/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/beeline/pom.xml?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/beeline/pom.xml (original)
+++ hive/branches/spark/beeline/pom.xml Sun Aug 10 01:33:50 2014
@@ -48,7 +48,6 @@
<groupId>org.apache.hive</groupId>
<artifactId>hive-shims</artifactId>
<version>${project.version}</version>
- <scope>runtime</scope>
</dependency>
<!-- inter-project -->
<dependency>
Modified: hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java (original)
+++ hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java Sun Aug 10 01:33:50 2014
@@ -48,6 +48,7 @@ import org.apache.hadoop.hive.conf.HiveC
import org.apache.hadoop.hive.metastore.HiveMetaException;
import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hive.beeline.HiveSchemaHelper.NestedScriptParser;
public class HiveSchemaTool {
@@ -72,7 +73,12 @@ public class HiveSchemaTool {
this.dbType = dbType;
this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, hiveConf, dbType);
userName = hiveConf.get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname);
- passWord = hiveConf.get(HiveConf.ConfVars.METASTOREPWD.varname);
+ try {
+ passWord = ShimLoader.getHadoopShims().getPassword(hiveConf,
+ HiveConf.ConfVars.METASTOREPWD.varname);
+ } catch (IOException err) {
+ throw new HiveMetaException("Error getting metastore password", err);
+ }
}
public HiveConf getHiveConf() {
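For context, the change above routes the metastore password through the Hadoop shim so it can come from a Hadoop CredentialProvider rather than sitting in plain text in hive-site.xml (see TestPasswordWithCredentialProvider in the Added list). A minimal standalone sketch of the same lookup, assuming a configured HiveConf (the class name and println are illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class PasswordLookupSketch {
      public static void main(String[] args) throws Exception {
        HiveConf hiveConf = new HiveConf();
        // Same call as in the diff: the shim resolves METASTOREPWD, consulting
        // a credential provider when one is configured.
        String passWord = ShimLoader.getHadoopShims().getPassword(
            hiveConf, HiveConf.ConfVars.METASTOREPWD.varname);
        System.out.println(passWord == null ? "no password configured" : "password resolved");
      }
    }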
Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Sun Aug 10 01:33:50 2014
@@ -22,6 +22,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
@@ -352,35 +354,47 @@ public final class FileUtils {
}
/**
- * Check if the given FileStatus indicates that the action is allowed for
- * userName. It checks the group and other permissions also to determine this.
- *
- * @param userName
- * @param fsStatus
- * @param action
- * @return true if it is writable for userName
- */
- public static boolean isActionPermittedForUser(String userName, FileStatus fsStatus, FsAction action) {
- FsPermission permissions = fsStatus.getPermission();
- // check user perm
- if (fsStatus.getOwner().equals(userName)
- && permissions.getUserAction().implies(action)) {
- return true;
- }
- // check other perm
- if (permissions.getOtherAction().implies(action)) {
- return true;
- }
- // check group perm after ensuring user belongs to the file owner group
- String fileGroup = fsStatus.getGroup();
- String[] userGroups = UserGroupInformation.createRemoteUser(userName).getGroupNames();
- for (String group : userGroups) {
- if (group.equals(fileGroup)) {
- // user belongs to the file group
- return permissions.getGroupAction().implies(action);
+ * Perform a check to determine if the user is able to access the file passed in.
+ * If the user name passed in is different from the current user, this method will
+ * attempt to impersonate the user to do the check; the current user should be
+ * able to create proxy users in this case.
+ * @param fs FileSystem of the path to check
+ * @param stat FileStatus representing the file
+ * @param action FsAction that will be checked
+ * @param user User name of the user that will be checked for access. If the user name
+ * is null or the same as the current user, no user impersonation will be done
+ * and the check will be done as the current user. Otherwise the file access
+ * check will be performed within a doAs() block to use the access privileges
+ * of this user. In this case the user must be configured to impersonate other
+ * users, otherwise this check will fail with an error.
+ * @param groups List of groups for the user
+ * @throws IOException
+ * @throws AccessControlException
+ * @throws InterruptedException
+ * @throws Exception
+ */
+ public static void checkFileAccessWithImpersonation(final FileSystem fs,
+ final FileStatus stat, final FsAction action, final String user)
+ throws IOException, AccessControlException, InterruptedException, Exception {
+ UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(fs.getConf());
+ String currentUser = ShimLoader.getHadoopShims().getShortUserName(ugi);
+
+ if (user == null || currentUser.equals(user)) {
+ // No need to impersonate user, do the checks as the currently configured user.
+ ShimLoader.getHadoopShims().checkFileAccess(fs, stat, action);
+ return;
+ }
+
+ // Otherwise, try user impersonation. Current user must be configured to do user impersonation.
+ UserGroupInformation proxyUser = ShimLoader.getHadoopShims().createProxyUser(user);
+ ShimLoader.getHadoopShims().doAs(proxyUser, new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
+ ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
+ return null;
}
- }
- return false;
+ });
}
/**
@@ -395,7 +409,7 @@ public final class FileUtils {
* @throws IOException
*/
public static boolean isActionPermittedForFileHierarchy(FileSystem fs, FileStatus fileStatus,
- String userName, FsAction action) throws IOException {
+ String userName, FsAction action) throws Exception {
boolean isDir = fileStatus.isDir();
FsAction dirActionNeeded = action;
@@ -403,7 +417,11 @@ public final class FileUtils {
// for dirs user needs execute privileges as well
dirActionNeeded.and(FsAction.EXECUTE);
}
- if (!isActionPermittedForUser(userName, fileStatus, dirActionNeeded)) {
+
+ try {
+ checkFileAccessWithImpersonation(fs, fileStatus, action, userName);
+ } catch (AccessControlException err) {
+ // Action not permitted for user
return false;
}
@@ -595,4 +613,19 @@ public final class FileUtils {
return false;
}
}
+
+ /**
+ * @param fs1
+ * @param fs2
+ * @return return true if both file system arguments point to same file system
+ */
+ public static boolean equalsFileSystem(FileSystem fs1, FileSystem fs2) {
+ // When the file system cache is disabled, you get different FileSystem
+ // objects for the same file system, so '==' can't be used in such cases.
+ // The FileSystem API doesn't implement .equals(), so we compare URIs instead;
+ // FileSystem itself already uses uri+Configuration for equality in its CACHE.
+ // Once equality has been added in HDFS-4321, we should make use of it.
+ return fs1.getUri().equals(fs2.getUri());
+ }
}
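For context, callers now use checkFileAccessWithImpersonation() instead of the removed permission-bit walk; denial surfaces as an AccessControlException rather than a false return. A minimal usage sketch (the path and user name are hypothetical, and when the user differs from the current UGI the caller must be configured as a proxy user):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hive.common.FileUtils;

    public class AccessCheckSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus stat = fs.getFileStatus(new Path("/user/hive/warehouse/t1")); // hypothetical
        // Checks READ access for "bob", impersonating him via doAs() when he is
        // not the current user; throws AccessControlException on denial.
        FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.READ, "bob");
      }
    }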
Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java Sun Aug 10 01:33:50 2014
@@ -124,6 +124,7 @@ public final class JavaUtils {
newOutputStream.close();
}
}
+ LogFactory.release(loader);
}
private JavaUtils() {
Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Sun Aug 10 01:33:50 2014
@@ -1098,7 +1098,9 @@ public class HiveConf extends Configurat
"Whether queries will fail because stats cannot be collected completely accurately. \n" +
"If this is set to true, reading/writing from/into a partition may fail because the stats\n" +
"could not be computed accurately."),
-
+ HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true,
+ "analyze table T compute statistics for columns. Queries like these should compute partition"
+ + "level stats for partitioned table even when no part spec is specified."),
HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10,
"Number of threads used by partialscan/noscan analyze command for partitioned tables.\n" +
"This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."),
@@ -1256,6 +1258,9 @@ public class HiveConf extends Configurat
"Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
"True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
+ HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
+ HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
+ "restore the HBase table snapshot."),
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
Modified: hive/branches/spark/data/conf/hive-site.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/conf/hive-site.xml?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/conf/hive-site.xml (original)
+++ hive/branches/spark/data/conf/hive-site.xml Sun Aug 10 01:33:50 2014
@@ -112,6 +112,12 @@
</property>
<property>
+ <name>test.data.scripts</name>
+ <value>${hive.root}/data/scripts</value>
+ <description></description>
+</property>
+
+<property>
<name>hive.jar.path</name>
<value>${maven.local.repository}/org/apache/hive/hive-exec/${hive.version}/hive-exec-${hive.version}.jar</value>
<description></description>
Modified: hive/branches/spark/data/conf/tez/hive-site.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/conf/tez/hive-site.xml?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
Binary files - no diff available.
Modified: hive/branches/spark/data/files/dept.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/files/dept.txt?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/files/dept.txt (original)
+++ hive/branches/spark/data/files/dept.txt Sun Aug 10 01:33:50 2014
@@ -2,3 +2,5 @@
33|engineering
34|clerical
35|marketing
+36|transport
+37|hr
Modified: hive/branches/spark/data/files/emp.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/files/emp.txt?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/files/emp.txt (original)
+++ hive/branches/spark/data/files/emp.txt Sun Aug 10 01:33:50 2014
@@ -1,6 +1,48 @@
-Rafferty|31
-Jones|33
-Steinberg|33
-Robinson|34
-Smith|34
-John|
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
+Rafferty|31|1
+Jones|33|2
+Steinberg|33|3
+Robinson|34|4
+Smith|34|5
+John|31|6
Modified: hive/branches/spark/data/files/loc.txt
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/files/loc.txt?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/files/loc.txt (original)
+++ hive/branches/spark/data/files/loc.txt Sun Aug 10 01:33:50 2014
@@ -1,8 +1,8 @@
-OH|31|43201|2001
-IO|32|43202|2001
-CA|35|43809|2001
-FL|33|54342|2001
-UT|35||2001
-CA|35|43809|2001
-|34|40000|
-FL|33|54342|2001
+OH|1|43201|2001
+IO|2|43202|2001
+CA|5|43809|2001
+FL|3|54342|2001
+UT|5||2001
+CA|5|43809|2001
+|4|40000|
+FL|6|54342|2001
Modified: hive/branches/spark/data/scripts/q_test_cleanup.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/scripts/q_test_cleanup.sql?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/scripts/q_test_cleanup.sql (original)
+++ hive/branches/spark/data/scripts/q_test_cleanup.sql Sun Aug 10 01:33:50 2014
@@ -7,4 +7,12 @@ DROP TABLE IF EXISTS srcbucket;
DROP TABLE IF EXISTS srcbucket2;
DROP TABLE IF EXISTS srcpart;
DROP TABLE IF EXISTS primitives;
-
+DROP TABLE IF EXISTS dest1;
+DROP TABLE IF EXISTS dest2;
+DROP TABLE IF EXISTS dest3;
+DROP TABLE IF EXISTS dest4;
+DROP TABLE IF EXISTS dest4_sequencefile;
+DROP TABLE IF EXISTS dest_j1;
+DROP TABLE IF EXISTS dest_g1;
+DROP TABLE IF EXISTS dest_g2;
+DROP TABLE IF EXISTS fetchtask_ioexception;
Modified: hive/branches/spark/data/scripts/q_test_init.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/data/scripts/q_test_init.sql?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/data/scripts/q_test_init.sql (original)
+++ hive/branches/spark/data/scripts/q_test_init.sql Sun Aug 10 01:33:50 2014
@@ -3,7 +3,7 @@
--
DROP TABLE IF EXISTS src;
-CREATE TABLE src (key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE src (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv1.txt" INTO TABLE src;
@@ -12,7 +12,7 @@ LOAD DATA LOCAL INPATH "${hiveconf:test.
--
DROP TABLE IF EXISTS src1;
-CREATE TABLE src1 (key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE src1 (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv3.txt" INTO TABLE src1;
@@ -21,7 +21,7 @@ LOAD DATA LOCAL INPATH "${hiveconf:test.
--
DROP TABLE IF EXISTS src_json;
-CREATE TABLE src_json (json STRING) STORED AS TEXTFILE;
+CREATE TABLE src_json (json STRING COMMENT 'default') STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/json.txt" INTO TABLE src_json;
@@ -31,7 +31,7 @@ LOAD DATA LOCAL INPATH "${hiveconf:test.
--
DROP TABLE IF EXISTS src_sequencefile;
-CREATE TABLE src_sequencefile (key STRING, value STRING) STORED AS SEQUENCEFILE;
+CREATE TABLE src_sequencefile (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS SEQUENCEFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv1.seq" INTO TABLE src_sequencefile;
@@ -45,7 +45,7 @@ CREATE TABLE src_thrift
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer'
WITH SERDEPROPERTIES (
'serialization.class' = 'org.apache.hadoop.hive.serde2.thrift.test.Complex',
- 'serialization.format' = 'com.facebook.thrift.protocol.TBinaryProtocol')
+ 'serialization.format' = 'org.apache.thrift.protocol.TBinaryProtocol')
STORED AS SEQUENCEFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/complex.seq" INTO TABLE src_thrift;
@@ -75,6 +75,8 @@ STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/srcbucket20.txt" INTO TABLE srcbucket2;
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/srcbucket21.txt" INTO TABLE srcbucket2;
+LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/srcbucket22.txt" INTO TABLE srcbucket2;
+LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/srcbucket23.txt" INTO TABLE srcbucket2;
--
@@ -82,7 +84,7 @@ LOAD DATA LOCAL INPATH "${hiveconf:test.
--
DROP TABLE IF EXISTS srcpart;
-CREATE TABLE srcpart (key STRING, value STRING)
+CREATE TABLE srcpart (key STRING COMMENT 'default', value STRING COMMENT 'default')
PARTITIONED BY (ds STRING, hr STRING)
STORED AS TEXTFILE;
@@ -99,20 +101,46 @@ LOAD DATA LOCAL INPATH "${hiveconf:test.
OVERWRITE INTO TABLE srcpart PARTITION (ds="2008-04-09", hr="12");
+--
+-- Table alltypesorc
+--
+DROP TABLE IF EXISTS alltypesorc;
+CREATE TABLE alltypesorc(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN)
+ STORED AS ORC;
+
+LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/alltypesorc"
+OVERWRITE INTO TABLE alltypesorc;
+
+
+--
+-- Table primitives
+--
DROP TABLE IF EXISTS primitives;
CREATE TABLE primitives (
- id INT,
- bool_col BOOLEAN,
- tinyint_col TINYINT,
- smallint_col SMALLINT,
- int_col INT,
- bigint_col BIGINT,
- float_col FLOAT,
- double_col DOUBLE,
- date_string_col STRING,
- string_col STRING,
- timestamp_col TIMESTAMP)
-PARTITIONED BY (year INT, month INT)
+ id INT COMMENT 'default',
+ bool_col BOOLEAN COMMENT 'default',
+ tinyint_col TINYINT COMMENT 'default',
+ smallint_col SMALLINT COMMENT 'default',
+ int_col INT COMMENT 'default',
+ bigint_col BIGINT COMMENT 'default',
+ float_col FLOAT COMMENT 'default',
+ double_col DOUBLE COMMENT 'default',
+ date_string_col STRING COMMENT 'default',
+ string_col STRING COMMENT 'default',
+ timestamp_col TIMESTAMP COMMENT 'default')
+PARTITIONED BY (year INT COMMENT 'default', month INT COMMENT 'default')
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
ESCAPED BY '\\'
@@ -130,3 +158,60 @@ OVERWRITE INTO TABLE primitives PARTITIO
LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/types/primitives/090401.txt"
OVERWRITE INTO TABLE primitives PARTITION(year=2009, month=4);
+--
+-- Function qtest_get_java_boolean
+--
+DROP FUNCTION IF EXISTS qtest_get_java_boolean;
+CREATE FUNCTION qtest_get_java_boolean AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaBoolean';
+
+--
+-- Table dest1
+--
+DROP TABLE IF EXISTS dest1;
+
+CREATE TABLE dest1 (key STRING COMMENT 'default', value STRING COMMENT 'default')
+STORED AS
+INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
+
+--
+-- Table dest2
+--
+DROP TABLE IF EXISTS dest2;
+
+CREATE TABLE dest2 (key STRING COMMENT 'default', value STRING COMMENT 'default')
+STORED AS
+INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
+
+--
+-- Table dest3
+--
+DROP TABLE IF EXISTS dest3;
+
+CREATE TABLE dest3 (key STRING COMMENT 'default', value STRING COMMENT 'default')
+PARTITIONED BY (ds STRING, hr STRING)
+STORED AS
+INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
+ALTER TABLE dest3 ADD PARTITION (ds='2008-04-08',hr='12');
+
+--
+-- Table dest4
+--
+DROP TABLE IF EXISTS dest4;
+
+CREATE TABLE dest4 (key STRING COMMENT 'default', value STRING COMMENT 'default')
+STORED AS
+INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
+
+--
+-- Table dest4_sequencefile
+--
+DROP TABLE IF EXISTS dest4_sequencefile;
+
+CREATE TABLE dest4_sequencefile (key STRING COMMENT 'default', value STRING COMMENT 'default')
+STORED AS
+INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
\ No newline at end of file
Modified: hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java (original)
+++ hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java Sun Aug 10 01:33:50 2014
@@ -31,46 +31,86 @@ import org.apache.hadoop.mapred.InputSpl
* HBaseSplit augments FileSplit with HBase column mapping.
*/
public class HBaseSplit extends FileSplit implements InputSplit {
- private final TableSplit split;
+ private final TableSplit tableSplit;
+ private final InputSplit snapshotSplit;
+ private boolean isTableSplit; // would be final, but Writable deserialization must set it
+
+ /**
+ * For Writable
+ */
public HBaseSplit() {
super((Path) null, 0, 0, (String[]) null);
- split = new TableSplit();
+ tableSplit = new TableSplit();
+ snapshotSplit = HBaseTableSnapshotInputFormatUtil.createTableSnapshotRegionSplit();
}
- public HBaseSplit(TableSplit split, Path dummyPath) {
+ public HBaseSplit(TableSplit tableSplit, Path dummyPath) {
super(dummyPath, 0, 0, (String[]) null);
- this.split = split;
+ this.tableSplit = tableSplit;
+ this.snapshotSplit = HBaseTableSnapshotInputFormatUtil.createTableSnapshotRegionSplit();
+ this.isTableSplit = true;
}
- public TableSplit getSplit() {
- return this.split;
+ /**
+ * TODO: use TableSnapshotRegionSplit once HBASE-11555 is fixed.
+ */
+ public HBaseSplit(InputSplit snapshotSplit, Path dummyPath) {
+ super(dummyPath, 0, 0, (String[]) null);
+ this.tableSplit = new TableSplit();
+ this.snapshotSplit = snapshotSplit;
+ this.isTableSplit = false;
}
- @Override
- public void readFields(DataInput in) throws IOException {
- super.readFields(in);
- split.readFields(in);
+ public TableSplit getTableSplit() {
+ assert isTableSplit;
+ return this.tableSplit;
+ }
+
+ public InputSplit getSnapshotSplit() {
+ assert !isTableSplit;
+ return this.snapshotSplit;
}
@Override
public String toString() {
- return "TableSplit " + split;
+ return "" + (isTableSplit ? tableSplit : snapshotSplit);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ this.isTableSplit = in.readBoolean();
+ if (this.isTableSplit) {
+ tableSplit.readFields(in);
+ } else {
+ snapshotSplit.readFields(in);
+ }
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
- split.write(out);
+ out.writeBoolean(isTableSplit);
+ if (isTableSplit) {
+ tableSplit.write(out);
+ } else {
+ snapshotSplit.write(out);
+ }
}
@Override
public long getLength() {
- return split.getLength();
+ long val = 0;
+ try {
+ val = isTableSplit ? tableSplit.getLength() : snapshotSplit.getLength();
+ } finally {
+ return val;
+ }
}
@Override
public String[] getLocations() throws IOException {
- return split.getLocations();
+ return isTableSplit ? tableSplit.getLocations() : snapshotSplit.getLocations();
}
}
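For context, HBaseSplit above is effectively a tagged union of two Writables: a boolean discriminator is serialized before the payload so readFields() knows which delegate to populate. The same pattern in miniature, with illustrative Text payloads in place of Hive's split types:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;

    public class EitherWritableSketch implements Writable {
      private boolean isLeft; // discriminator, mirrors isTableSplit
      private final Text left = new Text();
      private final Text right = new Text();

      @Override
      public void write(DataOutput out) throws IOException {
        out.writeBoolean(isLeft);           // tag first...
        (isLeft ? left : right).write(out); // ...then only the active payload
      }

      @Override
      public void readFields(DataInput in) throws IOException {
        isLeft = in.readBoolean();          // read the tag to pick the delegate
        (isLeft ? left : right).readFields(in);
      }
    }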
Modified: hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java (original)
+++ hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java Sun Aug 10 01:33:50 2014
@@ -29,7 +29,10 @@ import java.util.Properties;
import java.util.Set;
import org.apache.commons.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -69,6 +72,19 @@ import org.apache.hadoop.util.StringUtil
public class HBaseStorageHandler extends DefaultStorageHandler
implements HiveMetaHook, HiveStoragePredicateHandler {
+ private static final Log LOG = LogFactory.getLog(HBaseStorageHandler.class);
+
+ /** HBase-internal config by which input format receives snapshot name. */
+ private static final String HBASE_SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+ /** HBase-internal config by which input format received restore dir before HBASE-11335. */
+ private static final String HBASE_SNAPSHOT_TABLE_DIR_KEY = "hbase.TableSnapshotInputFormat.table.dir";
+ /** HBase-internal config by which input format received restore dir after HBASE-11335. */
+ private static final String HBASE_SNAPSHOT_RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+ /** HBase config by which a SlabCache is sized. */
+ private static final String HBASE_OFFHEAP_PCT_KEY = "hbase.offheapcache.percentage";
+ /** HBase config by which a BucketCache is sized. */
+ private static final String HBASE_BUCKETCACHE_SIZE_KEY = "hbase.bucketcache.size";
+
final static public String DEFAULT_PREFIX = "default.";
//Check if the configure job properties is called from input
@@ -258,6 +274,11 @@ public class HBaseStorageHandler extends
@Override
public Class<? extends InputFormat> getInputFormatClass() {
+ if (HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_NAME) != null) {
+ LOG.debug("Using TableSnapshotInputFormat");
+ return HiveHBaseTableSnapshotInputFormat.class;
+ }
+ LOG.debug("Using HiveHBaseTableInputFormat");
return HiveHBaseTableInputFormat.class;
}
@@ -342,6 +363,37 @@ public class HBaseStorageHandler extends
// do this for reconciling HBaseStorageHandler for use in HCatalog
// check to see if this is an input job or an output job
if (this.configureInputJobProps) {
+ String snapshotName = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_NAME);
+ if (snapshotName != null) {
+ HBaseTableSnapshotInputFormatUtil.assertSupportsTableSnapshots();
+
+ try {
+ String restoreDir =
+ HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_RESTORE_DIR);
+ if (restoreDir == null) {
+ throw new IllegalArgumentException(
+ "Cannot process HBase snapshot without specifying " + HiveConf.ConfVars
+ .HIVE_HBASE_SNAPSHOT_RESTORE_DIR);
+ }
+
+ HBaseTableSnapshotInputFormatUtil.configureJob(hbaseConf, snapshotName, new Path(restoreDir));
+ // copy over configs touched by above method
+ jobProperties.put(HBASE_SNAPSHOT_NAME_KEY, hbaseConf.get(HBASE_SNAPSHOT_NAME_KEY));
+ if (hbaseConf.get(HBASE_SNAPSHOT_TABLE_DIR_KEY, null) != null) {
+ jobProperties.put(HBASE_SNAPSHOT_TABLE_DIR_KEY, hbaseConf.get(HBASE_SNAPSHOT_TABLE_DIR_KEY));
+ } else {
+ jobProperties.put(HBASE_SNAPSHOT_RESTORE_DIR_KEY, hbaseConf.get(HBASE_SNAPSHOT_RESTORE_DIR_KEY));
+ }
+
+ TableMapReduceUtil.resetCacheConfig(hbaseConf);
+ // copy over configs touched by above method
+ jobProperties.put(HBASE_OFFHEAP_PCT_KEY, hbaseConf.get(HBASE_OFFHEAP_PCT_KEY));
+ jobProperties.put(HBASE_BUCKETCACHE_SIZE_KEY, hbaseConf.get(HBASE_BUCKETCACHE_SIZE_KEY));
+ } catch (IOException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+
for (String k : jobProperties.keySet()) {
jobConf.set(k, jobProperties.get(k));
}
@@ -415,7 +467,8 @@ public class HBaseStorageHandler extends
* only need TableMapReduceUtil.addDependencyJars(jobConf) here.
*/
TableMapReduceUtil.addDependencyJars(
- jobConf, HBaseStorageHandler.class, TableInputFormatBase.class);
+ jobConf, HBaseStorageHandler.class, TableInputFormatBase.class,
+ org.cliffc.high_scale_lib.Counter.class); // this will be removed for HBase 1.0
Set<String> merged = new LinkedHashSet<String>(jobConf.getStringCollection("tmpjars"));
Job copy = new Job(jobConf);
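For context, getInputFormatClass() above switches a table over to HiveHBaseTableSnapshotInputFormat whenever hive.hbase.snapshot.name is set. A minimal sketch of wiring that up programmatically with the two new ConfVars (the snapshot name and restore directory values are hypothetical):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SnapshotConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // HIVE_HBASE_SNAPSHOT_NAME: selects the snapshot-reading input format.
        conf.set("hive.hbase.snapshot.name", "customers_snap");
        // HIVE_HBASE_SNAPSHOT_RESTORE_DIR: where the snapshot is restored (defaults to /tmp).
        conf.set("hive.hbase.snapshot.restoredir", "/tmp/hbase-restore");
      }
    }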
Modified: hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java (original)
+++ hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java Sun Aug 10 01:33:50 2014
@@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.plan.Ex
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.ByteStream;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.io.ByteWritable;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -88,90 +87,11 @@ public class HiveHBaseTableInputFormat e
final Reporter reporter) throws IOException {
HBaseSplit hbaseSplit = (HBaseSplit) split;
- TableSplit tableSplit = hbaseSplit.getSplit();
- String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
- setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
- String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
- boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
- List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);
- ColumnMappings columnMappings;
-
- try {
- columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping, doColumnRegexMatching);
- } catch (SerDeException e) {
- throw new IOException(e);
- }
+ TableSplit tableSplit = hbaseSplit.getTableSplit();
- if (columnMappings.size() < readColIDs.size()) {
- throw new IOException("Cannot read more columns than the given table contains.");
- }
+ setHTable(HiveHBaseInputFormatUtil.getTable(jobConf));
+ setScan(HiveHBaseInputFormatUtil.getScan(jobConf));
- boolean readAllColumns = ColumnProjectionUtils.isReadAllColumns(jobConf);
- Scan scan = new Scan();
- boolean empty = true;
-
- // The list of families that have been added to the scan
- List<String> addedFamilies = new ArrayList<String>();
-
- if (!readAllColumns) {
- ColumnMapping[] columnsMapping = columnMappings.getColumnsMapping();
- for (int i : readColIDs) {
- ColumnMapping colMap = columnsMapping[i];
- if (colMap.hbaseRowKey) {
- continue;
- }
-
- if (colMap.qualifierName == null) {
- scan.addFamily(colMap.familyNameBytes);
- addedFamilies.add(colMap.familyName);
- } else {
- if(!addedFamilies.contains(colMap.familyName)){
- // add only if the corresponding family has not already been added
- scan.addColumn(colMap.familyNameBytes, colMap.qualifierNameBytes);
- }
- }
-
- empty = false;
- }
- }
-
- // The HBase table's row key maps to a Hive table column. In the corner case when only the
- // row key column is selected in Hive, the HBase Scan will be empty i.e., no column family/
- // column qualifier will have been added to the scan. We arbitrarily add at least one column
- // to the HBase scan so that we can retrieve all of the row keys and return them as the Hive
- // table's column projection.
- if (empty) {
- for (ColumnMapping colMap: columnMappings) {
- if (colMap.hbaseRowKey) {
- continue;
- }
-
- if (colMap.qualifierName == null) {
- scan.addFamily(colMap.familyNameBytes);
- } else {
- scan.addColumn(colMap.familyNameBytes, colMap.qualifierNameBytes);
- }
-
- if (!readAllColumns) {
- break;
- }
- }
- }
-
- String scanCache = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHE);
- if (scanCache != null) {
- scan.setCaching(Integer.valueOf(scanCache));
- }
- String scanCacheBlocks = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS);
- if (scanCacheBlocks != null) {
- scan.setCacheBlocks(Boolean.valueOf(scanCacheBlocks));
- }
- String scanBatch = jobConf.get(HBaseSerDe.HBASE_SCAN_BATCH);
- if (scanBatch != null) {
- scan.setBatch(Integer.valueOf(scanBatch));
- }
-
- setScan(scan);
Job job = new Job(jobConf);
TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
job.getConfiguration(), reporter);
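
A sketch of the scan-tuning logic that now lives in HiveHBaseInputFormatUtil.getScan, reconstructed from the removed lines above (the util's exact body is not shown in this diff):

  Scan scan = new Scan();
  String scanCache = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHE);
  if (scanCache != null) {
    scan.setCaching(Integer.valueOf(scanCache));
  }
  String scanCacheBlocks = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS);
  if (scanCacheBlocks != null) {
    scan.setCacheBlocks(Boolean.valueOf(scanCacheBlocks));
  }
  String scanBatch = jobConf.get(HBaseSerDe.HBASE_SCAN_BATCH);
  if (scanBatch != null) {
    scan.setBatch(Integer.valueOf(scanBatch));
  }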
@@ -443,12 +363,12 @@ public class HiveHBaseTableInputFormat e
boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
if (hbaseColumnsMapping == null) {
- throw new IOException("hbase.columns.mapping required for HBase Table.");
+ throw new IOException(HBaseSerDe.HBASE_COLUMNS_MAPPING + " required for HBase Table.");
}
ColumnMappings columnMappings = null;
try {
- columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping,doColumnRegexMatching);
+ columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping, doColumnRegexMatching);
} catch (SerDeException e) {
throw new IOException(e);
}
@@ -463,10 +383,9 @@ public class HiveHBaseTableInputFormat e
// definition into account and excludes regions which don't satisfy
// the start/stop row conditions (HBASE-1829).
Scan scan = createFilterScan(jobConf, iKey,
- getStorageFormatOfKey(keyMapping.mappingSpec,
+ HiveHBaseInputFormatUtil.getStorageFormatOfKey(keyMapping.mappingSpec,
jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
-
// The list of families that have been added to the scan
List<String> addedFamilies = new ArrayList<String>();
@@ -503,28 +422,4 @@ public class HiveHBaseTableInputFormat e
return results;
}
-
- private boolean getStorageFormatOfKey(String spec, String defaultFormat) throws IOException{
-
- String[] mapInfo = spec.split("#");
- boolean tblLevelDefault = "binary".equalsIgnoreCase(defaultFormat) ? true : false;
-
- switch (mapInfo.length) {
- case 1:
- return tblLevelDefault;
-
- case 2:
- String storageType = mapInfo[1];
- if(storageType.equals("-")) {
- return tblLevelDefault;
- } else if ("string".startsWith(storageType)){
- return false;
- } else if ("binary".startsWith(storageType)){
- return true;
- }
-
- default:
- throw new IOException("Malformed string: " + spec);
- }
- }
}
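
Worked examples of the key-storage spec the removed method parsed (behavior taken from the code above; the replacement call in HiveHBaseInputFormatUtil is assumed equivalent):

  // ":key"     -> table-level default (string unless HBASE_TABLE_DEFAULT_STORAGE_TYPE is "binary")
  // ":key#-"   -> table-level default
  // ":key#s"   -> string storage ("string".startsWith("s"))
  // ":key#b"   -> binary storage ("binary".startsWith("b"))
  // ":key#a#b" -> IOException("Malformed string: ...")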
Modified: hive/branches/spark/hbase-handler/src/test/results/positive/external_table_ppd.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/test/results/positive/external_table_ppd.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/test/results/positive/external_table_ppd.q.out (original)
+++ hive/branches/spark/hbase-handler/src/test/results/positive/external_table_ppd.q.out Sun Aug 10 01:33:50 2014
@@ -63,8 +63,8 @@ Table Parameters:
# Storage Information
SerDe Library: org.apache.hadoop.hive.hbase.HBaseSerDe
-InputFormat: org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat
+InputFormat: null
+OutputFormat: null
Compressed: No
Num Buckets: -1
Bucket Columns: []
Modified: hive/branches/spark/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out (original)
+++ hive/branches/spark/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out Sun Aug 10 01:33:50 2014
@@ -63,8 +63,8 @@ Table Parameters:
# Storage Information
SerDe Library: org.apache.hadoop.hive.hbase.HBaseSerDe
-InputFormat: org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat
+InputFormat: null
+OutputFormat: null
Compressed: No
Num Buckets: -1
Bucket Columns: []
@@ -238,8 +238,8 @@ Table Parameters:
# Storage Information
SerDe Library: org.apache.hadoop.hive.hbase.HBaseSerDe
-InputFormat: org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat
+InputFormat: null
+OutputFormat: null
Compressed: No
Num Buckets: -1
Bucket Columns: []
Modified: hive/branches/spark/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/test/templates/TestHBaseCliDriver.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/test/templates/TestHBaseCliDriver.vm (original)
+++ hive/branches/spark/hbase-handler/src/test/templates/TestHBaseCliDriver.vm Sun Aug 10 01:33:50 2014
@@ -27,7 +27,6 @@ import java.util.*;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.apache.hadoop.hive.ql.session.SessionState;
public class $className extends TestCase {
@@ -45,9 +44,12 @@ public class $className extends TestCase
protected void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+ String initScript = "$initScript";
+ String cleanupScript = "$cleanupScript";
try {
- qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, setup);
+ qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+ setup, initScript, cleanupScript);
} catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
Modified: hive/branches/spark/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm (original)
+++ hive/branches/spark/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm Sun Aug 10 01:33:50 2014
@@ -45,9 +45,12 @@ public class $className extends TestCase
protected void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+ String initScript = "$initScript";
+ String cleanupScript = "$cleanupScript";
try {
- qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, setup);
+ qt = new HBaseQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+ setup, initScript, cleanupScript);
} catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
Modified: hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java (original)
+++ hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/SpecialCases.java Sun Aug 10 01:33:50 2014
@@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.mapredu
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
@@ -37,6 +38,7 @@ import java.util.Map;
* class that allows us to still be as generic as possible
* in the main codeflow path, and call attention to the special
* cases here.
+ *
* Note : For all methods introduced here, please document why
* the special case is necessary, providing a jira number if
* possible.
@@ -50,6 +52,11 @@ public class SpecialCases {
* instantiating a storage handler to write. We set any parameters
* we want to be visible to the job in jobProperties, and this will
* be available to the job via jobconf at run time.
+ *
+ * This is mostly intended for StorageHandlers that wrap file-based
+ * OutputFormats, such as FosterStorageHandler, which wraps RCFile,
+ * ORC, etc.
+ *
* @param jobProperties : map to write to
* @param jobInfo : information about this output job to read from
* @param ofclass : the output format in use
@@ -78,5 +85,26 @@ public class SpecialCases {
}
}
+ /**
+ * Method to do any storage-handler-specific special casing while
+ * instantiating an HCatLoader
+ *
+ * @param conf : configuration to write to
+ * @param tableInfo : the table definition being used
+ */
+ public static void addSpecialCasesParametersForHCatLoader(
+ Configuration conf, HCatTableInfo tableInfo) {
+ if ((tableInfo == null) || (tableInfo.getStorerInfo() == null)){
+ return;
+ }
+ String shClass = tableInfo.getStorerInfo().getStorageHandlerClass();
+ if ((shClass != null) && shClass.equals("org.apache.hadoop.hive.hbase.HBaseStorageHandler")){
+ // NOTE: We use the string name of the hive hbase handler here because we
+ // do not want to introduce a compile-time dependency on the hive-hbase-handler
+ // module from within hive-hcatalog.
+ // This parameter was added due to the requirement in HIVE-7072
+ conf.set("pig.noSplitCombination", "true");
+ }
+ }
}
Modified: hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java (original)
+++ hive/branches/spark/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java Sun Aug 10 01:33:50 2014
@@ -43,6 +43,7 @@ import org.apache.hive.hcatalog.data.Pai
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.apache.hive.hcatalog.mapreduce.InputJobInfo;
+import org.apache.hive.hcatalog.mapreduce.SpecialCases;
import org.apache.pig.Expression;
import org.apache.pig.Expression.BinaryExpression;
import org.apache.pig.PigException;
@@ -125,6 +126,12 @@ public class HCatLoader extends HCatBase
Job clone = new Job(job.getConfiguration());
HCatInputFormat.setInput(job, dbName, tableName, getPartitionFilterString());
+ InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(
+ job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));
+
+ SpecialCases.addSpecialCasesParametersForHCatLoader(job.getConfiguration(),
+ inputJobInfo.getTableInfo());
+
// We will store all the new/changed properties in the job in the
// UDF context, so the HCatInputFormat.setInput method need not
// be called many times.
Modified: hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java (original)
+++ hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java Sun Aug 10 01:33:50 2014
@@ -32,7 +32,11 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -41,7 +45,6 @@ import org.apache.hadoop.hive.ql.metadat
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
-import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
@@ -568,12 +571,16 @@ public class HCatCreateTableDesc {
desc.fileFormat = fileFormat;
if ("SequenceFile".equalsIgnoreCase(fileFormat)) {
desc.inputformat = SequenceFileInputFormat.class.getName();
- desc.outputformat = SequenceFileOutputFormat.class
+ desc.outputformat = HiveSequenceFileOutputFormat.class
.getName();
} else if ("RCFile".equalsIgnoreCase(fileFormat)) {
desc.inputformat = RCFileInputFormat.class.getName();
desc.outputformat = RCFileOutputFormat.class.getName();
desc.serde = ColumnarSerDe.class.getName();
+ } else if ("orcfile".equalsIgnoreCase(fileFormat)) {
+ desc.inputformat = OrcInputFormat.class.getName();
+ desc.outputformat = OrcOutputFormat.class.getName();
+ desc.serde = OrcSerde.class.getName();
}
desc.storageHandler = StringUtils.EMPTY;
} else if (!StringUtils.isEmpty(storageHandler)) {
@@ -583,7 +590,7 @@ public class HCatCreateTableDesc {
LOG.info("Using text file format for the table.");
desc.inputformat = TextInputFormat.class.getName();
LOG.info("Table input format:" + desc.inputformat);
- desc.outputformat = IgnoreKeyTextOutputFormat.class
+ desc.outputformat = HiveIgnoreKeyTextOutputFormat.class
.getName();
LOG.info("Table output format:" + desc.outputformat);
}
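
A usage sketch mirroring the new test added below in TestHCatClient; the db and table names are placeholders and cols is an assumed List<HCatFieldSchema>:

  HCatCreateTableDesc desc = HCatCreateTableDesc
      .create("testdb", "testTable3", cols)
      .fileFormat("orcfile")
      .build();
  // desc now carries OrcInputFormat, OrcOutputFormat, and OrcSerde class names.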
Modified: hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java (original)
+++ hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java Sun Aug 10 01:33:50 2014
@@ -30,9 +30,12 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
@@ -119,6 +122,7 @@ public class TestHCatClient {
String db = "testdb";
String tableOne = "testTable1";
String tableTwo = "testTable2";
+ String tableThree = "testTable3";
HCatClient client = HCatClient.create(new Configuration(hcatConf));
client.dropDatabase(db, true, HCatClient.DropDBMode.CASCADE);
@@ -170,7 +174,7 @@ public class TestHCatClient {
assertTrue(table2.getInputFileFormat().equalsIgnoreCase(
TextInputFormat.class.getName()));
assertTrue(table2.getOutputFileFormat().equalsIgnoreCase(
- IgnoreKeyTextOutputFormat.class.getName()));
+ HiveIgnoreKeyTextOutputFormat.class.getName()));
assertTrue("SerdeParams not found", table2.getSerdeParams() != null);
assertEquals("checking " + serdeConstants.FIELD_DELIM, Character.toString('\001'),
table2.getSerdeParams().get(serdeConstants.FIELD_DELIM));
@@ -186,6 +190,19 @@ public class TestHCatClient {
table2.getSerdeParams().get(serdeConstants.SERIALIZATION_NULL_FORMAT));
assertEquals((expectedDir + "/" + db + ".db/" + tableTwo).toLowerCase(), table2.getLocation().toLowerCase());
+
+ HCatCreateTableDesc tableDesc3 = HCatCreateTableDesc.create(db,
+ tableThree, cols).fileFormat("orcfile").build();
+ client.createTable(tableDesc3);
+ HCatTable table3 = client.getTable(db, tableThree);
+ assertTrue(table3.getInputFileFormat().equalsIgnoreCase(
+ OrcInputFormat.class.getName()));
+ assertTrue(table3.getOutputFileFormat().equalsIgnoreCase(
+ OrcOutputFormat.class.getName()));
+ assertTrue(table3.getSerdeLib().equalsIgnoreCase(
+ OrcSerde.class.getName()));
+ assertTrue(table3.getCols().equals(cols));
+
client.close();
}
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java Sun Aug 10 01:33:50 2014
@@ -88,7 +88,7 @@ public class TestLocationQueries extends
String hadoopVer, String locationSubdir)
throws Exception
{
- super(outDir, logDir, miniMr, hadoopVer);
+ super(outDir, logDir, miniMr, hadoopVer, "", "");
this.locationSubdir = locationSubdir;
}
}
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java Sun Aug 10 01:33:50 2014
@@ -72,6 +72,9 @@ public class TestMetastoreAuthorizationP
return DefaultHiveMetastoreAuthorizationProvider.class.getName();
}
+ protected HiveConf createHiveConf() throws Exception {
+ return new HiveConf(this.getClass());
+ }
@Override
protected void setUp() throws Exception {
@@ -92,7 +95,7 @@ public class TestMetastoreAuthorizationP
MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
- clientHiveConf = new HiveConf(this.getClass());
+ clientHiveConf = createHiveConf();
// Turn off client-side authorization
clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false);
@@ -134,10 +137,23 @@ public class TestMetastoreAuthorizationP
return "smp_ms_tbl";
}
+ protected boolean isTestEnabled() {
+ return true;
+ }
+
+ protected String setupUser() {
+ return ugi.getUserName();
+ }
+
public void testSimplePrivileges() throws Exception {
+ if (!isTestEnabled()) {
+ System.out.println("Skipping test " + this.getClass().getName());
+ return;
+ }
+
String dbName = getTestDbName();
String tblName = getTestTableName();
- String userName = ugi.getUserName();
+ String userName = setupUser();
allowCreateDatabase(userName);
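
A hypothetical subclass sketch showing how the new hooks compose; the class name and user are invented, and only the overridden signatures come from this change:

  public class TestCustomAuthorizationProvider
      extends TestMetastoreAuthorizationProvider {
    @Override
    protected HiveConf createHiveConf() throws Exception {
      HiveConf conf = new HiveConf(this.getClass());
      // e.g. point the client config at a credential provider here
      return conf;
    }
    @Override
    protected boolean isTestEnabled() {
      return true; // gate on environment capabilities if needed
    }
    @Override
    protected String setupUser() {
      return "hypothetical_user";
    }
  }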
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java?rev=1617040&r1=1617039&r2=1617040&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java Sun Aug 10 01:33:50 2014
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.ql.security;
import java.net.URI;
+import java.security.AccessControlException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -81,7 +82,7 @@ public class TestStorageBasedMetastoreAu
setPermissions(location,"-rwxr--r--");
}
- private void setPermissions(String locn, String permissions) throws Exception {
+ protected void setPermissions(String locn, String permissions) throws Exception {
FileSystem fs = FileSystem.get(new URI(locn), clientHiveConf);
fs.setPermission(new Path(locn), FsPermission.valueOf(permissions));
}
@@ -89,7 +90,7 @@ public class TestStorageBasedMetastoreAu
@Override
protected void assertNoPrivileges(MetaException me){
assertNotNull(me);
- assertTrue(me.getMessage().indexOf("not permitted") != -1);
+ assertTrue(me.getMessage().indexOf("AccessControlException") != -1);
}
@Override