You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by gu...@apache.org on 2014/08/05 09:23:07 UTC

svn commit: r1615872 [1/12] - in /hive/branches/cbo: ./ bin/ common/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/c...

Author: gunther
Date: Tue Aug  5 07:23:02 2014
New Revision: 1615872

URL: http://svn.apache.org/r1615872
Log:
Merge latest trunk into cbo branch (Gunther Hagleitner)

Added:
    hive/branches/cbo/contrib/src/test/queries/clientpositive/url_hook.q
      - copied unchanged from r1615869, hive/trunk/contrib/src/test/queries/clientpositive/url_hook.q
    hive/branches/cbo/contrib/src/test/results/clientpositive/url_hook.q.out
      - copied unchanged from r1615869, hive/trunk/contrib/src/test/results/clientpositive/url_hook.q.out
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseTableSnapshotInputFormatUtil.java
      - copied unchanged from r1615869, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseTableSnapshotInputFormatUtil.java
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
      - copied unchanged from r1615869, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
      - copied unchanged from r1615869, hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
    hive/branches/cbo/hbase-handler/src/test/queries/positive/hbase_handler_snapshot.q
      - copied unchanged from r1615869, hive/trunk/hbase-handler/src/test/queries/positive/hbase_handler_snapshot.q
    hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_handler_snapshot.q.out
      - copied unchanged from r1615869, hive/trunk/hbase-handler/src/test/results/positive/hbase_handler_snapshot.q.out
    hive/branches/cbo/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java
      - copied unchanged from r1615869, hive/trunk/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/
      - copied from r1615869, hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/
      - copied from r1615869, hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
      - copied unchanged from r1615869, hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/
      - copied from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
      - copied unchanged from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileKeyWrapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
      - copied unchanged from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeInputFormat.java
      - copied unchanged from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeRecordReader.java
      - copied unchanged from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeRecordReader.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileValueWrapper.java
      - copied unchanged from r1615869, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileValueWrapper.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java
      - copied unchanged from r1615869, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_admin_role.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/authorization_drop_admin_role.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/orc_merge1.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/orc_merge1.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/orc_merge2.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/orc_merge2.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/orc_merge3.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/orc_merge3.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/orc_merge4.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/orc_merge4.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/orc_merge5.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientnegative/orc_merge5.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/alter_merge_2_orc.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_2_orc.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/alter_merge_orc.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_orc.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/alter_merge_stats_orc.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/alter_merge_stats_orc.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/orc_merge1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge2.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/orc_merge2.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge3.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/orc_merge3.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge4.q
      - copied unchanged from r1615869, hive/trunk/ql/src/test/queries/clientpositive/orc_merge4.q
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/orc_merge1.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/orc_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/orc_merge2.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/orc_merge2.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/orc_merge3.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/orc_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/orc_merge4.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/orc_merge4.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/orc_merge5.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientnegative/orc_merge5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/alter_merge_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/alter_merge_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge1.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/orc_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge2.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/orc_merge2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge3.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/orc_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge4.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/orc_merge4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/alter_merge_2_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_2_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge2.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge3.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge4.q.out
      - copied unchanged from r1615869, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge4.q.out
Removed:
    hive/branches/cbo/contrib/src/test/queries/clientnegative/url_hook.q
    hive/branches/cbo/contrib/src/test/results/clientnegative/url_hook.q.out
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/fileformats/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/MergeWork.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeOutputFormat.java
Modified:
    hive/branches/cbo/   (props changed)
    hive/branches/cbo/.gitignore
    hive/branches/cbo/bin/hive.cmd
    hive/branches/cbo/common/pom.xml
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/cbo/conf/hive-default.xml.template
    hive/branches/cbo/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java
    hive/branches/cbo/data/conf/hive-site.xml
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
    hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
    hive/branches/cbo/hbase-handler/src/test/results/positive/external_table_ppd.q.out
    hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
    hive/branches/cbo/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
    hive/branches/cbo/hcatalog/core/pom.xml
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java
    hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java
    hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
    hive/branches/cbo/hcatalog/streaming/src/test/sit
    hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
    hive/branches/cbo/itests/qtest/testconfiguration.properties
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
    hive/branches/cbo/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/branches/cbo/metastore/if/hive_metastore.thrift
    hive/branches/cbo/metastore/pom.xml
    hive/branches/cbo/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    hive/branches/cbo/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    hive/branches/cbo/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    hive/branches/cbo/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
    hive/branches/cbo/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
    hive/branches/cbo/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    hive/branches/cbo/metastore/src/gen/thrift/gen-php/metastore/Types.php
    hive/branches/cbo/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    hive/branches/cbo/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    hive/branches/cbo/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
    hive/branches/cbo/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
    hive/branches/cbo/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    hive/branches/cbo/pom.xml
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_cycles1.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_cycles2.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_grant.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_grant2.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_grant_nosuchrole.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_grant_otherrole.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_role_grant_otheruser.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_show_grant_otherrole.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_all.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_alltabs.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_show_grant_otheruser_wtab.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_admin_almighty2.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_create_func1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_create_macro1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_insert.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_role_grant1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_role_grant2.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_show_grant.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_public_drop.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/annotate_stats_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_createas1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_createas1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_10.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_11.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_12.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_13.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_14.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_16.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_9.q.out
    hive/branches/cbo/service/src/java/org/apache/hive/service/cli/CLIService.java
    hive/branches/cbo/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
    hive/branches/cbo/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java

Propchange: hive/branches/cbo/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1614506-1615869

Modified: hive/branches/cbo/.gitignore
URL: http://svn.apache.org/viewvc/hive/branches/cbo/.gitignore?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/.gitignore (original)
+++ hive/branches/cbo/.gitignore Tue Aug  5 07:23:02 2014
@@ -13,6 +13,7 @@ common/src/gen
 *.iml
 *.ipr
 *.iws
+*.swp
 derby.log
 datanucleus.log
 .arc

Modified: hive/branches/cbo/bin/hive.cmd
URL: http://svn.apache.org/viewvc/hive/branches/cbo/bin/hive.cmd?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/bin/hive.cmd (original)
+++ hive/branches/cbo/bin/hive.cmd Tue Aug  5 07:23:02 2014
@@ -236,6 +236,21 @@ if defined HIVE_CLASSPATH (
   set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HIVE_CLASSPATH%
 )
 
+@rem set hbase components
+if defined HBASE_HOME (
+  if not defined HBASE_CONF_DIR (
+    if exist %HBASE_HOME%\conf (
+      set HBASE_CONF_DIR=%HBASE_HOME%\conf
+    )
+  )
+  if defined HBASE_CONF_DIR (
+    call :AddToHadoopClassPath %HBASE_CONF_DIR%	
+  ) 
+  if exist %HBASE_HOME%\lib (
+    call :AddToHadoopClassPath %HBASE_HOME%\lib\*
+  ) 
+)
+
 if defined AUX_PARAM (
         set HIVE_OPTS=%HIVE_OPTS% -hiveconf hive.aux.jars.path="%AUX_PARAM%"
 	set AUX_JARS_CMD_LINE="-libjars %AUX_PARAM%"
@@ -359,3 +374,12 @@ if not defined AUX_PARAM (
 	)
 )
 goto :EOF
+
+:AddToHadoopClassPath
+if defined HADOOP_CLASSPATH (
+  set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%1
+) else (
+    set HADOOP_CLASSPATH=%1
+  )  
+)
+goto :EOF

Modified: hive/branches/cbo/common/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/pom.xml?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/common/pom.xml (original)
+++ hive/branches/cbo/common/pom.xml Tue Aug  5 07:23:02 2014
@@ -112,6 +112,14 @@
     <profile>
       <id>dist</id>
       <build>
+        <resources>
+          <resource>
+            <directory>../conf/</directory>
+            <includes>
+              <include>hive-default.xml.template</include>
+            </includes>
+          </resource>
+        </resources>
         <plugins>
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
@@ -140,14 +148,6 @@
   </profiles>
 
   <build>
-    <resources>
-      <resource>
-        <directory>../conf/</directory>
-        <includes>
-          <include>hive-default.xml.template</include>
-        </includes>
-      </resource>
-    </resources>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
     <scriptSourceDirectory>${basedir}/src/scripts</scriptSourceDirectory>

Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Aug  5 07:23:02 2014
@@ -108,7 +108,6 @@ public class HiveConf extends Configurat
       HiveConf.ConfVars.METASTOREPWD,
       HiveConf.ConfVars.METASTORECONNECTURLHOOK,
       HiveConf.ConfVars.METASTORECONNECTURLKEY,
-      HiveConf.ConfVars.METASTOREFORCERELOADCONF,
       HiveConf.ConfVars.METASTORESERVERMINTHREADS,
       HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
       HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
@@ -352,11 +351,6 @@ public class HiveConf extends Configurat
         "jdbc:derby:;databaseName=metastore_db;create=true",
         "JDBC connect string for a JDBC metastore"),
 
-    METASTOREFORCERELOADCONF("hive.metastore.force.reload.conf", false, 
-        "Whether to force reloading of the metastore configuration (including\n" +
-        "the connection URL, before the next metastore query that accesses the\n" +
-        "datastore. Once reloaded, this value is reset to false. Used for\n" +
-        "testing only."),
     HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1,
         "The number of times to retry a HMSHandler call if there were a connection error"),
     HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000,
@@ -789,6 +783,14 @@ public class HiveConf extends Configurat
     HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
     HIVEMERGEINPUTFORMATBLOCKLEVEL("hive.merge.input.format.block.level",
         "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat", ""),
+    HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
+        "When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a\n" +
+        " table with ORC file format, enabling this config will do stripe level fast merge\n" +
+        " for small ORC files. Note that enabling this config will not honor padding tolerance\n" +
+        " config (hive.exec.orc.block.padding.tolerance)."),
+    HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level",
+        "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat", 
+	"Input file format to use for ORC stripe level merging (for internal use only)"),
     HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
         "hive.merge.current.job.has.dynamic.partitions", false, ""),
 
@@ -1260,6 +1262,9 @@ public class HiveConf extends Configurat
         "Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
     HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
         "True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
+    HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
+    HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
+        "restore the HBase table snapshot."),
 
     // For har files
     HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
@@ -1554,7 +1559,7 @@ public class HiveConf extends Configurat
         "Comma separated list of non-SQL Hive commands users are authorized to execute"),
 
     HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
-        "hive.security.authenticator.manager,hive.security.authorization.manager",
+        "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
         "Comma separated list of configuration options which are immutable at runtime"),
 
     // If this is set all move tasks at the end of a multi-insert query will only begin once all
@@ -1675,7 +1680,9 @@ public class HiveConf extends Configurat
         "  none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
         "  column: implies column names can contain any character."
     ),
-    USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "",
+
+    // role names are case-insensitive
+    USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
         "Comma separated list of users who are in admin role for bootstrapping.\n" +
         "More users can be added in ADMIN role later."),
 
@@ -1721,25 +1728,31 @@ public class HiveConf extends Configurat
     private final String description;
 
     private final boolean excluded;
+    private final boolean caseSensitive;
 
     ConfVars(String varname, Object defaultVal, String description) {
-      this(varname, defaultVal, null, description, false);
+      this(varname, defaultVal, null, description, true, false);
     }
 
     ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
-      this(varname, defaultVal, null, description, excluded);
+      this(varname, defaultVal, null, description, true, excluded);
+    }
+
+    ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
+      this(varname, defaultVal, null, description, caseSensitive, false);
     }
 
     ConfVars(String varname, Object defaultVal, Validator validator, String description) {
-      this(varname, defaultVal, validator, description, false);
+      this(varname, defaultVal, validator, description, true, false);
     }
 
-    ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean excluded) {
+    ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean caseSensitive, boolean excluded) {
       this.varname = varname;
       this.validator = validator;
       this.description = description;
       this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
       this.excluded = excluded;
+      this.caseSensitive = caseSensitive;
       if (defaultVal == null || defaultVal instanceof String) {
         this.valClass = String.class;
         this.valType = VarType.STRING;
@@ -1806,6 +1819,10 @@ public class HiveConf extends Configurat
       return excluded;
     }
 
+    public boolean isCaseSensitive() {
+      return caseSensitive;
+    }
+
     @Override
     public String toString() {
       return varname;

Modified: hive/branches/cbo/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/branches/cbo/conf/hive-default.xml.template?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/conf/hive-default.xml.template (original)
+++ hive/branches/cbo/conf/hive-default.xml.template Tue Aug  5 07:23:02 2014
@@ -374,16 +374,6 @@
     <description>JDBC connect string for a JDBC metastore</description>
   </property>
   <property>
-    <key>hive.metastore.force.reload.conf</key>
-    <value>false</value>
-    <description>
-      Whether to force reloading of the metastore configuration (including
-      the connection URL, before the next metastore query that accesses the
-      datastore. Once reloaded, this value is reset to false. Used for
-      testing only.
-    </description>
-  </property>
-  <property>
     <key>hive.hmshandler.retry.attempts</key>
     <value>1</value>
     <description>The number of times to retry a HMSHandler call if there were a connection error</description>
@@ -1289,6 +1279,21 @@
     <description/>
   </property>
   <property>
+    <key>hive.merge.orcfile.stripe.level</key>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
+       table with ORC file format, enabling this config will do stripe level fast merge
+       for small ORC files. Note that enabling this config will not honor padding tolerance
+       config (hive.exec.orc.block.padding.tolerance).
+    </description>
+  </property>
+  <property>
+    <key>hive.merge.input.format.stripe.level</key>
+    <value>org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat</value>
+    <description>Input file format to use for ORC stripe level merging (for internal use only)</description>
+  </property>
+  <property>
     <key>hive.merge.current.job.has.dynamic.partitions</key>
     <value>false</value>
     <description/>
@@ -2233,6 +2238,16 @@
     <description>True when HBaseStorageHandler should generate hfiles instead of operate against the online table.</description>
   </property>
   <property>
+    <key>hive.hbase.snapshot.name</key>
+    <value/>
+    <description>The HBase table snapshot name to use.</description>
+  </property>
+  <property>
+    <key>hive.hbase.snapshot.restoredir</key>
+    <value>/tmp</value>
+    <description>The directory in which to restore the HBase table snapshot.</description>
+  </property>
+  <property>
     <key>hive.archive.enabled</key>
     <value>false</value>
     <description>Whether archiving operations are permitted</description>
@@ -2804,7 +2819,7 @@
   </property>
   <property>
     <key>hive.conf.restricted.list</key>
-    <value>hive.security.authenticator.manager,hive.security.authorization.manager</value>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
     <description>Comma separated list of configuration options which are immutable at runtime</description>
   </property>
   <property>

Modified: hive/branches/cbo/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java (original)
+++ hive/branches/cbo/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/TestURLHook.java Tue Aug  5 07:23:02 2014
@@ -28,7 +28,8 @@ import org.apache.hadoop.hive.metastore.
  */
 public class TestURLHook implements JDOConnectionURLHook {
 
-  static String originalUrl = null;
+  private String originalUrl;
+
   @Override
   public String getJdoConnectionUrl(Configuration conf) throws Exception {
     if (originalUrl == null) {

Modified: hive/branches/cbo/data/conf/hive-site.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/data/conf/hive-site.xml?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/data/conf/hive-site.xml (original)
+++ hive/branches/cbo/data/conf/hive-site.xml Tue Aug  5 07:23:02 2014
@@ -240,4 +240,9 @@
   <value>minimal</value>
 </property>
 
+<property>
+  <name>hive.users.in.admin.role</name>
+  <value>hive_admin_user</value>
+</property>
+
 </configuration>

Modified: hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java (original)
+++ hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSplit.java Tue Aug  5 07:23:02 2014
@@ -31,46 +31,86 @@ import org.apache.hadoop.mapred.InputSpl
  * HBaseSplit augments FileSplit with HBase column mapping.
  */
 public class HBaseSplit extends FileSplit implements InputSplit {
-  private final TableSplit split;
 
+  private final TableSplit tableSplit;
+  private final InputSplit snapshotSplit;
+  private boolean isTableSplit; // should be final but Writable
+
+  /**
+   * For Writable
+   */
   public HBaseSplit() {
     super((Path) null, 0, 0, (String[]) null);
-    split = new TableSplit();
+    tableSplit = new TableSplit();
+    snapshotSplit = HBaseTableSnapshotInputFormatUtil.createTableSnapshotRegionSplit();
   }
 
-  public HBaseSplit(TableSplit split, Path dummyPath) {
+  public HBaseSplit(TableSplit tableSplit, Path dummyPath) {
     super(dummyPath, 0, 0, (String[]) null);
-    this.split = split;
+    this.tableSplit = tableSplit;
+    this.snapshotSplit = HBaseTableSnapshotInputFormatUtil.createTableSnapshotRegionSplit();
+    this.isTableSplit = true;
   }
 
-  public TableSplit getSplit() {
-    return this.split;
+  /**
+   * TODO: use TableSnapshotRegionSplit when HBASE-11555 is fixed.
+   */
+  public HBaseSplit(InputSplit snapshotSplit, Path dummyPath) {
+    super(dummyPath, 0, 0, (String[]) null);
+    this.tableSplit = new TableSplit();
+    this.snapshotSplit = snapshotSplit;
+    this.isTableSplit = false;
   }
 
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    split.readFields(in);
+  public TableSplit getTableSplit() {
+    assert isTableSplit;
+    return this.tableSplit;
+  }
+
+  public InputSplit getSnapshotSplit() {
+    assert !isTableSplit;
+    return this.snapshotSplit;
   }
 
   @Override
   public String toString() {
-    return "TableSplit " + split;
+    return "" + (isTableSplit ? tableSplit : snapshotSplit);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    this.isTableSplit = in.readBoolean();
+    if (this.isTableSplit) {
+      tableSplit.readFields(in);
+    } else {
+      snapshotSplit.readFields(in);
+    }
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
-    split.write(out);
+    out.writeBoolean(isTableSplit);
+    if (isTableSplit) {
+      tableSplit.write(out);
+    } else {
+      snapshotSplit.write(out);
+    }
   }
 
   @Override
   public long getLength() {
-    return split.getLength();
+    long val = 0;
+    try {
+      val = isTableSplit ? tableSplit.getLength() : snapshotSplit.getLength();
+    } finally {
+      return val;
+    }
   }
 
   @Override
   public String[] getLocations() throws IOException {
-    return split.getLocations();
+    return isTableSplit ? tableSplit.getLocations() : snapshotSplit.getLocations();
   }
 }

Modified: hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java (original)
+++ hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java Tue Aug  5 07:23:02 2014
@@ -29,7 +29,10 @@ import java.util.Properties;
 import java.util.Set;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -69,6 +72,19 @@ import org.apache.hadoop.util.StringUtil
 public class HBaseStorageHandler extends DefaultStorageHandler
   implements HiveMetaHook, HiveStoragePredicateHandler {
 
+  private static final Log LOG = LogFactory.getLog(HBaseStorageHandler.class);
+
+  /** HBase-internal config by which input format receives snapshot name. */
+  private static final String HBASE_SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+  /** HBase-internal config by which input format received restore dir before HBASE-11335. */
+  private static final String HBASE_SNAPSHOT_TABLE_DIR_KEY = "hbase.TableSnapshotInputFormat.table.dir";
+  /** HBase-internal config by which input format received restore dir after HBASE-11335. */
+  private static final String HBASE_SNAPSHOT_RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+  /** HBase config by which a SlabCache is sized. */
+  private static final String HBASE_OFFHEAP_PCT_KEY = "hbase.offheapcache.percentage";
+  /** HBase config by which a BucketCache is sized. */
+  private static final String HBASE_BUCKETCACHE_SIZE_KEY = "hbase.bucketcache.size";
+
   final static public String DEFAULT_PREFIX = "default.";
 
   //Check if the configure job properties is called from input
@@ -258,6 +274,11 @@ public class HBaseStorageHandler extends
 
   @Override
   public Class<? extends InputFormat> getInputFormatClass() {
+    if (HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_NAME) != null) {
+      LOG.debug("Using TableSnapshotInputFormat");
+      return HiveHBaseTableSnapshotInputFormat.class;
+    }
+    LOG.debug("Using HiveHBaseTableInputFormat");
     return HiveHBaseTableInputFormat.class;
   }
 
@@ -342,6 +363,37 @@ public class HBaseStorageHandler extends
     // do this for reconciling HBaseStorageHandler for use in HCatalog
     // check to see if this an input job or an outputjob
     if (this.configureInputJobProps) {
+      String snapshotName = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_NAME);
+      if (snapshotName != null) {
+        HBaseTableSnapshotInputFormatUtil.assertSupportsTableSnapshots();
+
+        try {
+          String restoreDir =
+            HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_RESTORE_DIR);
+          if (restoreDir == null) {
+            throw new IllegalArgumentException(
+              "Cannot process HBase snapshot without specifying " + HiveConf.ConfVars
+                .HIVE_HBASE_SNAPSHOT_RESTORE_DIR);
+          }
+
+          HBaseTableSnapshotInputFormatUtil.configureJob(hbaseConf, snapshotName, new Path(restoreDir));
+          // copy over configs touched by above method
+          jobProperties.put(HBASE_SNAPSHOT_NAME_KEY, hbaseConf.get(HBASE_SNAPSHOT_NAME_KEY));
+          if (hbaseConf.get(HBASE_SNAPSHOT_TABLE_DIR_KEY, null) != null) {
+            jobProperties.put(HBASE_SNAPSHOT_TABLE_DIR_KEY, hbaseConf.get(HBASE_SNAPSHOT_TABLE_DIR_KEY));
+          } else {
+            jobProperties.put(HBASE_SNAPSHOT_RESTORE_DIR_KEY, hbaseConf.get(HBASE_SNAPSHOT_RESTORE_DIR_KEY));
+          }
+
+          TableMapReduceUtil.resetCacheConfig(hbaseConf);
+          // copy over configs touched by above method
+          jobProperties.put(HBASE_OFFHEAP_PCT_KEY, hbaseConf.get(HBASE_OFFHEAP_PCT_KEY));
+          jobProperties.put(HBASE_BUCKETCACHE_SIZE_KEY, hbaseConf.get(HBASE_BUCKETCACHE_SIZE_KEY));
+        } catch (IOException e) {
+          throw new IllegalArgumentException(e);
+        }
+      }
+
       for (String k : jobProperties.keySet()) {
         jobConf.set(k, jobProperties.get(k));
       }
@@ -415,7 +467,8 @@ public class HBaseStorageHandler extends
        * only need TableMapReduceUtil.addDependencyJars(jobConf) here.
        */
       TableMapReduceUtil.addDependencyJars(
-          jobConf, HBaseStorageHandler.class, TableInputFormatBase.class);
+          jobConf, HBaseStorageHandler.class, TableInputFormatBase.class,
+          org.cliffc.high_scale_lib.Counter.class); // this will be removed for HBase 1.0
       Set<String> merged = new LinkedHashSet<String>(jobConf.getStringCollection("tmpjars"));
 
       Job copy = new Job(jobConf);

Modified: hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java (original)
+++ hive/branches/cbo/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java Tue Aug  5 07:23:02 2014
@@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.ByteStream;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -88,90 +87,11 @@ public class HiveHBaseTableInputFormat e
     final Reporter reporter) throws IOException {
 
     HBaseSplit hbaseSplit = (HBaseSplit) split;
-    TableSplit tableSplit = hbaseSplit.getSplit();
-    String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
-    setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
-    String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
-    boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
-    List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);
-    ColumnMappings columnMappings;
-
-    try {
-      columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping, doColumnRegexMatching);
-    } catch (SerDeException e) {
-      throw new IOException(e);
-    }
+    TableSplit tableSplit = hbaseSplit.getTableSplit();
 
-    if (columnMappings.size() < readColIDs.size()) {
-      throw new IOException("Cannot read more columns than the given table contains.");
-    }
+    setHTable(HiveHBaseInputFormatUtil.getTable(jobConf));
+    setScan(HiveHBaseInputFormatUtil.getScan(jobConf));
 
-    boolean readAllColumns = ColumnProjectionUtils.isReadAllColumns(jobConf);
-    Scan scan = new Scan();
-    boolean empty = true;
-
-    // The list of families that have been added to the scan
-    List<String> addedFamilies = new ArrayList<String>();
-
-    if (!readAllColumns) {
-      ColumnMapping[] columnsMapping = columnMappings.getColumnsMapping();
-      for (int i : readColIDs) {
-        ColumnMapping colMap = columnsMapping[i];
-        if (colMap.hbaseRowKey) {
-          continue;
-        }
-
-        if (colMap.qualifierName == null) {
-          scan.addFamily(colMap.familyNameBytes);
-          addedFamilies.add(colMap.familyName);
-        } else {
-          if(!addedFamilies.contains(colMap.familyName)){
-            // add only if the corresponding family has not already been added
-            scan.addColumn(colMap.familyNameBytes, colMap.qualifierNameBytes);
-          }
-        }
-
-        empty = false;
-      }
-    }
-
-    // The HBase table's row key maps to a Hive table column. In the corner case when only the
-    // row key column is selected in Hive, the HBase Scan will be empty i.e. no column family/
-    // column qualifier will have been added to the scan. We arbitrarily add at least one column
-    // to the HBase scan so that we can retrieve all of the row keys and return them as the Hive
-    // tables column projection.
-    if (empty) {
-      for (ColumnMapping colMap: columnMappings) {
-        if (colMap.hbaseRowKey) {
-          continue;
-        }
-
-        if (colMap.qualifierName == null) {
-          scan.addFamily(colMap.familyNameBytes);
-        } else {
-          scan.addColumn(colMap.familyNameBytes, colMap.qualifierNameBytes);
-        }
-
-        if (!readAllColumns) {
-          break;
-        }
-      }
-    }
-
-    String scanCache = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHE);
-    if (scanCache != null) {
-      scan.setCaching(Integer.valueOf(scanCache));
-    }
-    String scanCacheBlocks = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS);
-    if (scanCacheBlocks != null) {
-      scan.setCacheBlocks(Boolean.valueOf(scanCacheBlocks));
-    }
-    String scanBatch = jobConf.get(HBaseSerDe.HBASE_SCAN_BATCH);
-    if (scanBatch != null) {
-      scan.setBatch(Integer.valueOf(scanBatch));
-    }
-
-    setScan(scan);
     Job job = new Job(jobConf);
     TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
         job.getConfiguration(), reporter);
@@ -443,12 +363,12 @@ public class HiveHBaseTableInputFormat e
     boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
 
     if (hbaseColumnsMapping == null) {
-      throw new IOException("hbase.columns.mapping required for HBase Table.");
+      throw new IOException(HBaseSerDe.HBASE_COLUMNS_MAPPING + " required for HBase Table.");
     }
 
     ColumnMappings columnMappings = null;
     try {
-      columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping,doColumnRegexMatching);
+      columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping, doColumnRegexMatching);
     } catch (SerDeException e) {
       throw new IOException(e);
     }
@@ -463,10 +383,9 @@ public class HiveHBaseTableInputFormat e
     // definition into account and excludes regions which don't satisfy
     // the start/stop row conditions (HBASE-1829).
     Scan scan = createFilterScan(jobConf, iKey,
-        getStorageFormatOfKey(keyMapping.mappingSpec,
+        HiveHBaseInputFormatUtil.getStorageFormatOfKey(keyMapping.mappingSpec,
             jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
 
-
     // The list of families that have been added to the scan
     List<String> addedFamilies = new ArrayList<String>();
 
@@ -503,28 +422,4 @@ public class HiveHBaseTableInputFormat e
 
     return results;
   }
-
-  private boolean getStorageFormatOfKey(String spec, String defaultFormat) throws IOException{
-
-    String[] mapInfo = spec.split("#");
-    boolean tblLevelDefault = "binary".equalsIgnoreCase(defaultFormat) ? true : false;
-
-    switch (mapInfo.length) {
-    case 1:
-      return tblLevelDefault;
-
-    case 2:
-      String storageType = mapInfo[1];
-      if(storageType.equals("-")) {
-        return tblLevelDefault;
-      } else if ("string".startsWith(storageType)){
-        return false;
-      } else if ("binary".startsWith(storageType)){
-        return true;
-      }
-
-    default:
-      throw new IOException("Malformed string: " + spec);
-    }
-  }
 }

Modified: hive/branches/cbo/hbase-handler/src/test/results/positive/external_table_ppd.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/test/results/positive/external_table_ppd.q.out?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/test/results/positive/external_table_ppd.q.out (original)
+++ hive/branches/cbo/hbase-handler/src/test/results/positive/external_table_ppd.q.out Tue Aug  5 07:23:02 2014
@@ -63,8 +63,8 @@ Table Parameters:	 	 
 	 	 
 # Storage Information	 	 
 SerDe Library:      	org.apache.hadoop.hive.hbase.HBaseSerDe	 
-InputFormat:        	org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

Modified: hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out (original)
+++ hive/branches/cbo/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out Tue Aug  5 07:23:02 2014
@@ -63,8 +63,8 @@ Table Parameters:	 	 
 	 	 
 # Storage Information	 	 
 SerDe Library:      	org.apache.hadoop.hive.hbase.HBaseSerDe	 
-InputFormat:        	org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -238,8 +238,8 @@ Table Parameters:	 	 
 	 	 
 # Storage Information	 	 
 SerDe Library:      	org.apache.hadoop.hive.hbase.HBaseSerDe	 
-InputFormat:        	org.apache.hadoop.hive.hbase.HiveHBaseTableInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

Modified: hive/branches/cbo/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hbase-handler/src/test/templates/TestHBaseCliDriver.vm?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hbase-handler/src/test/templates/TestHBaseCliDriver.vm (original)
+++ hive/branches/cbo/hbase-handler/src/test/templates/TestHBaseCliDriver.vm Tue Aug  5 07:23:02 2014
@@ -27,7 +27,6 @@ import java.util.*;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
 import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.apache.hadoop.hive.ql.session.SessionState;
 
 public class $className extends TestCase {
 

Modified: hive/branches/cbo/hcatalog/core/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/pom.xml?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/pom.xml (original)
+++ hive/branches/cbo/hcatalog/core/pom.xml Tue Aug  5 07:23:02 2014
@@ -60,6 +60,13 @@
       <artifactId>hive-exec</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <!-- inter-project -->
     <dependency>
       <groupId>com.google.guava</groupId>

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java Tue Aug  5 07:23:02 2014
@@ -19,13 +19,15 @@
 
 package org.apache.hive.hcatalog.mapreduce;
 
+import com.google.common.collect.ImmutableSet;
+
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
-import junit.framework.Assert;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,10 +42,10 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
-import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
-import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.hive.ql.io.StorageFormats;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
@@ -53,15 +55,23 @@ import org.apache.hadoop.mapreduce.JobSt
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.data.DefaultHCatRecord;
 import org.apache.hive.hcatalog.data.HCatRecord;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
+
+import junit.framework.Assert;
+
 import org.junit.After;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,42 +79,62 @@ import static org.junit.Assert.assertTru
 
 /**
  * Test for HCatOutputFormat. Writes a partition using HCatOutputFormat and reads
- * it back using HCatInputFormat, checks the column values and counts.
+ * it back using HCatInputFormat, checks the column values and counts. This class
+ * can be tested to test different partitioning schemes.
+ *
+ * This is a parameterized test that tests HCatOutputFormat and HCatInputFormat against Hive's
+ * native storage formats enumerated using {@link org.apache.hive.hcatalog.mapreduce.StorageFormats}.
  */
+@RunWith(Parameterized.class)
 public abstract class HCatMapReduceTest extends HCatBaseTest {
-
   private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
+
   protected static String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-  protected static String tableName = "testHCatMapReduceTable";
+  protected static final String TABLE_NAME = "testHCatMapReduceTable";
 
   private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
   private static List<HCatRecord> readRecords = new ArrayList<HCatRecord>();
 
-  protected abstract List<FieldSchema> getPartitionKeys();
-
-  protected abstract List<FieldSchema> getTableColumns();
-
   private static FileSystem fs;
   private String externalTableLocation = null;
+  protected String tableName;
+  protected String serdeClass;
+  protected String inputFormatClass;
+  protected String outputFormatClass;
 
-  protected Boolean isTableExternal() {
-    return false;
+  /**
+   * List of SerDe classes that the HCatalog core tests will not be run against.
+   */
+  public static final Set<String> DISABLED_SERDES = ImmutableSet.of(
+      AvroSerDe.class.getName(),
+      ParquetHiveSerDe.class.getName());
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> generateParameters() {
+    return StorageFormats.asParameters();
   }
 
-  protected boolean isTableImmutable() {
-    return true;
+  /**
+   * Test constructor that sets the storage format class names provided by the test parameter.
+   */
+  public HCatMapReduceTest(String name, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    this.serdeClass = serdeClass;
+    this.inputFormatClass = inputFormatClass;
+    this.outputFormatClass = outputFormatClass;
+    this.tableName = TABLE_NAME + "_" + name;
   }
 
-  protected String inputFormat() {
-    return RCFileInputFormat.class.getName();
-  }
+  protected abstract List<FieldSchema> getPartitionKeys();
+
+  protected abstract List<FieldSchema> getTableColumns();
 
-  protected String outputFormat() { 
-    return RCFileOutputFormat.class.getName(); 
+  protected Boolean isTableExternal() {
+    return false;
   }
 
-  protected String serdeClass() { 
-    return ColumnarSerDe.class.getName(); 
+  protected boolean isTableImmutable() {
+    return true;
   }
 
   @BeforeClass
@@ -143,13 +173,16 @@ public abstract class HCatMapReduceTest 
 
   @Before
   public void createTable() throws Exception {
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+    // Use Junit's Assume to skip running this fixture against any storage formats whose
+    // SerDe is in the disabled serdes list.
+    Assume.assumeTrue(!DISABLED_SERDES.contains(serdeClass));
 
+    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
     try {
       client.dropTable(databaseName, tableName);
     } catch (Exception e) {
-    } //can fail with NoSuchObjectException
-
+      // Can fail with NoSuchObjectException.
+    }
 
     Table tbl = new Table();
     tbl.setDbName(databaseName);
@@ -160,10 +193,9 @@ public abstract class HCatMapReduceTest 
       tbl.setTableType(TableType.MANAGED_TABLE.toString());
     }
     StorageDescriptor sd = new StorageDescriptor();
-
     sd.setCols(getTableColumns());
-    tbl.setPartitionKeys(getPartitionKeys());
 
+    tbl.setPartitionKeys(getPartitionKeys());
     tbl.setSd(sd);
 
     sd.setBucketCols(new ArrayList<String>(2));
@@ -171,12 +203,12 @@ public abstract class HCatMapReduceTest 
     sd.getSerdeInfo().setName(tbl.getTableName());
     sd.getSerdeInfo().setParameters(new HashMap<String, String>());
     sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    if (isTableExternal()){
+    if (isTableExternal()) {
       sd.getSerdeInfo().getParameters().put("EXTERNAL", "TRUE");
     }
-    sd.getSerdeInfo().setSerializationLib(serdeClass());
-    sd.setInputFormat(inputFormat());
-    sd.setOutputFormat(outputFormat());
+    sd.getSerdeInfo().setSerializationLib(serdeClass);
+    sd.setInputFormat(inputFormatClass);
+    sd.setOutputFormat(outputFormatClass);
 
     Map<String, String> tableParams = new HashMap<String, String>();
     if (isTableExternal()) {
@@ -190,68 +222,59 @@ public abstract class HCatMapReduceTest 
     client.createTable(tbl);
   }
 
-  //Create test input file with specified number of rows
+  /*
+   * Create test input file with specified number of rows
+   */
   private void createInputFile(Path path, int rowCount) throws IOException {
-
     if (fs.exists(path)) {
       fs.delete(path, true);
     }
 
     FSDataOutputStream os = fs.create(path);
-
     for (int i = 0; i < rowCount; i++) {
       os.writeChars(i + "\n");
     }
-
     os.close();
   }
 
-  public static class MapCreate extends
-      Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
-
-    static int writeCount = 0; //test will be in local mode
+  public static class MapCreate extends Mapper<LongWritable, Text, BytesWritable, HCatRecord> {
+    // Test will be in local mode.
+    static int writeCount = 0;
 
     @Override
-    public void map(LongWritable key, Text value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          HCatRecord rec = writeRecords.get(writeCount);
-          context.write(null, rec);
-          writeCount++;
-
-        } catch (Exception e) {
-
-          e.printStackTrace(System.err); //print since otherwise exception is lost
-          throw new IOException(e);
-        }
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException, InterruptedException {
+      try {
+        HCatRecord rec = writeRecords.get(writeCount);
+        context.write(null, rec);
+        writeCount++;
+      } catch (Exception e) {
+        // Print since otherwise exception is lost.
+        e.printStackTrace(System.err);
+        throw new IOException(e);
       }
     }
   }
 
-  public static class MapRead extends
-      Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
-
+  public static class MapRead extends Mapper<WritableComparable, HCatRecord, BytesWritable, Text> {
     static int readCount = 0; //test will be in local mode
 
     @Override
-    public void map(WritableComparable key, HCatRecord value, Context context
-    ) throws IOException, InterruptedException {
-      {
-        try {
-          readRecords.add(value);
-          readCount++;
-        } catch (Exception e) {
-          e.printStackTrace(); //print since otherwise exception is lost
-          throw new IOException(e);
-        }
+    public void map(WritableComparable key, HCatRecord value, Context context)
+        throws IOException, InterruptedException {
+      try {
+        readRecords.add(value);
+        readCount++;
+      } catch (Exception e) {
+        // Print since otherwise exception is lost.
+        e.printStackTrace();
+        throw new IOException(e);
       }
     }
   }
 
-  Job runMRCreate(Map<String, String> partitionValues,
-          List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-          int writeCount, boolean assertWrite) throws Exception {
+  Job runMRCreate(Map<String, String> partitionValues, List<HCatFieldSchema> partitionColumns,
+      List<HCatRecord> records, int writeCount, boolean assertWrite) throws Exception {
     return runMRCreate(partitionValues, partitionColumns, records, writeCount, assertWrite,
         true, null);
   }
@@ -267,10 +290,9 @@ public abstract class HCatMapReduceTest 
    * @return
    * @throws Exception
    */
-  Job runMRCreate(Map<String, String> partitionValues,
-          List<HCatFieldSchema> partitionColumns, List<HCatRecord> records,
-          int writeCount, boolean assertWrite, boolean asSingleMapTask,
-          String customDynamicPathPattern) throws Exception {
+  Job runMRCreate(Map<String, String> partitionValues, List<HCatFieldSchema> partitionColumns,
+      List<HCatRecord> records, int writeCount, boolean assertWrite, boolean asSingleMapTask,
+      String customDynamicPathPattern) throws Exception {
 
     writeRecords = records;
     MapCreate.writeCount = 0;
@@ -355,7 +377,6 @@ public abstract class HCatMapReduceTest 
    * @throws Exception
    */
   List<HCatRecord> runMRRead(int readCount, String filter) throws Exception {
-
     MapRead.readCount = 0;
     readRecords.clear();
 
@@ -388,9 +409,7 @@ public abstract class HCatMapReduceTest 
     return readRecords;
   }
 
-
   protected HCatSchema getTableSchema() throws Exception {
-
     Configuration conf = new Configuration();
     Job job = new Job(conf, "hcat mapreduce read schema test");
     job.setJarByClass(this.getClass());

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java Tue Aug  5 07:23:02 2014
@@ -37,8 +37,10 @@ import org.apache.hive.hcatalog.data.Def
 import org.apache.hive.hcatalog.data.HCatRecord;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,9 +55,10 @@ public class TestHCatDynamicPartitioned 
   protected static final int NUM_RECORDS = 20;
   protected static final int NUM_PARTITIONS = 5;
 
-  @BeforeClass
-  public static void generateInputData() throws Exception {
-    tableName = "testHCatDynamicPartitionedTable";
+  public TestHCatDynamicPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatDynamicPartitionedTable_" + formatName;
     generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
     generateDataColumns();
   }

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java Tue Aug  5 07:23:02 2014
@@ -24,18 +24,20 @@ import org.junit.Test;
 
 public class TestHCatExternalDynamicPartitioned extends TestHCatDynamicPartitioned {
 
+  public TestHCatExternalDynamicPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatExternalDynamicPartitionedTable_" + formatName;
+    generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
+    generateDataColumns();
+  }
+
   @Override
   protected Boolean isTableExternal() {
     return true;
   }
 
-  @BeforeClass
-  public static void generateInputData() throws Exception {
-    tableName = "testHCatExternalDynamicPartitionedTable";
-    generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
-    generateDataColumns();
-  }
-
   /**
    * Run the external dynamic partitioning test but with single map task
    * @throws Exception

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalNonPartitioned.java Tue Aug  5 07:23:02 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatExternalNonPartitioned extends TestHCatNonPartitioned {
+  public TestHCatExternalNonPartitioned(String formatName, String serdeName,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeName, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected Boolean isTableExternal() {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalPartitioned.java Tue Aug  5 07:23:02 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatExternalPartitioned extends TestHCatPartitioned {
+  public TestHCatExternalPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected Boolean isTableExternal() {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableDynamicPartitioned.java Tue Aug  5 07:23:02 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutableDynamicPartitioned extends TestHCatDynamicPartitioned {
+  public TestHCatMutableDynamicPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutableNonPartitioned.java Tue Aug  5 07:23:02 2014
@@ -20,7 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutableNonPartitioned extends TestHCatNonPartitioned {
-
+  public TestHCatMutableNonPartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMutablePartitioned.java Tue Aug  5 07:23:02 2014
@@ -20,6 +20,11 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 public class TestHCatMutablePartitioned extends TestHCatPartitioned {
+  public TestHCatMutablePartitioned(String formatName, String serdeClass,
+      String inputFormatClass, String outputFormatClass)
+      throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+  }
 
   @Override
   protected boolean isTableImmutable() {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatNonPartitioned.java Tue Aug  5 07:23:02 2014
@@ -43,16 +43,14 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertNull;
 
 public class TestHCatNonPartitioned extends HCatMapReduceTest {
-
   private static List<HCatRecord> writeRecords;
   static List<HCatFieldSchema> partitionColumns;
 
-  @BeforeClass
-  public static void oneTimeSetUp() throws Exception {
-
+  public TestHCatNonPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
     dbName = null; //test if null dbName works ("default" is used)
-    tableName = "testHCatNonPartitionedTable";
-
+    tableName = "testHCatNonPartitionedTable_" + formatName;
     writeRecords = new ArrayList<HCatRecord>();
 
     for (int i = 0; i < 20; i++) {

Modified: hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java (original)
+++ hive/branches/cbo/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitioned.java Tue Aug  5 07:23:02 2014
@@ -49,10 +49,10 @@ public class TestHCatPartitioned extends
   private static List<HCatRecord> writeRecords;
   private static List<HCatFieldSchema> partitionColumns;
 
-  @BeforeClass
-  public static void oneTimeSetUp() throws Exception {
-
-    tableName = "testHCatPartitionedTable";
+  public TestHCatPartitioned(String formatName, String serdeClass, String inputFormatClass,
+      String outputFormatClass) throws Exception {
+    super(formatName, serdeClass, inputFormatClass, outputFormatClass);
+    tableName = "testHCatPartitionedTable_" + formatName;
     writeRecords = new ArrayList<HCatRecord>();
 
     for (int i = 0; i < 20; i++) {
@@ -68,7 +68,6 @@ public class TestHCatPartitioned extends
     partitionColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, "")));
   }
 
-
   @Override
   protected List<FieldSchema> getPartitionKeys() {
     List<FieldSchema> fields = new ArrayList<FieldSchema>();

Modified: hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java (original)
+++ hive/branches/cbo/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatLoader.java Tue Aug  5 07:23:02 2014
@@ -119,7 +119,6 @@ public class HCatLoader extends HCatBase
       if (!HCatUtil.checkJobContextIfRunningFromBackend(job)) {
         //Combine credentials and credentials from job takes precedence for freshness
         Credentials crd = jobCredentials.get(INNER_SIGNATURE_PREFIX + "_" + signature);
-        crd.addAll(job.getCredentials());
         job.getCredentials().addAll(crd);
       }
     } else {

Modified: hive/branches/cbo/hcatalog/streaming/src/test/sit
URL: http://svn.apache.org/viewvc/hive/branches/cbo/hcatalog/streaming/src/test/sit?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/hcatalog/streaming/src/test/sit (original)
+++ hive/branches/cbo/hcatalog/streaming/src/test/sit Tue Aug  5 07:23:02 2014
@@ -33,7 +33,7 @@ for jar in ${HIVE_HOME}/hcatalog/share/h
   CLASSPATH=${CLASSPATH}:$jar
 done
 
-CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/conf
+CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/etc/hadoop
 CLASSPATH=${CLASSPATH}:${HIVE_HOME}/conf
 
 $JAVA_HOME/bin/java -cp ${CLASSPATH} org.apache.hive.hcatalog.streaming.StreamingIntegrationTester $@

Modified: hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java Tue Aug  5 07:23:02 2014
@@ -178,7 +178,6 @@ public class MiniHS2 extends AbstractHiv
     hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, getHost());
     hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, getBinaryPort());
     hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort());
-    HiveMetaStore.HMSHandler.resetDefaultDBFlag();
 
     Path scratchDir = new Path(baseDfsDir, "scratch");
     fs.mkdirs(scratchDir);