Posted to commits@hive.apache.org by sz...@apache.org on 2015/01/22 06:05:10 UTC

svn commit: r1653769 [1/14] - in /hive/branches/spark: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/scripts/ dev-s...

Author: szehon
Date: Thu Jan 22 05:05:05 2015
New Revision: 1653769

URL: http://svn.apache.org/r1653769
Log:
HIVE-9426 : Merge trunk to spark 1/21/2015

Added:
    hive/branches/spark/data/scripts/q_test_cleanup_for_encryption.sql
      - copied unchanged from r1653696, hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql
    hive/branches/spark/data/scripts/q_test_init_for_encryption.sql
      - copied unchanged from r1653696, hive/trunk/data/scripts/q_test_init_for_encryption.sql
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/AlterPartitionMessage.java
      - copied unchanged from r1653696, hive/trunk/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/AlterPartitionMessage.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/AlterTableMessage.java
      - copied unchanged from r1653696, hive/trunk/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/AlterTableMessage.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONAlterPartitionMessage.java
      - copied unchanged from r1653696, hive/trunk/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONAlterPartitionMessage.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONAlterTableMessage.java
      - copied unchanged from r1653696, hive/trunk/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONAlterTableMessage.java
    hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java
      - copied unchanged from r1653696, hive/trunk/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatNotificationEvent.java
    hive/branches/spark/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/api/
      - copied from r1653696, hive/trunk/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/api/
    hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
      - copied unchanged from r1653696, hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringInitCap.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringInitCap.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/hooks/Redactor.java
      - copied unchanged from r1653743, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/Redactor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/CuratorFrameworkSingleton.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInitCap.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInitCap.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLastDay.java
      - copied unchanged from r1653696, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLastDay.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
      - copied unchanged from r1653743, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java
      - copied unchanged from r1653696, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFAddMonths.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFInitCap.java
      - copied unchanged from r1653696, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFInitCap.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java
      - copied unchanged from r1653696, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLastDay.java
    hive/branches/spark/ql/src/test/queries/clientnegative/selectDistinctStarNeg_1.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientnegative/selectDistinctStarNeg_1.q
    hive/branches/spark/ql/src/test/queries/clientnegative/selectDistinctStarNeg_2.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientnegative/selectDistinctStarNeg_2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
    hive/branches/spark/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q
    hive/branches/spark/ql/src/test/queries/clientpositive/groupby_grouping_window.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/groupby_grouping_window.q
    hive/branches/spark/ql/src/test/queries/clientpositive/selectDistinctStar.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/selectDistinctStar.q
    hive/branches/spark/ql/src/test/queries/clientpositive/udf_add_months.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/udf_add_months.q
    hive/branches/spark/ql/src/test/queries/clientpositive/udf_initcap.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/udf_initcap.q
    hive/branches/spark/ql/src/test/queries/clientpositive/udf_last_day.q
      - copied unchanged from r1653696, hive/trunk/ql/src/test/queries/clientpositive/udf_last_day.q
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_add_months.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/beelinepositive/udf_add_months.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_initcap.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/beelinepositive/udf_initcap.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_last_day.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/beelinepositive/udf_last_day.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/selectDistinctStarNeg_2.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientnegative/selectDistinctStarNeg_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/encrypted/
      - copied from r1653696, hive/trunk/ql/src/test/results/clientpositive/encrypted/
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/selectDistinctStar.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/selectDistinctStar.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/auto_join21.q.out
      - copied, changed from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/auto_join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/auto_join29.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/auto_join29.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/auto_join30.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/auto_join30.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_add_months.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/udf_add_months.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_initcap.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/udf_initcap.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_last_day.q.out
      - copied unchanged from r1653696, hive/trunk/ql/src/test/results/clientpositive/udf_last_day.q.out
Removed:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/LazyFlatRowContainer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKeyBytes.java
Modified:
    hive/branches/spark/   (props changed)
    hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
    hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
    hive/branches/spark/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
    hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/spark/dev-support/jenkins-submit-build.sh
    hive/branches/spark/hbase-handler/pom.xml   (props changed)
    hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
    hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
    hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
    hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageDeserializer.java
    hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
    hive/branches/spark/hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestNotificationListener.java
    hive/branches/spark/hcatalog/src/test/e2e/templeton/README.txt
    hive/branches/spark/hcatalog/src/test/e2e/templeton/deployers/deploy_e2e_artifacts.sh
    hive/branches/spark/hcatalog/src/test/e2e/templeton/deployers/env.sh
    hive/branches/spark/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
    hive/branches/spark/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
    hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
    hive/branches/spark/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
    hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/HDFSCleanup.java
    hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonUtils.java
    hive/branches/spark/itests/hcatalog-unit/pom.xml
    hive/branches/spark/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
    hive/branches/spark/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java
    hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
    hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
    hive/branches/spark/itests/qtest/pom.xml
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties
    hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java
    hive/branches/spark/metastore/scripts/upgrade/derby/020-HIVE-9296.derby.sql
    hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.15.0.derby.sql
    hive/branches/spark/metastore/scripts/upgrade/mssql/005-HIVE-9296.mssql.sql
    hive/branches/spark/metastore/scripts/upgrade/mssql/hive-schema-0.15.0.mssql.sql
    hive/branches/spark/metastore/scripts/upgrade/mysql/020-HIVE-9296.mysql.sql
    hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.15.0.mysql.sql
    hive/branches/spark/metastore/scripts/upgrade/oracle/021-HIVE-9296.oracle.sql
    hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.15.0.oracle.sql
    hive/branches/spark/metastore/scripts/upgrade/postgres/020-HIVE-9296.postgres.sql
    hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.15.0.postgres.sql
    hive/branches/spark/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
    hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    hive/branches/spark/metastore/src/model/package.jdo
    hive/branches/spark/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
    hive/branches/spark/pom.xml
    hive/branches/spark/ql/pom.xml
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SkewJoinOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSMBJoinHintOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSortMergeJoinOptimizer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TableAccessAnalyzer.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateAdd.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateDiff.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInFile.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLower.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUpper.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
    hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java
    hive/branches/spark/ql/src/test/queries/clientpositive/auto_join21.q
    hive/branches/spark/ql/src/test/queries/clientpositive/auto_join23.q
    hive/branches/spark/ql/src/test/queries/clientpositive/constprog_partitioner.q
    hive/branches/spark/ql/src/test/queries/clientpositive/groupby2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/groupby_ppr.q
    hive/branches/spark/ql/src/test/queries/clientpositive/input14.q
    hive/branches/spark/ql/src/test/queries/clientpositive/input17.q
    hive/branches/spark/ql/src/test/queries/clientpositive/input18.q
    hive/branches/spark/ql/src/test/queries/clientpositive/input_part2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join0.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join15.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join18.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join20.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join21.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join23.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join6.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join7.q
    hive/branches/spark/ql/src/test/queries/clientpositive/join_array.q
    hive/branches/spark/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
    hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_decimal.q
    hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_filter_on_outerjoin.q
    hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_test_outer.q
    hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ppd_transform.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ptf_rcfile.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
    hive/branches/spark/ql/src/test/queries/clientpositive/ptf_seqfile.q
    hive/branches/spark/ql/src/test/queries/clientpositive/sample3.q
    hive/branches/spark/ql/src/test/queries/clientpositive/sample5.q
    hive/branches/spark/ql/src/test/queries/clientpositive/scriptfile1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/semijoin.q
    hive/branches/spark/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
    hive/branches/spark/ql/src/test/queries/clientpositive/sort.q
    hive/branches/spark/ql/src/test/queries/clientpositive/stats1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union10.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union18.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union19.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union6.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_ppr.q
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_add.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_sub.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_datediff.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_day.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_dayofmonth.q.out
    hive/branches/spark/ql/src/test/results/beelinepositive/udf_to_date.q.out
    hive/branches/spark/ql/src/test/results/clientnegative/fs_default_name2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/auto_join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/auto_join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/cluster.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/constprog2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/constprog_partitioner.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ctas_colname.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_resolution.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/groupby_rollup1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/input14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/input17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/input18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/input_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join0.q.java1.7.out
    hive/branches/spark/ql/src/test/results/clientpositive/join15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join_array.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/join_nullsafe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/nonmr_fetch_threshold.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_clusterby.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_transform.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ppd_union_view.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ptf_matchpath.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ptf_rcfile.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/ptf_seqfile.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/quotedid_basic.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/regex_col.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/sample3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/sample5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/scriptfile1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/semijoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/show_functions.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/sort.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/input_part2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_array.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ptf_rcfile.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ptf_register_tblfn.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ptf_seqfile.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sample5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/scriptfile1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/semijoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/stats1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/subquery_in.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/stats1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/subquery_in.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/subquery_in_having.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/subquery_notin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/groupby2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/join0.q.java1.7.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/join_nullsafe.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/scriptfile1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/subquery_in.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/union6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/transform_ppr1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/transform_ppr2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_date_add.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_date_sub.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_datediff.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_day.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/udf_dayofmonth.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union27.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/vectorized_ptf.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/windowing_streaming.q.out
    hive/branches/spark/ql/src/test/templates/TestCliDriver.vm
    hive/branches/spark/serde/src/java/org/apache/hadoop/hive/serde2/columnar/ColumnarSerDe.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
    hive/branches/spark/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
    hive/branches/spark/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
    hive/branches/spark/shims/0.23/pom.xml
    hive/branches/spark/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
    hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
    hive/branches/spark/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
    hive/branches/spark/spark-client/src/test/java/org/apache/hive/spark/client/TestSparkClient.java

Propchange: hive/branches/spark/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jan 22 05:05:05 2015
@@ -2,4 +2,4 @@
 /hive/branches/cbo:1605012-1627125
 /hive/branches/tez:1494760-1622766
 /hive/branches/vectorization:1466908-1527856
-/hive/trunk:1608589-1651027
+/hive/trunk:1608589-1653743

Modified: hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java (original)
+++ hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java Thu Jan 22 05:05:05 2015
@@ -17,6 +17,7 @@
  */
 package org.apache.hive.beeline;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
@@ -97,55 +98,65 @@ public class HiveSchemaHelper {
     }
 
     static final String DEFAUTL_DELIMITER = ";";
-    /***
+
+    /**
      * Find the type of given command
+     *
      * @param dbCommand
      * @return
      */
     public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
 
-    /** Parse the DB specific nesting format and extract the inner script name if any
+    /**
+     * Parse the DB specific nesting format and extract the inner script name if any
+     *
      * @param dbCommand command from parent script
      * @return
      * @throws IllegalFormatException
      */
     public String getScriptName(String dbCommand) throws IllegalArgumentException;
 
-    /***
+    /**
      * Find if the given command is a nested script execution
+     *
      * @param dbCommand
      * @return
      */
     public boolean isNestedScript(String dbCommand);
 
-    /***
+    /**
      * Find if the given command should not be passed to DB
+     *
      * @param dbCommand
      * @return
      */
     public boolean isNonExecCommand(String dbCommand);
 
-    /***
+    /**
      * Get the SQL statement delimiter
+     *
      * @return
      */
     public String getDelimiter();
 
-    /***
+    /**
      * Clear any client specific tags
+     *
      * @return
      */
     public String cleanseCommand(String dbCommand);
 
-    /***
+    /**
      * Does the DB require table/column names to be quoted?
+     *
      * @return
      */
     public boolean needsQuotedIdentifier();
 
-    /***
+    /**
      * Flatten the nested upgrade script into a buffer
-     * @param scriptDir upgrade script directory
+     *
+     * @param scriptDir  upgrade script directory
      * @param scriptFile upgrade script file
      * @return string of sql commands
      */
@@ -258,6 +269,8 @@ public class HiveSchemaHelper {
     private void setDbOpts(String dbOpts) {
       if (dbOpts != null) {
         this.dbOpts = Lists.newArrayList(dbOpts.split(","));
+      } else {
+        this.dbOpts = Lists.newArrayList();
       }
     }
 
@@ -369,6 +382,10 @@ public class HiveSchemaHelper {
   // Postgres specific parser
   public static class PostgresCommandParser extends AbstractCommandParser {
     private static String POSTGRES_NESTING_TOKEN = "\\i";
+    @VisibleForTesting
+    public static String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
+    @VisibleForTesting
+    public static String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
 
     public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -394,6 +411,19 @@ public class HiveSchemaHelper {
     public boolean needsQuotedIdentifier() {
       return true;
     }
+
+    @Override
+    public boolean isNonExecCommand(String dbCommand) {
+      // Skip "standard_conforming_strings" command which is read-only in older
+      // Postgres versions like 8.1
+      // See: http://www.postgresql.org/docs/8.2/static/release-8-1.html
+      if (getDbOpts().contains(POSTGRES_SKIP_STANDARD_STRINGS_DBOPT)) {
+        if (dbCommand.startsWith(POSTGRES_STANDARD_STRINGS_OPT)) {
+          return true;
+        }
+      }
+      return super.isNonExecCommand(dbCommand);
+    }
   }
 
   //Oracle specific parser
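
A minimal usage sketch of the new Postgres 8.1 filter, assuming hive-beeline
and hive-common are on the classpath; the metastore username and password
arguments are placeholders:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.beeline.HiveSchemaHelper.PostgresCommandParser;

    public class PostgresFilterSketch {
      public static void main(String[] args) {
        // "postgres.filter.81" is the value of the new
        // POSTGRES_SKIP_STANDARD_STRINGS_DBOPT constant.
        PostgresCommandParser parser = new PostgresCommandParser(
            "postgres.filter.81", "msUser", "msPassword", new HiveConf());
        // With the dbOpt set, the read-only 8.1 setting is skipped, not executed.
        System.out.println(
            parser.isNonExecCommand("SET standard_conforming_strings = off;")); // true
      }
    }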

Modified: hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java (original)
+++ hive/branches/spark/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java Thu Jan 22 05:05:05 2015
@@ -178,7 +178,8 @@ public class HiveSchemaTool {
         getConnectionToMetastore(false));
     // verify that the new version is added to schema
     if (!MetaStoreSchemaInfo.getHiveSchemaVersion().equalsIgnoreCase(newSchemaVersion)) {
-      throw new HiveMetaException("Found unexpected schema version " + newSchemaVersion);
+      throw new HiveMetaException("Expected schema version " + MetaStoreSchemaInfo.getHiveSchemaVersion() +
+        ", found version " + newSchemaVersion);
     }
   }
 

Modified: hive/branches/spark/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java (original)
+++ hive/branches/spark/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java Thu Jan 22 05:05:05 2015
@@ -40,6 +40,7 @@ import com.google.common.base.Splitter;
 import jline.console.ConsoleReader;
 import jline.console.completer.Completer;
 import jline.console.history.FileHistory;
+import jline.console.history.PersistentHistory;
 import jline.console.completer.StringsCompleter;
 import jline.console.completer.ArgumentCompleter;
 import jline.console.completer.ArgumentCompleter.ArgumentDelimiter;
@@ -721,10 +722,12 @@ public class CliDriver {
     String line;
     final String HISTORYFILE = ".hivehistory";
     String historyDirectory = System.getProperty("user.home");
+    PersistentHistory history = null;
     try {
       if ((new File(historyDirectory)).exists()) {
         String historyFile = historyDirectory + File.separator + HISTORYFILE;
-        reader.setHistory(new FileHistory(new File(historyFile)));
+        history = new FileHistory(new File(historyFile));
+        reader.setHistory(history);
       } else {
         System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
                            " does not exist.   History will not be available during this session.");
@@ -759,6 +762,10 @@ public class CliDriver {
         continue;
       }
     }
+
+    if (history != null) {
+      history.flush();
+    }
     return ret;
   }
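
The change above keeps a handle to the PersistentHistory so it can be flushed
once the read loop ends. A self-contained sketch of the same jline 2 idiom;
the prompt and exit command are illustrative:

    import java.io.File;
    import jline.console.ConsoleReader;
    import jline.console.history.FileHistory;
    import jline.console.history.PersistentHistory;

    public class HistoryFlushSketch {
      public static void main(String[] args) throws Exception {
        ConsoleReader reader = new ConsoleReader();
        PersistentHistory history = new FileHistory(
            new File(System.getProperty("user.home"), ".hivehistory"));
        reader.setHistory(history);

        String line;
        while ((line = reader.readLine("> ")) != null && !"quit".equals(line)) {
          // ... dispatch the command ...
        }
        history.flush(); // without this, the session's entries are never persisted
      }
    }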
 

Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Thu Jan 22 05:05:05 2015
@@ -53,17 +53,19 @@ import org.apache.hadoop.util.Shell;
 public final class FileUtils {
   private static final Log LOG = LogFactory.getLog(FileUtils.class.getName());
 
-  /**
-   * Accept all paths.
-   */
-  private static class AcceptAllPathFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      return true;
+  public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
+    }
+  };
+
+  public static final PathFilter STAGING_DIR_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith(".");
     }
-  }
-
-  private static final PathFilter allPathFilter = new AcceptAllPathFilter();
+  };
 
   /**
    * Variant of Path.makeQualified that qualifies the input path against the default file system
@@ -319,14 +321,7 @@ public final class FileUtils {
       List<FileStatus> results) throws IOException {
 
     if (fileStatus.isDir()) {
-      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), new PathFilter() {
-
-        @Override
-        public boolean accept(Path p) {
-          String name = p.getName();
-          return !name.startsWith("_") && !name.startsWith(".");
-        }
-      })) {
+      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) {
         listStatusRecursively(fs, stat, results);
       }
     } else {
@@ -366,7 +361,6 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
-   * @param groups  List of groups for the user
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
@@ -547,10 +541,25 @@ public final class FileUtils {
     boolean deleteSource,
     boolean overwrite,
     HiveConf conf) throws IOException {
-    boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+    boolean copied;
+
+    /* Run distcp if source file/dir is too big */
+    if (srcFS.getUri().getScheme().equals("hdfs") &&
+        srcFS.getFileStatus(src).getLen() > conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE)) {
+      LOG.info("Source is " + srcFS.getFileStatus(src).getLen() + " bytes. (MAX: " + conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE) + ")");
+      LOG.info("Launch distributed copy (distcp) job.");
+      copied = shims.runDistCp(src, dst, conf);
+      if (copied && deleteSource) {
+        srcFS.delete(src, true);
+      }
+    } else {
+      copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+    }
+
     boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     if (copied && inheritPerms) {
-      HadoopShims shims = ShimLoader.getHadoopShims();
       HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst);
       try {
         shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst);
@@ -571,7 +580,7 @@ public final class FileUtils {
    * @throws IOException
    */
   public static boolean trashFilesUnderDir(FileSystem fs, Path f, Configuration conf) throws FileNotFoundException, IOException {
-    FileStatus[] statuses = fs.listStatus(f, allPathFilter);
+    FileStatus[] statuses = fs.listStatus(f, HIDDEN_FILES_PATH_FILTER);
     boolean result = true;
     for (FileStatus status : statuses) {
       result = result & moveToTrash(fs, status.getPath(), conf);
@@ -603,6 +612,25 @@ public final class FileUtils {
     return result;
   }
 
+  /**
+   * Check if first path is a subdirectory of second path.
+   * Both paths must belong to the same filesystem.
+   *
+   * @param p1 first path
+   * @param p2 second path
+   * @param fs FileSystem, both paths must belong to the same filesystem
+   * @return
+   */
+  public static boolean isSubDir(Path p1, Path p2, FileSystem fs) {
+    String path1 = fs.makeQualified(p1).toString();
+    String path2 = fs.makeQualified(p2).toString();
+    return path1.startsWith(path2);
+  }
+
   public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
                                Path destPath, boolean inheritPerms,
                                Configuration conf) throws IOException {

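Editorial sketch: the copy() change above routes large HDFS sources through distcp. Below is a minimal, self-contained rendering of the new decision logic; the class name CopyFileSketch and the runDistCp stub are illustrative stand-ins, since the real patch delegates to ShimLoader.getHadoopShims().runDistCp and reads the threshold via HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class CopyFileSketch {
      // Stand-in for conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE).
      private static final long MAX_SINGLE_COPY_BYTES = 32L * 1024 * 1024;

      public static boolean copy(FileSystem srcFS, Path src, FileSystem dstFS, Path dst,
          boolean deleteSource, boolean overwrite, Configuration conf) throws IOException {
        long len = srcFS.getFileStatus(src).getLen();
        // Large HDFS sources are handed to a distcp job; everything else takes
        // the old single-process FileUtil.copy() path.
        if ("hdfs".equals(srcFS.getUri().getScheme()) && len > MAX_SINGLE_COPY_BYTES) {
          boolean copied = runDistCp(src, dst, conf);
          if (copied && deleteSource) {
            srcFS.delete(src, true);
          }
          return copied;
        }
        return FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
      }

      // Illustrative stub only: the patch calls ShimLoader.getHadoopShims().runDistCp(...).
      private static boolean runDistCp(Path src, Path dst, Configuration conf) {
        throw new UnsupportedOperationException("distcp stub for illustration only");
      }
    }
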
Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java Thu Jan 22 05:05:05 2015
@@ -70,7 +70,7 @@ public class HiveStatsUtils {
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern);
+    return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
   }
 
 }

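Editorial sketch: HiveStatsUtils now passes FileUtils.HIDDEN_FILES_PATH_FILTER to globStatus so stats no longer pick up files like _SUCCESS markers or .hive-staging directories. A minimal version of the pattern, assuming the filter rejects names starting with '_' or '.' (consistent with the inline filter it replaces in FileUtils.listStatusRecursively):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public class HiddenFilesGlob {
      // Assumed equivalent of FileUtils.HIDDEN_FILES_PATH_FILTER.
      private static final PathFilter HIDDEN_FILES = new PathFilter() {
        @Override
        public boolean accept(Path p) {
          String name = p.getName();
          return !name.startsWith("_") && !name.startsWith(".");
        }
      };

      public static FileStatus[] listVisible(Path pattern, Configuration conf) throws IOException {
        FileSystem fs = pattern.getFileSystem(conf);
        // The two-argument globStatus applies the filter to every glob match.
        return fs.globStatus(pattern, HIDDEN_FILES);
      }
    }
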
Modified: hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/spark/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Thu Jan 22 05:05:05 2015
@@ -215,6 +215,10 @@ public class HiveConf extends Configurat
     PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
         "Query plan format serialization between client and task nodes. \n" +
         "Two supported values are : kryo and javaXML. Kryo is default."),
+    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
+        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
+        "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
+        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
     SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
         "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
         "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
@@ -267,6 +271,10 @@ public class HiveConf extends Configurat
         "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
         "An on-failure hook is specified as the name of Java class which implements the \n" +
         "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
+    QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
+        "Comma-separated list of hooks to be invoked for each query which can \n" +
+        "tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
+        "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
     CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
         "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
         "A client stats publisher is specified as the name of a Java class which implements the \n" +
@@ -699,13 +707,6 @@ public class HiveConf extends Configurat
     HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
         "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
         "because memory-optimized hashtable cannot be serialized."),
-    HIVEMAPJOINUSEOPTIMIZEDKEYS("hive.mapjoin.optimized.keys", true,
-        "Whether MapJoin hashtable should use optimized (size-wise), keys, allowing the table to take less\n" +
-        "memory. Depending on key, the memory savings for entire table can be 5-15% or so."),
-    HIVEMAPJOINLAZYHASHTABLE("hive.mapjoin.lazy.hashtable", true,
-        "Whether MapJoin hashtable should deserialize values on demand. Depending on how many values in\n" +
-        "the table the join will actually touch, it can save a lot of memory by not creating objects for\n" +
-        "rows that are not needed. If all rows are needed obviously there's no gain."),
     HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 10 * 1024 * 1024,
         "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
         "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
@@ -749,6 +750,10 @@ public class HiveConf extends Configurat
         "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
         "assumption that the original group by will reduce the data size."),
 
+    // Max filesize used to do a single copy (after that, distcp is used)
+    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+        "Maximum file size (in Mb) that Hive uses to do single HDFS copies between directories." +
+        "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
 
     // for hive udtf operator
     HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
@@ -1323,13 +1328,20 @@ public class HiveConf extends Configurat
         "The port of ZooKeeper servers to talk to.\n" +
         "If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
         "does not contain port numbers, this value is used."),
-    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", 600*1000,
-        "ZooKeeper client's session timeout. The client is disconnected, and as a result, all locks released, \n" +
+    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "600000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
         "if a heartbeat is not sent in the timeout."),
     HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
         "The parent node under which all ZooKeeper nodes are created."),
     HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
         "Clean extra nodes at the end of the session."),
+    HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
+        "Max number of times to retry when connecting to the ZooKeeper server."),
+    HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "Initial amount of time (in milliseconds) to wait between retries\n" +
+        "when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
 
     // Transactions
     HIVE_TXN_MANAGER("hive.txn.manager",
@@ -1608,6 +1620,10 @@ public class HiveConf extends Configurat
         "inheriting the permission of the warehouse or database directory."),
     HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
         "whether insert into external tables is allowed"),
+    HIVE_TEMPORARY_TABLE_STORAGE(
+        "hive.exec.temporary.table.storage", "default", new StringSet("memory",
+         "ssd", "default"), "Define the storage policy for temporary tables." +
+         "Choices between memory, ssd and default"),
 
     HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
         "A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +

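Editorial sketch: for the new hive.exec.query.redactor.hooks setting, a hypothetical redactor follows. The redactQuery(String) method name is an assumption about the Redactor abstract class added in this merge (see Redactor.java in the Added list), not a verified signature; the class and package names are likewise illustrative.

    import org.apache.hadoop.hive.ql.hooks.Redactor;

    // Hypothetical redactor; redactQuery(String) is an assumed method of the
    // new Redactor abstract class, not a confirmed signature.
    public class CreditCardRedactor extends Redactor {
      @Override
      public String redactQuery(String query) {
        // Mask 16-digit card-like numbers before the query string lands in job.xml.
        return query.replaceAll("\\b\\d{4}([ -]?)\\d{4}\\1\\d{4}\\1\\d{4}\\b", "****");
      }
    }

Such a class would be wired in with hive.exec.query.redactor.hooks=com.example.CreditCardRedactor (package name hypothetical).
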
Modified: hive/branches/spark/dev-support/jenkins-submit-build.sh
URL: http://svn.apache.org/viewvc/hive/branches/spark/dev-support/jenkins-submit-build.sh?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/dev-support/jenkins-submit-build.sh (original)
+++ hive/branches/spark/dev-support/jenkins-submit-build.sh Thu Jan 22 05:05:05 2015
@@ -30,12 +30,18 @@ case "$BUILD_PROFILE" in
    curl -v -i "$url"
    exit 0
   ;;
-  spark-mr2|spark2-mr2)
+  spark-mr2)
    test -n "$SPARK_URL" || fail "SPARK_URL must be specified"
    url="$SPARK_URL&ISSUE_NUM=$ISSUE_NUM"
    curl -v -i "$url"
    exit 0
   ;;
+  encryption-mr2)
+   test -n "$ENCRYPTION_URL" || fail "ENCRYPTION_URL must be specified"
+   url="$ENCRYPTION_URL&ISSUE_NUM=$ISSUE_NUM"
+   curl -v -i "$url"
+   exit 0
+  ;;
   *)
   echo "Unknown profile '$BUILD_PROFILE'"
   exit 1

Propchange: hive/branches/spark/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jan 22 05:05:05 2015
@@ -2,4 +2,4 @@
 /hive/branches/cbo/hbase-handler/pom.xml:1605012-1627125
 /hive/branches/tez/hbase-handler/pom.xml:1494760-1622766
 /hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856
-/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1608589-1651027
+/hive/trunk/hbase-handler/pom.xml:1494760-1537575,1608589-1653696

Modified: hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java (original)
+++ hive/branches/spark/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java Thu Jan 22 05:05:05 2015
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValueU
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -149,7 +150,7 @@ public class HiveHFileOutputFormat exten
           fs.mkdirs(columnFamilyPath);
           Path srcDir = outputdir;
           for (;;) {
-            FileStatus [] files = fs.listStatus(srcDir);
+            FileStatus [] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
             if ((files == null) || (files.length == 0)) {
               throw new IOException("No family directories found in " + srcDir);
             }
@@ -161,7 +162,7 @@ public class HiveHFileOutputFormat exten
               break;
             }
           }
-          for (FileStatus regionFile : fs.listStatus(srcDir)) {
+          for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
             fs.rename(
               regionFile.getPath(),
               new Path(

Modified: hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java (original)
+++ hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java Thu Jan 22 05:05:05 2015
@@ -96,6 +96,19 @@ public final class HCatConstants {
   public static final String HCAT_DESIRED_PARTITION_NUM_SPLITS =
     "hcat.desired.partition.num.splits";
 
+  /**
+   * hcat.append.limit allows a hcat user to specify a custom append limit.
+   * While appending to an existing directory, hcat avoids naming clashes by
+   * appending _a_NNN, where NNN is a number, to the desired filename. By
+   * default it only tries NNN from 0 to 999 before giving up, which can be a
+   * problem for tables with an extraordinarily large number of files. Ideally
+   * the user should fix this by changing their usage pattern and doing some
+   * manner of compaction, but until they can, this parameter can be set to
+   * raise that limit.
+   */
+  public static final String HCAT_APPEND_LIMIT = "hcat.append.limit";
+
   // IMPORTANT IMPORTANT IMPORTANT!!!!!
   //The keys used to store info into the job Configuration.
   //If any new keys are added, the HCatStorer needs to be updated. The HCatStorer
@@ -132,8 +145,10 @@ public final class HCatConstants {
   public static final String HCAT_EVENT = "HCAT_EVENT";
   public static final String HCAT_ADD_PARTITION_EVENT = "ADD_PARTITION";
   public static final String HCAT_DROP_PARTITION_EVENT = "DROP_PARTITION";
+  public static final String HCAT_ALTER_PARTITION_EVENT = "ALTER_PARTITION";
   public static final String HCAT_PARTITION_DONE_EVENT = "PARTITION_DONE";
   public static final String HCAT_CREATE_TABLE_EVENT = "CREATE_TABLE";
+  public static final String HCAT_ALTER_TABLE_EVENT = "ALTER_TABLE";
   public static final String HCAT_DROP_TABLE_EVENT = "DROP_TABLE";
   public static final String HCAT_CREATE_DATABASE_EVENT = "CREATE_DATABASE";
   public static final String HCAT_DROP_DATABASE_EVENT = "DROP_DATABASE";

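Editorial sketch: overriding the new append limit from client code is a one-liner on the job Configuration; the value 5000 below is arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hive.hcatalog.common.HCatConstants;

    public class AppendLimitExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Raise the clash counter from the default of 1000 to 5000 (arbitrary)
        // as a stopgap for a table with many _a_NNN files, until it is compacted.
        conf.setInt(HCatConstants.HCAT_APPEND_LIMIT, 5000);
        System.out.println(conf.getInt(HCatConstants.HCAT_APPEND_LIMIT, 1000));
      }
    }
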
Modified: hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (original)
+++ hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java Thu Jan 22 05:05:05 2015
@@ -75,6 +75,8 @@ class FileOutputCommitterContainer exten
   static final String DYNTEMP_DIR_NAME = "_DYN";
   static final String SCRATCH_DIR_NAME = "_SCRATCH";
   private static final String APPEND_SUFFIX = "_a_";
+  private static final int APPEND_COUNTER_WARN_THRESHOLD = 1000;
+  private final int maxAppendAttempts;
 
   private static final Logger LOG = LoggerFactory.getLogger(FileOutputCommitterContainer.class);
   private final boolean dynamicPartitioningUsed;
@@ -112,6 +114,8 @@ class FileOutputCommitterContainer exten
     } else {
       customDynamicLocationUsed = false;
     }
+
+    this.maxAppendAttempts = context.getConfiguration().getInt(HCatConstants.HCAT_APPEND_LIMIT, APPEND_COUNTER_WARN_THRESHOLD);
   }
 
   @Override
@@ -646,19 +650,23 @@ class FileOutputCommitterContainer exten
           filetype = "";
         }
 
-        // Attempt to find COUNTER_MAX possible alternatives to a filename by
+        // Attempt to find maxAppendAttempts possible alternatives to a filename by
         // appending _a_N and seeing if that destination also clashes. If we're
         // still clashing after that, give up.
-        final int COUNTER_MAX = 1000;
         int counter = 1;
-        for (; fs.exists(itemDest) && counter < COUNTER_MAX ; counter++) {
+        for (; fs.exists(itemDest) && counter < maxAppendAttempts; counter++) {
           itemDest = new Path(dest, name + (APPEND_SUFFIX + counter) + filetype);
         }
 
-        if (counter == COUNTER_MAX){
+        if (counter == maxAppendAttempts){
           throw new HCatException(ErrorType.ERROR_MOVE_FAILED,
               "Could not find a unique destination path for move: file = "
                   + file + " , src = " + src + ", dest = " + dest);
+        } else if (counter > APPEND_COUNTER_WARN_THRESHOLD) {
+          LOG.warn("Append job used filename clash counter [" + counter
+              +"] which is greater than warning limit [" + APPEND_COUNTER_WARN_THRESHOLD
+              +"]. Please compact this table so that performance is not impacted."
+              + " Please see HIVE-9381 for details.");
         }
 
       }
@@ -696,7 +704,7 @@ class FileOutputCommitterContainer exten
 
       //      LOG.info("Searching for "+dynPathSpec);
       Path pathPattern = new Path(dynPathSpec);
-      FileStatus[] status = fs.globStatus(pathPattern);
+      FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       partitionsDiscoveredByPath = new LinkedHashMap<String, Map<String, String>>();
       contextDiscoveredByPath = new LinkedHashMap<String, JobContext>();

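Editorial sketch: a condensed, self-contained version of the clash-probing loop above. The real code throws HCatException rather than IOException and logs the HIVE-9381 warning once the counter passes APPEND_COUNTER_WARN_THRESHOLD.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendNameSketch {
      private static final String APPEND_SUFFIX = "_a_";

      // Keep appending _a_N until a free destination is found or the (now
      // configurable) attempt limit is exhausted.
      public static Path uniqueDest(FileSystem fs, Path dest, String name, String filetype,
          int maxAppendAttempts) throws IOException {
        Path itemDest = new Path(dest, name + filetype);
        int counter = 1;
        for (; fs.exists(itemDest) && counter < maxAppendAttempts; counter++) {
          itemDest = new Path(dest, name + APPEND_SUFFIX + counter + filetype);
        }
        if (counter == maxAppendAttempts) {
          throw new IOException("Could not find a unique destination path for move: " + itemDest);
        }
        return itemDest;
      }
    }
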
Modified: hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java (original)
+++ hive/branches/spark/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java Thu Jan 22 05:05:05 2015
@@ -20,9 +20,7 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;

Modified: hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java (original)
+++ hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java Thu Jan 22 05:05:05 2015
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
@@ -140,7 +141,7 @@ public class DbNotificationListener exte
    * @throws MetaException
    */
   public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
-    /*Table before = tableEvent.getOldTable();
+    Table before = tableEvent.getOldTable();
     Table after = tableEvent.getNewTable();
     NotificationEvent event = new NotificationEvent(0, now(),
         HCatConstants.HCAT_ALTER_TABLE_EVENT,
@@ -149,8 +150,7 @@ public class DbNotificationListener exte
       event.setDbName(after.getDbName());
       event.setTableName(after.getTableName());
       enqueue(event);
-    }*/
-    // TODO - once HIVE-9175 is committed
+    }
   }
 
   /**
@@ -187,7 +187,16 @@ public class DbNotificationListener exte
    * @throws MetaException
    */
   public void onAlterPartition (AlterPartitionEvent partitionEvent)  throws MetaException {
-    // TODO, MessageFactory doesn't support Alter Partition yet.
+    Partition before = partitionEvent.getOldPartition();
+    Partition after = partitionEvent.getNewPartition();
+    NotificationEvent event = new NotificationEvent(0, now(),
+        HCatConstants.HCAT_ALTER_PARTITION_EVENT,
+        msgFactory.buildAlterPartitionMessage(before, after).toString());
+    if (event != null) {
+      event.setDbName(before.getDbName());
+      event.setTableName(before.getTableName());
+      enqueue(event);
+    }
   }
 
   /**

Modified: hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java (original)
+++ hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java Thu Jan 22 05:05:05 2015
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
 import org.apache.hive.hcatalog.common.HCatConstants;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
 import org.apache.hive.hcatalog.messaging.HCatEventMessage;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
 import org.slf4j.Logger;
@@ -116,7 +117,7 @@ public class NotificationListener extend
     testAndCreateConnection();
   }
 
-  private static String getTopicName(Table table, ListenerEvent partitionEvent) {
+  private static String getTopicName(Table table) {
     return table.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
   }
 
@@ -129,7 +130,7 @@ public class NotificationListener extend
     if (partitionEvent.getStatus()) {
       Table table = partitionEvent.getTable();
       List<Partition> partitions = partitionEvent.getPartitions();
-      String topicName = getTopicName(table, partitionEvent);
+      String topicName = getTopicName(table);
       if (topicName != null && !topicName.equals("")) {
         send(messageFactory.buildAddPartitionMessage(table, partitions), topicName);
       } else {
@@ -144,6 +145,17 @@ public class NotificationListener extend
     }
   }
 
+  @Override
+  public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
+    if (ape.getStatus()) {
+      Partition before = ape.getOldPartition();
+      Partition after = ape.getNewPartition();
+
+      String topicName = getTopicName(ape.getTable());
+      send(messageFactory.buildAlterPartitionMessage(before, after), topicName);
+    }
+  }
+
   /**
    * Send dropped partition notifications. Subscribers can receive these notifications for a
    * particular table by listening on a topic named "dbName.tableName" with message selector
@@ -165,7 +177,7 @@ public class NotificationListener extend
       sd.setParameters(new HashMap<String, String>());
       sd.getSerdeInfo().setParameters(new HashMap<String, String>());
       sd.getSkewedInfo().setSkewedColNames(new ArrayList<String>());
-      String topicName = getTopicName(partitionEvent.getTable(), partitionEvent);
+      String topicName = getTopicName(partitionEvent.getTable());
       if (topicName != null && !topicName.equals("")) {
         send(messageFactory.buildDropPartitionMessage(partitionEvent.getTable(), partition), topicName);
       } else {
@@ -241,6 +253,35 @@ public class NotificationListener extend
   }
 
   /**
+   * Send altered table notifications. Subscribers can receive these notifications for
+   * altered tables by listening on topic "HCAT" with message selector string
+   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
+   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_ALTER_TABLE_EVENT}
+   */
+  @Override
+  public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
+    if (tableEvent.getStatus()) {
+      Table before = tableEvent.getOldTable();
+      Table after = tableEvent.getNewTable();
+
+      // onCreateTable alters the table to add the topic name.  Since this class is generating
+      // that alter, we don't want to notify on that alter.  So take a quick look and see if
+      // that's what this alter is, and if so swallow it.
+      if (after.getParameters() != null &&
+          after.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME) != null &&
+          (before.getParameters() == null ||
+              before.getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME) == null)) {
+        return;
+      }
+      // I think this is wrong, the alter table statement should come on the table topic not the
+      // DB topic - Alan.
+      String topicName = getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "." +
+          after.getDbName().toLowerCase();
+      send(messageFactory.buildAlterTableMessage(before, after), topicName);
+    }
+  }
+
+  /**
    * Send dropped table notifications. Subscribers can receive these notifications for
    * dropped tables by listening on topic "HCAT" with message selector string
    * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
@@ -262,6 +303,8 @@ public class NotificationListener extend
 
     if (tableEvent.getStatus()) {
       Table table = tableEvent.getTable();
+      // I think this is wrong, the drop table statement should come on the table topic not the
+      // DB topic - Alan.
       String topicName = getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "." + table.getDbName().toLowerCase();
       send(messageFactory.buildDropTableMessage(table), topicName);
     }
@@ -435,14 +478,4 @@ public class NotificationListener extend
 //        if(lpde.getStatus())
 //            send(lpde.getPartitionName(),lpde.getTable().getParameters().get(HCatConstants.HCAT_MSGBUS_TOPIC_NAME),HCatConstants.HCAT_PARTITION_DONE_EVENT);
   }
-
-  @Override
-  public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
-    // no-op
-  }
-
-  @Override
-  public void onAlterTable(AlterTableEvent ate) throws MetaException {
-    // no-op
-  }
 }

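Editorial sketch: how a JMS subscriber might select only the new alter-table notifications, following the selector convention in the javadoc above (HCAT_EVENT = ALTER_TABLE). Broker setup is assumed, e.g. an ActiveMQ ConnectionFactory; resource cleanup is omitted for brevity.

    import javax.jms.Connection;
    import javax.jms.ConnectionFactory;
    import javax.jms.JMSException;
    import javax.jms.MessageConsumer;
    import javax.jms.Session;
    import javax.jms.Topic;

    public class AlterTableSubscriber {
      public static MessageConsumer subscribe(ConnectionFactory factory, String dbTopic)
          throws JMSException {
        Connection conn = factory.createConnection();
        conn.start();
        Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Topic topic = session.createTopic(dbTopic);
        // Filter to alter-table events only: HCAT_EVENT = 'ALTER_TABLE'.
        return session.createConsumer(topic, "HCAT_EVENT = 'ALTER_TABLE'");
      }
    }
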
Modified: hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java (original)
+++ hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java Thu Jan 22 05:05:05 2015
@@ -37,7 +37,9 @@ public abstract class HCatEventMessage {
     CREATE_TABLE(HCatConstants.HCAT_CREATE_TABLE_EVENT),
     DROP_TABLE(HCatConstants.HCAT_DROP_TABLE_EVENT),
     ADD_PARTITION(HCatConstants.HCAT_ADD_PARTITION_EVENT),
-    DROP_PARTITION(HCatConstants.HCAT_DROP_PARTITION_EVENT);
+    DROP_PARTITION(HCatConstants.HCAT_DROP_PARTITION_EVENT),
+    ALTER_TABLE(HCatConstants.HCAT_ALTER_TABLE_EVENT),
+    ALTER_PARTITION(HCatConstants.HCAT_ALTER_PARTITION_EVENT);
 
     private String typeString;
 

Modified: hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java (original)
+++ hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageDeserializer.java Thu Jan 22 05:05:05 2015
@@ -36,10 +36,14 @@ public abstract class MessageDeserialize
       return getDropDatabaseMessage(messageBody);
     case CREATE_TABLE:
       return getCreateTableMessage(messageBody);
+    case ALTER_TABLE:
+      return getAlterTableMessage(messageBody);
     case DROP_TABLE:
       return getDropTableMessage(messageBody);
     case ADD_PARTITION:
       return getAddPartitionMessage(messageBody);
+    case ALTER_PARTITION:
+      return getAlterPartitionMessage(messageBody);
     case DROP_PARTITION:
       return getDropPartitionMessage(messageBody);
 
@@ -64,6 +68,13 @@ public abstract class MessageDeserialize
   public abstract CreateTableMessage getCreateTableMessage(String messageBody);
 
   /**
+   * Method to de-serialize AlterTableMessage instance.
+   * @param messageBody the message in serialized form
+   * @return message in object form
+   */
+  public abstract AlterTableMessage getAlterTableMessage(String messageBody);
+
+  /**
    * Method to de-serialize DropTableMessage instance.
    */
   public abstract DropTableMessage getDropTableMessage(String messageBody);
@@ -74,6 +85,13 @@ public abstract class MessageDeserialize
   public abstract AddPartitionMessage getAddPartitionMessage(String messageBody);
 
   /**
+   * Method to deserialize AlterPartitionMessage
+   * @param messageBody the message in serialized form
+   * @return message in object form
+   */
+  public abstract AlterPartitionMessage getAlterPartitionMessage(String messageBody);
+
+  /**
    * Method to de-serialize DropPartitionMessage instance.
    */
   public abstract DropPartitionMessage getDropPartitionMessage(String messageBody);

Modified: hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java (original)
+++ hive/branches/spark/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java Thu Jan 22 05:05:05 2015
@@ -119,6 +119,17 @@ public abstract class MessageFactory {
   public abstract CreateTableMessage buildCreateTableMessage(Table table);
 
   /**
+   * Factory method for AlterTableMessage.  Unlike most of these calls, this one can return null,
+   * which means no message should be sent.  This is because there are many flavors of alter
+   * table (add column, add partition, etc.).  Some are covered elsewhere (like add partition)
+   * and some are not yet supported.
+   * @param before The table before the alter
+   * @param after The table after the alter
+   * @return AlterTableMessage instance, or null if no message should be sent
+   */
+  public abstract AlterTableMessage buildAlterTableMessage(Table before, Table after);
+
+  /**
    * Factory method for DropTableMessage.
    * @param table The Table being dropped.
    * @return DropTableMessage instance.
@@ -144,6 +155,15 @@ public abstract class MessageFactory {
   public abstract AddPartitionMessage buildAddPartitionMessage(Table table, PartitionSpecProxy partitionSpec);
 
   /**
+   * Factory method for building AlterPartitionMessage
+   * @param before The partition before it was altered
+   * @param after The partition after it was altered
+   * @return a new AlterPartitionMessage
+   */
+  public abstract AlterPartitionMessage buildAlterPartitionMessage(Partition before,
+                                                                   Partition after);
+
+  /**
    * Factory method for DropPartitionMessage.
    * @param table The Table from which the partition is dropped.
    * @param partition The Partition being dropped.