Posted to commits@hive.apache.org by su...@apache.org on 2015/07/20 22:12:44 UTC

[34/50] [abbrv] hive git commit: HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)

HIVE-11145 Remove OFFLINE and NO_DROP from tables and partitions (gates, reviewed by Ashutosh Chauhan)
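
Context for readers following the branch: the protect-mode feature removed here was exposed through DDL of the form ALTER TABLE t ENABLE|DISABLE NO_DROP [CASCADE] and ALTER TABLE t ENABLE|DISABLE OFFLINE (also per partition), plus the IGNORE PROTECTION clause on ALTER TABLE ... DROP PARTITION, as the deleted HiveLexer.g/HiveParser.g rules and the protectmode*/drop_partitions_ignore_protection test files below indicate. After this patch those statements no longer parse, and the metastore no longer consults the PROTECT_MODE table/partition parameter when dropping.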


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d6ec52ee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d6ec52ee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d6ec52ee

Branch: refs/heads/spark
Commit: d6ec52ee094d94377442d96d450575462a9497b7
Parents: 7338d8e
Author: Alan Gates <ga...@hortonworks.com>
Authored: Wed Jul 15 17:23:23 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Wed Jul 15 17:23:23 2015 -0700

----------------------------------------------------------------------
 .../results/positive/external_table_ppd.q.out   |   1 -
 .../positive/hbase_binary_storage_queries.q.out |   2 -
 .../src/test/results/positive/hbase_stats.q.out |   7 --
 .../test/results/positive/hbase_stats2.q.out    |   7 --
 .../test/results/positive/hbase_stats3.q.out    |  12 --
 .../positive/hbase_stats_empty_partition.q.out  |   2 -
 .../SemanticAnalysis/HCatSemanticAnalyzer.java  |   7 +-
 .../hive/hcatalog/api/HCatClientHMSImpl.java    |  14 +--
 .../hadoop/hive/metastore/HiveMetaStore.java    |  11 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   7 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  57 +++++-----
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  26 -----
 .../hive/metastore/PartitionDropOptions.java    |   6 -
 .../hadoop/hive/metastore/ProtectMode.java      |  97 ----------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  50 ---------
 .../hadoop/hive/ql/hooks/WriteEntity.java       |   6 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  57 +++++-----
 .../hadoop/hive/ql/metadata/Partition.java      |  51 ---------
 .../apache/hadoop/hive/ql/metadata/Table.java   |  65 ++---------
 .../formatting/MetaDataFormatUtils.java         |  24 ++--
 .../hive/ql/parse/DDLSemanticAnalyzer.java      | 112 +++++--------------
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |  31 ++---
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   5 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  38 +------
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |  11 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  42 +------
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |   8 +-
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |  20 ++--
 .../hadoop/hive/ql/plan/DropTableDesc.java      |  26 +----
 .../hadoop/hive/ql/plan/HiveOperation.java      |   2 -
 .../alter_partition_invalidspec.q               |   8 --
 .../clientnegative/alter_partition_nodrop.q     |   9 --
 .../alter_partition_nodrop_table.q              |   9 --
 .../clientnegative/alter_partition_offline.q    |  11 --
 .../clientnegative/drop_table_failure3.q        |  12 --
 .../queries/clientnegative/protectmode_part.q   |  15 ---
 .../queries/clientnegative/protectmode_part1.q  |  21 ----
 .../queries/clientnegative/protectmode_part2.q  |   9 --
 .../clientnegative/protectmode_part_no_drop.q   |  10 --
 .../clientnegative/protectmode_part_no_drop2.q  |  11 --
 .../queries/clientnegative/protectmode_tbl1.q   |   8 --
 .../queries/clientnegative/protectmode_tbl2.q   |  12 --
 .../queries/clientnegative/protectmode_tbl3.q   |  10 --
 .../queries/clientnegative/protectmode_tbl4.q   |  15 ---
 .../queries/clientnegative/protectmode_tbl5.q   |  15 ---
 .../queries/clientnegative/protectmode_tbl6.q   |   8 --
 .../queries/clientnegative/protectmode_tbl7.q   |  13 ---
 .../queries/clientnegative/protectmode_tbl8.q   |  13 ---
 .../clientnegative/protectmode_tbl_no_drop.q    |   9 --
 .../test/queries/clientnegative/sa_fail_hook3.q |   4 -
 .../alter_partition_protect_mode.q              |  26 -----
 .../drop_partitions_ignore_protection.q         |  10 --
 .../test/queries/clientpositive/protectmode.q   |  63 -----------
 .../test/queries/clientpositive/protectmode2.q  |  23 ----
 .../alter_numbuckets_partitioned_table.q.out    |   8 --
 .../results/beelinepositive/create_like.q.out   |   3 -
 .../results/beelinepositive/create_like2.q.out  |   1 -
 .../beelinepositive/create_like_view.q.out      |   4 -
 .../beelinepositive/create_skewed_table1.q.out  |   3 -
 .../results/beelinepositive/create_view.q.out   |  14 ---
 .../create_view_partitioned.q.out               |   3 -
 ql/src/test/results/beelinepositive/ctas.q.out  |   5 -
 .../describe_formatted_view_partitioned.q.out   |   1 -
 .../beelinepositive/describe_table.q.out        |   3 -
 .../test/results/beelinepositive/merge3.q.out   |   1 -
 .../part_inherit_tbl_props.q.out                |   1 -
 .../part_inherit_tbl_props_empty.q.out          |   1 -
 .../part_inherit_tbl_props_with_star.q.out      |   1 -
 .../results/beelinepositive/protectmode2.q.out  |   2 -
 .../test/results/beelinepositive/stats1.q.out   |   2 -
 .../test/results/beelinepositive/stats10.q.out  |   3 -
 .../test/results/beelinepositive/stats11.q.out  |   4 -
 .../test/results/beelinepositive/stats12.q.out  |   5 -
 .../test/results/beelinepositive/stats13.q.out  |   6 -
 .../test/results/beelinepositive/stats14.q.out  |   5 -
 .../test/results/beelinepositive/stats15.q.out  |   5 -
 .../test/results/beelinepositive/stats16.q.out  |   2 -
 .../test/results/beelinepositive/stats18.q.out  |   2 -
 .../test/results/beelinepositive/stats2.q.out   |   2 -
 .../test/results/beelinepositive/stats3.q.out   |   2 -
 .../test/results/beelinepositive/stats4.q.out   |   6 -
 .../test/results/beelinepositive/stats5.q.out   |   1 -
 .../test/results/beelinepositive/stats6.q.out   |   5 -
 .../test/results/beelinepositive/stats7.q.out   |   3 -
 .../test/results/beelinepositive/stats8.q.out   |  10 --
 .../test/results/beelinepositive/stats9.q.out   |   1 -
 .../beelinepositive/stats_empty_partition.q.out |   1 -
 .../clientnegative/alter_file_format.q.out      |   1 -
 .../alter_view_as_select_with_partition.q.out   |   1 -
 .../stats_partialscan_autogether.q.out          |   2 -
 .../clientpositive/alter_file_format.q.out      |  19 ----
 .../clientpositive/alter_merge_stats_orc.q.out  |   8 --
 .../alter_numbuckets_partitioned_table.q.out    |  16 ---
 .../alter_numbuckets_partitioned_table2.q.out   |  27 -----
 ...lter_numbuckets_partitioned_table2_h23.q.out |  27 -----
 ...alter_numbuckets_partitioned_table_h23.q.out |  16 ---
 .../alter_partition_clusterby_sortby.q.out      |   7 --
 .../clientpositive/alter_skewed_table.q.out     |   6 -
 .../clientpositive/alter_table_not_sorted.q.out |   2 -
 .../clientpositive/alter_table_serde2.q.out     |   6 -
 .../clientpositive/alter_view_as_select.q.out   |   3 -
 .../clientpositive/authorization_index.q.out    |   1 -
 .../test/results/clientpositive/bucket5.q.out   |   1 -
 .../create_alter_list_bucketing_table1.q.out    |   7 --
 .../results/clientpositive/create_like.q.out    |   9 --
 .../results/clientpositive/create_like2.q.out   |   1 -
 .../clientpositive/create_like_tbl_props.q.out  |   5 -
 .../clientpositive/create_like_view.q.out       |   4 -
 .../clientpositive/create_or_replace_view.q.out |   5 -
 .../clientpositive/create_skewed_table1.q.out   |   3 -
 .../results/clientpositive/create_view.q.out    |  14 ---
 .../create_view_partitioned.q.out               |   3 -
 .../clientpositive/create_view_translate.q.out  |   2 -
 ql/src/test/results/clientpositive/ctas.q.out   |   5 -
 .../results/clientpositive/ctas_colname.q.out   |   7 --
 .../results/clientpositive/ctas_hadoop20.q.out  |   5 -
 .../ctas_uses_database_location.q.out           |   1 -
 .../clientpositive/database_location.q.out      |   2 -
 .../results/clientpositive/decimal_serde.q.out  |   2 -
 .../clientpositive/default_file_format.q.out    |   5 -
 .../describe_comment_indent.q.out               |   1 -
 .../describe_comment_nonascii.q.out             |   1 -
 .../describe_formatted_view_partitioned.q.out   |   2 -
 .../clientpositive/describe_syntax.q.out        |   6 -
 .../results/clientpositive/describe_table.q.out |   7 --
 .../dynpart_sort_opt_vectorization.q.out        |  32 ------
 .../dynpart_sort_optimization.q.out             |  32 ------
 .../dynpart_sort_optimization2.q.out            |  24 ----
 .../encrypted/encryption_insert_values.q.out    |   1 -
 .../clientpositive/exim_hidden_files.q.out      |   1 -
 .../clientpositive/index_skewtable.q.out        |   1 -
 .../clientpositive/infer_bucket_sort.q.out      |  50 ---------
 .../infer_bucket_sort_bucketed_table.q.out      |   2 -
 .../infer_bucket_sort_convert_join.q.out        |   4 -
 .../infer_bucket_sort_dyn_part.q.out            |  16 ---
 .../infer_bucket_sort_grouping_operators.q.out  |  12 --
 .../infer_bucket_sort_list_bucket.q.out         |   4 -
 .../infer_bucket_sort_map_operators.q.out       |   8 --
 .../infer_bucket_sort_merge.q.out               |   4 -
 .../infer_bucket_sort_multi_insert.q.out        |  16 ---
 .../infer_bucket_sort_num_buckets.q.out         |   4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |  12 --
 .../results/clientpositive/lb_fs_stats.q.out    |   2 -
 .../clientpositive/list_bucket_dml_1.q.out      |   4 -
 .../list_bucket_dml_10.q.java1.7.out            |   2 -
 .../list_bucket_dml_10.q.java1.8.out            |   2 -
 .../list_bucket_dml_11.q.java1.7.out            |   2 -
 .../list_bucket_dml_11.q.java1.8.out            |   2 -
 .../list_bucket_dml_12.q.java1.7.out            |   2 -
 .../list_bucket_dml_12.q.java1.8.out            |   2 -
 .../list_bucket_dml_13.q.java1.7.out            |   2 -
 .../list_bucket_dml_13.q.java1.8.out            |   2 -
 .../clientpositive/list_bucket_dml_14.q.out     |   1 -
 .../list_bucket_dml_2.q.java1.7.out             |   2 -
 .../list_bucket_dml_2.q.java1.8.out             |   2 -
 .../clientpositive/list_bucket_dml_3.q.out      |   2 -
 .../list_bucket_dml_4.q.java1.7.out             |   4 -
 .../list_bucket_dml_4.q.java1.8.out             |   4 -
 .../list_bucket_dml_5.q.java1.7.out             |   4 -
 .../list_bucket_dml_5.q.java1.8.out             |   4 -
 .../list_bucket_dml_6.q.java1.7.out             |   8 --
 .../list_bucket_dml_6.q.java1.8.out             |   8 --
 .../clientpositive/list_bucket_dml_7.q.out      |   8 --
 .../list_bucket_dml_8.q.java1.7.out             |   6 -
 .../list_bucket_dml_8.q.java1.8.out             |   6 -
 .../list_bucket_dml_9.q.java1.7.out             |   4 -
 .../list_bucket_dml_9.q.java1.8.out             |   4 -
 .../list_bucket_query_multiskew_1.q.out         |   2 -
 .../list_bucket_query_multiskew_2.q.out         |   2 -
 .../list_bucket_query_multiskew_3.q.out         |   6 -
 .../list_bucket_query_oneskew_1.q.out           |   2 -
 .../list_bucket_query_oneskew_2.q.out           |   2 -
 .../list_bucket_query_oneskew_3.q.out           |   2 -
 ql/src/test/results/clientpositive/merge3.q.out |   1 -
 .../results/clientpositive/orc_analyze.q.out    |  48 --------
 .../results/clientpositive/orc_create.q.out     |   6 -
 .../clientpositive/parallel_orderby.q.out       |   2 -
 .../parquet_array_null_element.q.out            |   1 -
 .../results/clientpositive/parquet_create.q.out |   1 -
 .../clientpositive/parquet_partitioned.q.out    |   1 -
 .../results/clientpositive/parquet_serde.q.out  |   5 -
 .../clientpositive/part_inherit_tbl_props.q.out |   2 -
 .../part_inherit_tbl_props_empty.q.out          |   2 -
 .../part_inherit_tbl_props_with_star.q.out      |   2 -
 .../partition_coltype_literals.q.out            |  16 ---
 .../results/clientpositive/protectmode2.q.out   |   2 -
 .../clientpositive/rcfile_default_format.q.out  |   8 --
 .../clientpositive/selectDistinctStar.q.out     |   2 -
 .../spark/alter_merge_stats_orc.q.out           |   8 --
 .../results/clientpositive/spark/bucket5.q.out  |   1 -
 .../results/clientpositive/spark/ctas.q.out     |   5 -
 .../infer_bucket_sort_bucketed_table.q.out      |   2 -
 .../spark/infer_bucket_sort_convert_join.q.out  |   4 -
 .../spark/infer_bucket_sort_map_operators.q.out |   8 --
 .../spark/infer_bucket_sort_merge.q.out         |   4 -
 .../spark/infer_bucket_sort_num_buckets.q.out   |   4 -
 .../infer_bucket_sort_reducers_power_two.q.out  |  12 --
 .../spark/list_bucket_dml_10.q.java1.7.out      |   2 -
 .../spark/list_bucket_dml_10.q.java1.8.out      |   2 -
 .../spark/list_bucket_dml_2.q.java1.7.out       |   2 -
 .../spark/list_bucket_dml_2.q.java1.8.out       |   2 -
 .../spark/list_bucket_dml_2.q.out               | Bin 28747 -> 28667 bytes
 .../clientpositive/spark/orc_analyze.q.out      |  22 ----
 .../clientpositive/spark/parallel_orderby.q.out |   2 -
 .../results/clientpositive/spark/stats1.q.out   |   2 -
 .../results/clientpositive/spark/stats10.q.out  |   5 -
 .../results/clientpositive/spark/stats12.q.out  |   9 --
 .../results/clientpositive/spark/stats13.q.out  |  10 --
 .../results/clientpositive/spark/stats14.q.out  |   7 --
 .../results/clientpositive/spark/stats15.q.out  |   7 --
 .../results/clientpositive/spark/stats16.q.out  |   2 -
 .../results/clientpositive/spark/stats18.q.out  |   4 -
 .../results/clientpositive/spark/stats2.q.out   |   2 -
 .../results/clientpositive/spark/stats20.q.out  |   2 -
 .../results/clientpositive/spark/stats3.q.out   |   2 -
 .../results/clientpositive/spark/stats5.q.out   |   1 -
 .../results/clientpositive/spark/stats6.q.out   |   9 --
 .../results/clientpositive/spark/stats7.q.out   |   5 -
 .../results/clientpositive/spark/stats8.q.out   |  18 ---
 .../results/clientpositive/spark/stats9.q.out   |   1 -
 .../clientpositive/spark/stats_counter.q.out    |   2 -
 .../spark/stats_counter_partitioned.q.out       |  16 ---
 .../clientpositive/spark/stats_noscan_1.q.out   |  17 ---
 .../clientpositive/spark/stats_noscan_2.q.out   |   6 -
 .../clientpositive/spark/stats_only_null.q.out  |   4 -
 .../spark/stats_partscan_1_23.q.out             |   6 -
 .../results/clientpositive/spark/statsfs.q.out  |  14 ---
 .../clientpositive/spark/union_remove_1.q.out   |   1 -
 .../clientpositive/spark/union_remove_10.q.out  |   1 -
 .../clientpositive/spark/union_remove_11.q.out  |   1 -
 .../clientpositive/spark/union_remove_12.q.out  |   1 -
 .../clientpositive/spark/union_remove_13.q.out  |   1 -
 .../clientpositive/spark/union_remove_14.q.out  |   1 -
 .../clientpositive/spark/union_remove_15.q.out  |   1 -
 .../clientpositive/spark/union_remove_16.q.out  |   1 -
 .../clientpositive/spark/union_remove_17.q.out  |   1 -
 .../clientpositive/spark/union_remove_18.q.out  |   1 -
 .../clientpositive/spark/union_remove_19.q.out  |   1 -
 .../clientpositive/spark/union_remove_2.q.out   |   1 -
 .../clientpositive/spark/union_remove_20.q.out  |   1 -
 .../clientpositive/spark/union_remove_21.q.out  |   1 -
 .../clientpositive/spark/union_remove_22.q.out  |   1 -
 .../clientpositive/spark/union_remove_23.q.out  |   1 -
 .../clientpositive/spark/union_remove_24.q.out  |   1 -
 .../clientpositive/spark/union_remove_25.q.out  |   6 -
 .../clientpositive/spark/union_remove_3.q.out   |   1 -
 .../clientpositive/spark/union_remove_4.q.out   |   1 -
 .../clientpositive/spark/union_remove_5.q.out   |   1 -
 .../clientpositive/spark/union_remove_7.q.out   |   1 -
 .../clientpositive/spark/union_remove_8.q.out   |   1 -
 .../clientpositive/spark/union_remove_9.q.out   |   1 -
 ql/src/test/results/clientpositive/stats1.q.out |   2 -
 .../test/results/clientpositive/stats10.q.out   |   5 -
 .../test/results/clientpositive/stats11.q.out   |   8 --
 .../test/results/clientpositive/stats12.q.out   |   9 --
 .../test/results/clientpositive/stats13.q.out   |  10 --
 .../test/results/clientpositive/stats14.q.out   |   7 --
 .../test/results/clientpositive/stats15.q.out   |   7 --
 .../test/results/clientpositive/stats16.q.out   |   2 -
 .../test/results/clientpositive/stats18.q.out   |   4 -
 .../test/results/clientpositive/stats19.q.out   |  12 --
 ql/src/test/results/clientpositive/stats2.q.out |   2 -
 .../test/results/clientpositive/stats20.q.out   |   2 -
 ql/src/test/results/clientpositive/stats3.q.out |   2 -
 ql/src/test/results/clientpositive/stats4.q.out |  10 --
 ql/src/test/results/clientpositive/stats5.q.out |   1 -
 ql/src/test/results/clientpositive/stats6.q.out |   9 --
 ql/src/test/results/clientpositive/stats7.q.out |   5 -
 ql/src/test/results/clientpositive/stats8.q.out |  18 ---
 ql/src/test/results/clientpositive/stats9.q.out |   1 -
 .../results/clientpositive/stats_counter.q.out  |   2 -
 .../stats_counter_partitioned.q.out             |  16 ---
 .../clientpositive/stats_empty_partition.q.out  |   2 -
 .../clientpositive/stats_invalidation.q.out     |   2 -
 .../stats_list_bucket.q.java1.7.out             |   3 -
 .../stats_list_bucket.q.java1.8.out             |   3 -
 .../results/clientpositive/stats_noscan_1.q.out |  17 ---
 .../results/clientpositive/stats_noscan_2.q.out |   6 -
 .../clientpositive/stats_only_null.q.out        |   4 -
 .../clientpositive/stats_partscan_1.q.out       |   6 -
 .../clientpositive/stats_partscan_1_23.q.out    |   6 -
 .../test/results/clientpositive/statsfs.q.out   |  14 ---
 .../tez/alter_merge_stats_orc.q.out             |   8 --
 .../test/results/clientpositive/tez/ctas.q.out  |   5 -
 .../tez/dynpart_sort_opt_vectorization.q.out    |  32 ------
 .../tez/dynpart_sort_optimization.q.out         |  32 ------
 .../tez/dynpart_sort_optimization2.q.out        |  24 ----
 .../clientpositive/tez/orc_analyze.q.out        |  48 --------
 .../clientpositive/tez/selectDistinctStar.q.out |   2 -
 .../clientpositive/tez/stats_counter.q.out      |   2 -
 .../tez/stats_counter_partitioned.q.out         |  16 ---
 .../clientpositive/tez/stats_noscan_1.q.out     |  17 ---
 .../clientpositive/tez/stats_only_null.q.out    |   4 -
 .../results/clientpositive/tez/tez_fsstat.q.out |   2 -
 .../clientpositive/truncate_column.q.out        |  11 --
 .../clientpositive/unicode_notation.q.out       |   3 -
 .../results/clientpositive/union_remove_1.q.out |   1 -
 .../clientpositive/union_remove_10.q.out        |   1 -
 .../clientpositive/union_remove_11.q.out        |   1 -
 .../clientpositive/union_remove_12.q.out        |   1 -
 .../clientpositive/union_remove_13.q.out        |   1 -
 .../clientpositive/union_remove_14.q.out        |   1 -
 .../clientpositive/union_remove_15.q.out        |   1 -
 .../clientpositive/union_remove_16.q.out        |   1 -
 .../clientpositive/union_remove_17.q.out        |   1 -
 .../clientpositive/union_remove_18.q.out        |   1 -
 .../clientpositive/union_remove_19.q.out        |   1 -
 .../results/clientpositive/union_remove_2.q.out |   1 -
 .../clientpositive/union_remove_20.q.out        |   1 -
 .../clientpositive/union_remove_21.q.out        |   1 -
 .../clientpositive/union_remove_22.q.out        |   1 -
 .../clientpositive/union_remove_23.q.out        |   1 -
 .../clientpositive/union_remove_24.q.out        |   1 -
 .../clientpositive/union_remove_25.q.out        |   6 -
 .../results/clientpositive/union_remove_3.q.out |   1 -
 .../results/clientpositive/union_remove_4.q.out |   1 -
 .../results/clientpositive/union_remove_5.q.out |   1 -
 .../results/clientpositive/union_remove_7.q.out |   1 -
 .../results/clientpositive/union_remove_8.q.out |   1 -
 .../results/clientpositive/union_remove_9.q.out |   1 -
 320 files changed, 150 insertions(+), 2473 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/external_table_ppd.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/external_table_ppd.q.out b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
index 57424ce..83eb2f5 100644
--- a/hbase-handler/src/test/results/positive/external_table_ppd.q.out
+++ b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
@@ -52,7 +52,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
index 578ddb2..f212331 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
@@ -52,7 +52,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -228,7 +227,6 @@ boolean_col         	boolean             	from deserializer
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	EXTERNAL_TABLE      	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats.q.out b/hbase-handler/src/test/results/positive/hbase_stats.q.out
index f12b136..f34720d 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats2.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats2.q.out b/hbase-handler/src/test/results/positive/hbase_stats2.q.out
index a60dee2..aad2e3a 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats2.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats2.q.out
@@ -38,7 +38,6 @@ value               	string              	default
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -142,7 +141,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
@@ -181,8 +179,6 @@ Partition Value:    	[2010-04-08, 11]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -223,8 +219,6 @@ Partition Value:    	[2010-04-08, 12]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -283,7 +277,6 @@ hr                  	string
 # Detailed Table Information	 	 
 Database:           	default             	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
 Retention:          	0                   	 
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats3.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats3.q.out b/hbase-handler/src/test/results/positive/hbase_stats3.q.out
index 114847c..063800f 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats3.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats3.q.out
@@ -40,8 +40,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -94,8 +92,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -148,8 +144,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -206,8 +200,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -260,8 +252,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
@@ -314,8 +304,6 @@ Partition Value:    	[2010-04-08, 13]
 Database:           	default             	 
 Table:              	stats_part          	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out b/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
index 65e0a6f..c13817e 100644
--- a/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
@@ -43,8 +43,6 @@ Partition Value:    	[1]
 Database:           	default             	 
 Table:              	tmptable            	 
 #### A masked pattern was here ####
-Protect Mode:       	None                	 
-#### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 11d0743..18bf172 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hive.hcatalog.cli.SemanticAnalysis;
 
-import java.io.Serializable;
-import java.util.List;
-
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -50,6 +47,9 @@ import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.hcatalog.common.ErrorType;
 import org.apache.hive.hcatalog.common.HCatException;
 
+import java.io.Serializable;
+import java.util.List;
+
 public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
 
   private AbstractSemanticAnalyzerHook hook;
@@ -237,7 +237,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
           case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
           case HiveParser.TOK_ALTERTABLE_SKEWED:
           case HiveParser.TOK_ALTERTABLE_FILEFORMAT:
-          case HiveParser.TOK_ALTERTABLE_PROTECTMODE:
           case HiveParser.TOK_ALTERTABLE_LOCATION:
           case HiveParser.TOK_ALTERTABLE_MERGEFILES:
           case HiveParser.TOK_ALTERTABLE_RENAMEPART:

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index 3a69581..41571fc 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -18,13 +18,6 @@
  */
 package org.apache.hive.hcatalog.api;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -76,6 +69,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 /**
  * The HCatClientHMSImpl is the Hive Metastore client based implementation of
@@ -588,7 +587,6 @@ public class HCatClientHMSImpl extends HCatClient {
             Utilities.serializeExpressionToKryo(partitionExpression));
     hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression),
         deleteData && !isExternal(table),  // Delete data?
-        false,                             // Ignore Protection?
         ifExists,                          // Fail if table doesn't exist?
         false);                            // Need results back?
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2ef5aa0..0edf11f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -230,7 +230,9 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
 import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.*;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
 
 /**
  * TODO:pc remove application logic to a separate interface.
@@ -2774,10 +2776,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         for (Partition part : parts) {
-          if (!ignoreProtection && !MetaStoreUtils.canDropPartition(tbl, part)) {
-            throw new MetaException("Table " + tbl.getTableName()
-                + " Partition " + part + " is protected from being dropped");
-          }
+
+          // TODO - we need to speed this up for the normal path where all partitions are under
+          // the table and we don't have to stat every partition
 
           firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
           if (colNames != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index a5f5053..66fbfe4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -842,7 +842,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     rps.setExprs(exprs);
     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
     req.setDeleteData(options.deleteData);
-    req.setIgnoreProtection(options.ignoreProtection);
     req.setNeedResult(options.returnResults);
     req.setIfExists(options.ifExists);
     if (options.purgeData) {
@@ -854,13 +853,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
 
   @Override
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
 
     return dropPartitions(dbName, tblName, partExprs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists)
                                               .returnResults(needResult));
 
@@ -868,13 +866,12 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
 
   @Override
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
     // By default, we need the results from dropPartitions();
     return dropPartitions(dbName, tblName, partExprs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists));
   }
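
For client code, the visible effect of the two hunks above is one fewer boolean on the dropPartitions overloads. A minimal migration sketch, assuming a connected IMetaStoreClient named msc and an already-built partExprs list (both hypothetical names):

  import java.util.List;
  import org.apache.hadoop.hive.common.ObjectPair;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;
  import org.apache.thrift.TException;

  class DropPartitionsMigration {
    static List<Partition> drop(IMetaStoreClient msc, String dbName, String tblName,
        List<ObjectPair<Integer, byte[]>> partExprs) throws TException {
      // Before HIVE-11145 the third boolean was ignoreProtection:
      //   msc.dropPartitions(dbName, tblName, partExprs, true, false, true);
      // After this patch only deleteData and ifExists remain:
      return msc.dropPartitions(dbName, tblName, partExprs,
          true,   // deleteData
          true);  // ifExists
    }
  }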
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 341b0ca..147ffcc 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -19,50 +19,30 @@
 package org.apache.hadoop.hive.metastore;
 
 
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.thrift.TException;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+import org.apache.hadoop.hive.metastore.api.FireEventResponse;
 import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -70,8 +50,15 @@ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
@@ -79,10 +66,20 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Wrapper around hive metastore thrift api
@@ -684,11 +681,11 @@ public interface IMetaStoreClient {
                         PartitionDropOptions options) throws TException;
 
   List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists) throws NoSuchObjectException, MetaException, TException;
 
   List<Partition> dropPartitions(String dbName, String tblName,
-      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ignoreProtection,
+      List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
       boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 38dc406..907cbbf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1604,32 +1604,6 @@ public class MetaStoreUtils {
     return null;
   }
 
-  public static ProtectMode getProtectMode(Partition partition) {
-    return getProtectMode(partition.getParameters());
-  }
-
-  public static ProtectMode getProtectMode(Table table) {
-    return getProtectMode(table.getParameters());
-  }
-
-  private static ProtectMode getProtectMode(Map<String, String> parameters) {
-    if (parameters == null) {
-      return null;
-    }
-
-    if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
-      return new ProtectMode();
-    } else {
-      return ProtectMode.getProtectModeFromString(parameters.get(ProtectMode.PARAMETER_NAME));
-    }
-  }
-
-  public static boolean canDropPartition(Table table, Partition partition) {
-    ProtectMode mode = getProtectMode(partition);
-    ProtectMode parentMode = getProtectMode(table);
-    return (!mode.noDrop && !mode.offline && !mode.readOnly && !parentMode.noDropCascade);
-  }
-
   public static String ARCHIVING_LEVEL = "archiving_level";
   public static int getArchivingLevel(Partition part) throws MetaException {
     if (!isArchived(part)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
index 5b2811f..e8ffbd5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java
@@ -24,7 +24,6 @@ package org.apache.hadoop.hive.metastore;
 public class PartitionDropOptions {
 
   public boolean deleteData = true;
-  public boolean ignoreProtection = false;
   public boolean ifExists = false;
   public boolean returnResults = true;
   public boolean purgeData = false;
@@ -36,11 +35,6 @@ public class PartitionDropOptions {
     return this;
   }
 
-  public PartitionDropOptions ignoreProtection(boolean ignoreProtection) {
-    this.ignoreProtection = ignoreProtection;
-    return this;
-  }
-
   public PartitionDropOptions ifExists(boolean ifExists) {
     this.ifExists = ifExists;
     return this;
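
The surviving options still compose fluently; a short sketch of the post-patch builder, with field names taken from the class as shown above and usage mirroring the DDLTask hunk further down:

  import org.apache.hadoop.hive.metastore.PartitionDropOptions;

  class DropOptionsExample {
    static PartitionDropOptions forDrop(boolean purge) {
      return PartitionDropOptions.instance()
          .deleteData(true)   // also remove the partition's data
          .ifExists(true)     // don't fail on already-missing partitions
          .purgeData(purge);  // bypass the trash when deleting
    }
  }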

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
deleted file mode 100644
index b8f1390..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ProtectMode.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-
-public class ProtectMode {
-  public static String PARAMETER_NAME = "PROTECT_MODE";
-
-  public static String FLAG_OFFLINE = "OFFLINE";
-  public static String FLAG_NO_DROP = "NO_DROP";
-  public static String FLAG_NO_DROP_CASCADE = "NO_DROP_CASCADE";
-  public static String FLAG_READ_ONLY = "READ_ONLY";
-
-  public boolean offline = false;
-  public boolean readOnly = false;
-  public boolean noDrop = false;
-  public boolean noDropCascade = false;
-
-  static public ProtectMode getProtectModeFromString(String sourceString) {
-    return new ProtectMode(sourceString);
-  }
-
-  private ProtectMode(String sourceString) {
-    String[] tokens = sourceString.split(",");
-    for (String token: tokens) {
-      if (token.equalsIgnoreCase(FLAG_OFFLINE)) {
-        offline = true;
-      } else if (token.equalsIgnoreCase(FLAG_NO_DROP)) {
-        noDrop = true;
-      } else if (token.equalsIgnoreCase(FLAG_NO_DROP_CASCADE)) {
-        noDropCascade = true;
-      } else if (token.equalsIgnoreCase(FLAG_READ_ONLY)) {
-        readOnly = true;
-      }
-    }
-  }
-
-  public ProtectMode() {
-  }
-
-  @Override
-  public String toString() {
-    String retString = null;
-
-    if (offline) {
-        retString = FLAG_OFFLINE;
-    }
-
-    if (noDrop) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_NO_DROP;
-      }
-      else
-      {
-        retString = FLAG_NO_DROP;
-      }
-    }
-
-    if (noDropCascade) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_NO_DROP_CASCADE;
-      }
-      else
-      {
-        retString = FLAG_NO_DROP_CASCADE;
-      }
-    }
-
-    if (readOnly) {
-      if (retString != null) {
-        retString = retString + "," + FLAG_READ_ONLY;
-      }
-      else
-      {
-        retString = FLAG_READ_ONLY;
-      }
-    }
-
-    return retString;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a8c6aca..734742c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
@@ -3210,17 +3209,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return builder;
   }
 
-  private void setAlterProtectMode(boolean protectModeEnable,
-      AlterTableDesc.ProtectModeType protectMode,
-      ProtectMode mode) {
-    if (protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
-      mode.offline = protectModeEnable;
-    } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
-      mode.noDrop = protectModeEnable;
-    } else if (protectMode == AlterTableDesc.ProtectModeType.NO_DROP_CASCADE) {
-      mode.noDropCascade = protectModeEnable;
-    }
-  }
   /**
    * Alter a given table.
    *
@@ -3453,20 +3441,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       if (alterTbl.getSerdeName() != null) {
         sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
       }
-    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
-      boolean protectModeEnable = alterTbl.isProtectModeEnable();
-      AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
-
-      ProtectMode mode = null;
-      if (part != null) {
-        mode = part.getProtectMode();
-        setAlterProtectMode(protectModeEnable, protectMode, mode);
-        part.setProtectMode(mode);
-      } else {
-        mode = tbl.getProtectMode();
-        setAlterProtectMode(protectModeEnable,protectMode, mode);
-        tbl.setProtectMode(mode);
-      }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
       StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
       // validate sort columns and bucket columns
@@ -3635,7 +3609,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
                             dropTbl.getPartSpecs(),
                             PartitionDropOptions.instance()
                                                 .deleteData(true)
-                                                .ignoreProtection(dropTbl.getIgnoreProtection())
                                                 .ifExists(true)
                                                 .purgeData(dropTbl.getIfPurge()));
     for (Partition partition : droppedParts) {
@@ -3666,11 +3639,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
     }
 
-    if (tbl != null && !tbl.canDrop()) {
-      throw new HiveException("Table " + tbl.getTableName() +
-          " is protected from being dropped");
-    }
-
     ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
     if ((tbl!= null) && replicationSpec.isInReplicationScope()){
       /**
@@ -3714,24 +3682,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     int partitionBatchSize = HiveConf.getIntVar(conf,
         ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
 
-    // We should check that all the partitions of the table can be dropped
-    if (tbl != null && tbl.isPartitioned()) {
-      List<String> partitionNames = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short)-1);
-
-      for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
-        List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
-            partitionNames.size()));
-        List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
-        for (Partition p: listPartitions) {
-          if (!p.canDrop()) {
-            throw new HiveException("Table " + tbl.getTableName() +
-                " Partition" + p.getName() +
-                " is protected from being dropped");
-          }
-        }
-      }
-    }
-
     // drop the table
     db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
     if (tbl != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 968c1e1..298e7f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
-import java.io.Serializable;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -29,6 +27,8 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 
+import java.io.Serializable;
+
 /**
  * This class encapsulates an object that is being written to by the query. This
  * object may be a table, partition, dfs directory or a local directory.
@@ -193,8 +193,6 @@ public class WriteEntity extends Entity implements Serializable {
       case REPLACECOLS:
       case ARCHIVE:
       case UNARCHIVE:
-      case ALTERPROTECTMODE:
-      case ALTERPARTITIONPROTECTMODE:
       case ALTERLOCATION:
       case DROPPARTITION:
       case RENAMEPARTITION:

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index d89aafc..00125fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -18,30 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
-import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR;
-import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
-import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -122,7 +99,29 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
 
-import com.google.common.collect.Sets;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR;
+import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
+import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
+import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
 
 /**
@@ -1979,19 +1978,17 @@ private void constructOneLBLocationMap(FileStatus fSta,
   }
 
   public List<Partition> dropPartitions(String tblName, List<DropTableDesc.PartSpec> partSpecs,
-      boolean deleteData, boolean ignoreProtection, boolean ifExists) throws HiveException {
+      boolean deleteData, boolean ifExists) throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
-    return dropPartitions(
-        names[0], names[1], partSpecs, deleteData, ignoreProtection, ifExists);
+    return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists);
   }
 
   public List<Partition> dropPartitions(String dbName, String tblName,
-      List<DropTableDesc.PartSpec> partSpecs,  boolean deleteData, boolean ignoreProtection,
+      List<DropTableDesc.PartSpec> partSpecs,  boolean deleteData,
       boolean ifExists) throws HiveException {
     return dropPartitions(dbName, tblName, partSpecs,
                           PartitionDropOptions.instance()
                                               .deleteData(deleteData)
-                                              .ignoreProtection(ignoreProtection)
                                               .ifExists(ifExists));
   }
 

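The convenience overloads above each shrink by one parameter. A hedged sketch against the new four-argument form (all identifiers are placeholders):

    // The dotted name is split internally via Utilities.getDbTableName();
    // table name and partSpecs here are examples, not values from this patch.
    List<Partition> dropped = db.dropPartitions(
        "default.web_logs", partSpecs,
        true,   // deleteData
        true);  // ifExists
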
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 08ff2e9..2e77bc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -541,56 +540,6 @@ public class Partition implements Serializable {
   }
 
   /**
-   * @param protectMode
-   */
-  public void setProtectMode(ProtectMode protectMode){
-    Map<String, String> parameters = tPartition.getParameters();
-    String pm = protectMode.toString();
-    if (pm != null) {
-      parameters.put(ProtectMode.PARAMETER_NAME, pm);
-    } else {
-      parameters.remove(ProtectMode.PARAMETER_NAME);
-    }
-    tPartition.setParameters(parameters);
-  }
-
-  /**
-   * @return protect mode
-   */
-  public ProtectMode getProtectMode(){
-    return MetaStoreUtils.getProtectMode(tPartition);
-  }
-
-  /**
-   * @return True protect mode indicates the partition if offline.
-   */
-  public boolean isOffline(){
-    ProtectMode pm = getProtectMode();
-    if (pm == null) {
-      return false;
-    } else {
-      return pm.offline;
-    }
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to drop the table
-   */
-  public boolean canDrop() {
-    return MetaStoreUtils.canDropPartition(table.getTTable(), tPartition);
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to write to the table
-   */
-  public boolean canWrite() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.offline && !mode.readOnly);
-  }
-
-  /**
    * @return include the db name
    */
   public String getCompleteName() {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index e53933e..52ed4a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +28,6 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -65,6 +55,15 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
 /**
  * A Hive Table: is a fundamental unit of data in Hive that shares a common schema/DDL.
  *
@@ -849,52 +848,6 @@ public class Table implements Serializable {
   }
 
   /**
-   * @param protectMode
-   */
-  public void setProtectMode(ProtectMode protectMode){
-    Map<String, String> parameters = tTable.getParameters();
-    String pm = protectMode.toString();
-    if (pm != null) {
-      parameters.put(ProtectMode.PARAMETER_NAME, pm);
-    } else {
-      parameters.remove(ProtectMode.PARAMETER_NAME);
-    }
-    tTable.setParameters(parameters);
-  }
-
-  /**
-   * @return protect mode
-   */
-  public ProtectMode getProtectMode(){
-    return MetaStoreUtils.getProtectMode(tTable);
-  }
-
-  /**
-   * @return True protect mode indicates the table if offline.
-   */
-  public boolean isOffline(){
-    return getProtectMode().offline;
-  }
-
-  /**
-   * @return True if protect mode attribute of the partition indicate
-   * that it is OK to drop the partition
-   */
-  public boolean canDrop() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.noDrop && !mode.offline && !mode.readOnly && !mode.noDropCascade);
-  }
-
-  /**
-   * @return True if protect mode attribute of the table indicate
-   * that it is OK to write the table
-   */
-  public boolean canWrite() {
-    ProtectMode mode = getProtectMode();
-    return (!mode.offline && !mode.readOnly);
-  }
-
-  /**
    * @return include the db name
    */
   public String getCompleteName() {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index bc09fc3..a78700d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.hive.ql.metadata.formatting;
 
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -55,6 +45,16 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
 
 /**
  * This class provides methods to format table and index information.
@@ -357,8 +357,6 @@ public final class MetaDataFormatUtils {
     formatOutput("Owner:", tbl.getOwner(), tableInfo);
     formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
     formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
-    String protectMode = tbl.getProtectMode().toString();
-    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
     formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
     if (!tbl.isView()) {
       formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
@@ -378,8 +376,6 @@ public final class MetaDataFormatUtils {
     formatOutput("CreateTime:", formatDate(part.getTPartition().getCreateTime()), tableInfo);
     formatOutput("LastAccessTime:", formatDate(part.getTPartition().getLastAccessTime()),
         tableInfo);
-    String protectMode = part.getProtectMode().toString();
-    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
     formatOutput("Location:", part.getLocation(), tableInfo);
 
     if (part.getTPartition().getParameters().size() > 0) {

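With the two "Protect Mode:" rows dropped, the formatted header now runs straight from LastAccessTime to Retention (for tables) or from LastAccessTime to Location (for partitions). An illustrative fragment of the new table output, with made-up values:

    Owner:               hive
    CreateTime:          Wed Jul 15 17:23:23 PDT 2015
    LastAccessTime:      UNKNOWN
    Retention:           0
    Location:            hdfs://nn:8020/warehouse/web_logs
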
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 24ca663..21625bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -18,28 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
-import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
-
-import java.io.Serializable;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.common.collect.Lists;
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.logging.Log;
@@ -158,7 +137,27 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.collect.Lists;
+import java.io.Serializable;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
+import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
 
 /**
  * DDLSemanticAnalyzer.
@@ -288,8 +287,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         analyzeExchangePartition(qualified, ast);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
         analyzeAlterTableFileFormat(ast, tableName, partSpec);
-      } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) {
-        analyzeAlterTableProtectMode(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) {
         analyzeAlterTableLocation(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) {
@@ -1476,56 +1473,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   }
 
-  private void analyzeAlterTableProtectMode(ASTNode ast, String tableName,
-      HashMap<String, String> partSpec)
-      throws SemanticException {
-
-    AlterTableDesc alterTblDesc =
-        new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE);
-
-    alterTblDesc.setOldName(tableName);
-    alterTblDesc.setPartSpec(partSpec);
-
-    ASTNode child = (ASTNode) ast.getChild(0);
-
-    switch (child.getToken().getType()) {
-    case HiveParser.TOK_ENABLE:
-      alterTblDesc.setProtectModeEnable(true);
-      break;
-    case HiveParser.TOK_DISABLE:
-      alterTblDesc.setProtectModeEnable(false);
-      break;
-    default:
-      throw new SemanticException(
-          "Set Protect mode Syntax parsing error.");
-    }
-
-    ASTNode grandChild = (ASTNode) child.getChild(0);
-    switch (grandChild.getToken().getType()) {
-    case HiveParser.TOK_OFFLINE:
-      alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE);
-      break;
-    case HiveParser.TOK_NO_DROP:
-      if (grandChild.getChildCount() > 0) {
-        alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE);
-      }
-      else {
-        alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP);
-      }
-      break;
-    case HiveParser.TOK_READONLY:
-      throw new SemanticException(
-          "Potect mode READONLY is not implemented");
-    default:
-      throw new SemanticException(
-          "Only protect mode NO_DROP or OFFLINE supported");
-    }
-
-    addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-        alterTblDesc), conf));
-  }
-
   private void analyzeAlterTablePartMergeFiles(ASTNode ast,
       String tableName, HashMap<String, String> partSpec)
       throws SemanticException {
@@ -2690,11 +2637,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     re.noLockNeeded();
     inputs.add(re);
 
-    boolean ignoreProtection = ast.getFirstChildWithType(HiveParser.TOK_IGNOREPROTECTION) != null;
-    addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
+    addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists);
 
     DropTableDesc dropTblDesc =
-        new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection, mustPurge, replicationSpec);
+        new DropTableDesc(getDotName(qualified), partSpecs, expectView, mustPurge, replicationSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 
@@ -3165,9 +3111,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
    * throwIfNonExistent is true, otherwise ignore it.
    */
   private void addTableDropPartsOutputs(Table tab,
-      Collection<List<ExprNodeGenericFuncDesc>> partSpecs, boolean throwIfNonExistent,
-      boolean ignoreProtection) throws SemanticException {
-
+                                        Collection<List<ExprNodeGenericFuncDesc>> partSpecs,
+                                        boolean throwIfNonExistent) throws SemanticException {
     for (List<ExprNodeGenericFuncDesc> specs : partSpecs) {
       for (ExprNodeGenericFuncDesc partSpec : specs) {
         List<Partition> parts = new ArrayList<Partition>();
@@ -3193,11 +3138,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
           }
         }
         for (Partition p : parts) {
-          // TODO: same thing, metastore already checks this but check here if we can.
-          if (!ignoreProtection && !p.canDrop()) {
-            throw new SemanticException(
-              ErrorMsg.DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION.getMsg(p.getCompleteName()));
-          }
           outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE));
         }
       }

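Callers of the DropTableDesc constructor now pass purge and replication settings with no protection flag in between. A sketch assuming the analyzer-scope variables used above (qualified, partSpecs, mustPurge, replicationSpec):

    // expectView is false for a plain table; the remaining arguments are
    // assumed from the surrounding analyzer context shown in this hunk.
    DropTableDesc dropTblDesc = new DropTableDesc(
        getDotName(qualified), partSpecs, false /* expectView */,
        mustPurge, replicationSpec);
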
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index a4c5d0e..bdf0ed7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -18,20 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.StringTokenizer;
-import java.util.TreeMap;
-
 import com.google.common.base.Function;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -56,6 +42,18 @@ import org.json.JSONException;
 import org.json.JSONObject;
 
 import javax.annotation.Nullable;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+import java.util.TreeMap;
 
 /**
  *
@@ -129,11 +127,6 @@ public class EximUtil {
   }
 
   static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws SemanticException {
-    if (table.isOffline()) {
-      throw new SemanticException(
-          ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table "
-              + table.getTableName()));
-    }
     if (table.isView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index bdd7cb7..85c0ae6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -110,8 +110,6 @@ KW_AFTER: 'AFTER';
 KW_DESCRIBE: 'DESCRIBE';
 KW_DROP: 'DROP';
 KW_RENAME: 'RENAME';
-KW_IGNORE: 'IGNORE';
-KW_PROTECTION: 'PROTECTION';
 KW_TO: 'TO';
 KW_COMMENT: 'COMMENT';
 KW_BOOLEAN: 'BOOLEAN';
@@ -157,11 +155,8 @@ KW_INPUTFORMAT: 'INPUTFORMAT';
 KW_OUTPUTFORMAT: 'OUTPUTFORMAT';
 KW_INPUTDRIVER: 'INPUTDRIVER';
 KW_OUTPUTDRIVER: 'OUTPUTDRIVER';
-KW_OFFLINE: 'OFFLINE';
 KW_ENABLE: 'ENABLE';
 KW_DISABLE: 'DISABLE';
-KW_READONLY: 'READONLY';
-KW_NO_DROP: 'NO_DROP';
 KW_LOCATION: 'LOCATION';
 KW_TABLESAMPLE: 'TABLESAMPLE';
 KW_BUCKET: 'BUCKET';

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 15f1f11..3f95bb8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -151,7 +151,6 @@ TOK_ALTERTABLE_REPLACECOLS;
 TOK_ALTERTABLE_ADDPARTS;
 TOK_ALTERTABLE_DROPPARTS;
 TOK_ALTERTABLE_PARTCOLTYPE;
-TOK_ALTERTABLE_PROTECTMODE;
 TOK_ALTERTABLE_MERGEFILES;
 TOK_ALTERTABLE_TOUCH;
 TOK_ALTERTABLE_ARCHIVE;
@@ -330,7 +329,6 @@ TOK_WINDOWDEF;
 TOK_WINDOWSPEC;
 TOK_WINDOWVALUES;
 TOK_WINDOWRANGE;
-TOK_IGNOREPROTECTION;
 TOK_SUBQUERY_EXPR;
 TOK_SUBQUERY_OP;
 TOK_SUBQUERY_OP_NOTIN;
@@ -809,13 +807,6 @@ orReplace
     -> ^(TOK_ORREPLACE)
     ;
 
-ignoreProtection
-@init { pushMsg("ignore protection clause", state); }
-@after { popMsg(state); }
-        : KW_IGNORE KW_PROTECTION
-        -> ^(TOK_IGNOREPROTECTION)
-        ;
-
 createDatabaseStatement
 @init { pushMsg("create database statement", state); }
 @after { popMsg(state); }
@@ -1022,7 +1013,6 @@ alterTblPartitionStatementSuffix
 @after {popMsg(state);}
   : alterStatementSuffixFileFormat
   | alterStatementSuffixLocation
-  | alterStatementSuffixProtectMode
   | alterStatementSuffixMergeFiles
   | alterStatementSuffixSerdeProperties
   | alterStatementSuffixRenamePart
@@ -1166,9 +1156,9 @@ partitionLocation
 alterStatementSuffixDropPartitions[boolean table]
 @init { pushMsg("drop partition statement", state); }
 @after { popMsg(state); }
-    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? KW_PURGE? replicationClause?
-    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? KW_PURGE? replicationClause?)
-    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection? replicationClause?)
+    : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* KW_PURGE? replicationClause?
+    -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? KW_PURGE? replicationClause?)
+    ->            ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? replicationClause?)
     ;
 
 alterStatementSuffixProperties
@@ -1276,13 +1266,6 @@ alterStatementSuffixExchangePartition
     -> ^(TOK_ALTERTABLE_EXCHANGEPARTITION partitionSpec $exchangename)
     ;
 
-alterStatementSuffixProtectMode
-@init { pushMsg("alter partition protect mode statement", state); }
-@after { popMsg(state); }
-    : alterProtectMode
-    -> ^(TOK_ALTERTABLE_PROTECTMODE alterProtectMode)
-    ;
-
 alterStatementSuffixRenamePart
 @init { pushMsg("alter table rename partition statement", state); }
 @after { popMsg(state); }
@@ -1304,21 +1287,6 @@ alterStatementSuffixMergeFiles
     -> ^(TOK_ALTERTABLE_MERGEFILES)
     ;
 
-alterProtectMode
-@init { pushMsg("protect mode specification enable", state); }
-@after { popMsg(state); }
-    : KW_ENABLE alterProtectModeMode  -> ^(TOK_ENABLE alterProtectModeMode)
-    | KW_DISABLE alterProtectModeMode  -> ^(TOK_DISABLE alterProtectModeMode)
-    ;
-
-alterProtectModeMode
-@init { pushMsg("protect mode specification enable", state); }
-@after { popMsg(state); }
-    : KW_OFFLINE  -> ^(TOK_OFFLINE)
-    | KW_NO_DROP KW_CASCADE? -> ^(TOK_NO_DROP KW_CASCADE?)
-    | KW_READONLY  -> ^(TOK_READONLY)
-    ;
-
 alterStatementSuffixBucketNum
 @init { pushMsg("", state); }
 @after { popMsg(state); }

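After this grammar trim, a drop-partition statement may carry only IF EXISTS, PURGE, and a replication clause; IGNORE PROTECTION becomes a syntax error. A hedged smoke test through ParseDriver, the ql parser entry point (statement text and names are made up):

    // Should parse under the revised alterStatementSuffixDropPartitions
    // rule; appending "IGNORE PROTECTION" would now fail to parse.
    ParseDriver pd = new ParseDriver();
    ASTNode tree = pd.parse(
        "ALTER TABLE web_logs DROP IF EXISTS PARTITION (ds='2015-07-01') PURGE");
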
http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 944cee4..85fa9c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -206,12 +206,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     // initialize destination table/partition
     TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
 
-    if (ts.tableHandle.isOffline()){
-      throw new SemanticException(
-          ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(":Table " + ts.tableName));
-    }
-
-    if (ts.tableHandle.isView()) {
+    if (ts.tableHandle.isView()) {
       throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
     }
     if (ts.tableHandle.isNonNative()) {
@@ -255,10 +250,6 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
       try{
         Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
         if (part != null) {
-          if (part.isOffline()) {
-            throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
-                getMsg(ts.tableName + ":" + part.getName()));
-          }
           if (isOverWrite){
             outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
           } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 8516631..aab4250 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -1601,19 +1601,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tab_name);
         }
 
-        // We check offline of the table, as if people only select from an
-        // non-existing partition of an offline table, the partition won't
-        // be added to inputs and validate() won't have the information to
-        // check the table's offline status.
-        // TODO: Modify the code to remove the checking here and consolidate
-        // it in validate()
-        //
-        if (tab.isOffline()) {
-          throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
-              getMsg("Table " + getUnescapedName(qb.getParseInfo().getSrcForAlias(alias))));
-        }
-
-        if (tab.isView()) {
+        if (tab.isView()) {
           if (qb.getParseInfo().isAnalyzeCommand()) {
             throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
           }
@@ -10569,20 +10557,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
       Table tbl = readEntity.getTable();
       Partition p = readEntity.getPartition();
-
-
-      if (tbl.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName()));
-      }
-
-      if (type == ReadEntity.Type.PARTITION && p != null && p.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName() +
-                    " Partition " + p.getName()));
-      }
     }
 
     for (WriteEntity writeEntity : getOutputs()) {
@@ -10636,25 +10610,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         } catch (HiveException e) {
           throw new SemanticException(e);
         }
-
-        if (type == WriteEntity.Type.PARTITION && p != null && p.isOffline()) {
-          throw new SemanticException(
-              ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                  " Table " + tbl.getTableName() +
-                      " Partition " + p.getName()));
-        }
-
       }
       else {
         LOG.debug("Not a partition.");
         tbl = writeEntity.getTable();
       }
-
-      if (tbl.isOffline()) {
-        throw new SemanticException(
-            ErrorMsg.OFFLINE_TABLE_OR_PARTITION.getMsg(
-                "Table " + tbl.getTableName()));
-      }
     }
 
     boolean reworkMapredWork = HiveConf.getBoolVar(this.conf,

http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 97d02ea..2fdf1e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.util.HashMap;
-
 import org.antlr.runtime.tree.Tree;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
+import java.util.HashMap;
+
 /**
  * SemanticAnalyzerFactory.
  *
@@ -114,10 +114,6 @@ public final class SemanticAnalyzerFactory {
   }
 
   static {
-    tablePartitionCommandType.put(
-        HiveParser.TOK_ALTERTABLE_PROTECTMODE,
-        new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE,
-            HiveOperation.ALTERPARTITION_PROTECTMODE });
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT,
         new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT,
             HiveOperation.ALTERPARTITION_FILEFORMAT });