Posted to commits@hive.apache.org by sz...@apache.org on 2014/11/18 21:14:31 UTC
svn commit: r1640418 [2/3] - in /hive/trunk: metastore/if/
metastore/src/gen/thrift/gen-cpp/
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen/thrift/gen-php/metastore/
metastore/src/gen/thrift/gen-py/hive_me...
Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py Tue Nov 18 20:14:29 2014
@@ -215,6 +215,16 @@ class Iface(fb303.FacebookService.Iface)
"""
pass
+ def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+ """
+ Parameters:
+ - dbname
+ - tbl_name
+ - new_tbl
+ - cascade
+ """
+ pass
+
def add_partition(self, new_part):
"""
Parameters:
@@ -1851,6 +1861,44 @@ class Client(fb303.FacebookService.Clien
raise result.o2
return
+ def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+ """
+ Parameters:
+ - dbname
+ - tbl_name
+ - new_tbl
+ - cascade
+ """
+ self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+ self.recv_alter_table_with_cascade()
+
+ def send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+ self._oprot.writeMessageBegin('alter_table_with_cascade', TMessageType.CALL, self._seqid)
+ args = alter_table_with_cascade_args()
+ args.dbname = dbname
+ args.tbl_name = tbl_name
+ args.new_tbl = new_tbl
+ args.cascade = cascade
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_alter_table_with_cascade(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = alter_table_with_cascade_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ return
+
def add_partition(self, new_part):
"""
Parameters:
@@ -5177,6 +5225,7 @@ class Processor(fb303.FacebookService.Pr
self._processMap["get_table_names_by_filter"] = Processor.process_get_table_names_by_filter
self._processMap["alter_table"] = Processor.process_alter_table
self._processMap["alter_table_with_environment_context"] = Processor.process_alter_table_with_environment_context
+ self._processMap["alter_table_with_cascade"] = Processor.process_alter_table_with_cascade
self._processMap["add_partition"] = Processor.process_add_partition
self._processMap["add_partition_with_environment_context"] = Processor.process_add_partition_with_environment_context
self._processMap["add_partitions"] = Processor.process_add_partitions
@@ -5692,6 +5741,22 @@ class Processor(fb303.FacebookService.Pr
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_alter_table_with_cascade(self, seqid, iprot, oprot):
+ args = alter_table_with_cascade_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = alter_table_with_cascade_result()
+ try:
+ self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade)
+ except InvalidOperationException as o1:
+ result.o1 = o1
+ except MetaException as o2:
+ result.o2 = o2
+ oprot.writeMessageBegin("alter_table_with_cascade", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_add_partition(self, seqid, iprot, oprot):
args = add_partition_args()
args.read(iprot)
@@ -11052,6 +11117,177 @@ class alter_table_with_environment_conte
def __ne__(self, other):
return not (self == other)
+class alter_table_with_cascade_args:
+ """
+ Attributes:
+ - dbname
+ - tbl_name
+ - new_tbl
+ - cascade
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbname', None, None, ), # 1
+ (2, TType.STRING, 'tbl_name', None, None, ), # 2
+ (3, TType.STRUCT, 'new_tbl', (Table, Table.thrift_spec), None, ), # 3
+ (4, TType.BOOL, 'cascade', None, None, ), # 4
+ )
+
+ def __init__(self, dbname=None, tbl_name=None, new_tbl=None, cascade=None,):
+ self.dbname = dbname
+ self.tbl_name = tbl_name
+ self.new_tbl = new_tbl
+ self.cascade = cascade
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbname = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tbl_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.new_tbl = Table()
+ self.new_tbl.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.BOOL:
+ self.cascade = iprot.readBool();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('alter_table_with_cascade_args')
+ if self.dbname is not None:
+ oprot.writeFieldBegin('dbname', TType.STRING, 1)
+ oprot.writeString(self.dbname)
+ oprot.writeFieldEnd()
+ if self.tbl_name is not None:
+ oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+ oprot.writeString(self.tbl_name)
+ oprot.writeFieldEnd()
+ if self.new_tbl is not None:
+ oprot.writeFieldBegin('new_tbl', TType.STRUCT, 3)
+ self.new_tbl.write(oprot)
+ oprot.writeFieldEnd()
+ if self.cascade is not None:
+ oprot.writeFieldBegin('cascade', TType.BOOL, 4)
+ oprot.writeBool(self.cascade)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class alter_table_with_cascade_result:
+ """
+ Attributes:
+ - o1
+ - o2
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, o1=None, o2=None,):
+ self.o1 = o1
+ self.o2 = o2
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = InvalidOperationException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = MetaException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('alter_table_with_cascade_result')
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class add_partition_args:
"""
Attributes:
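[For reference, a minimal sketch of calling the new method through the Thrift-generated Java client (the gen-javabean bindings updated in this same commit mirror the Python client above). The host, port, and table names are placeholder assumptions, not values from this change:

import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class AlterTableCascadeThriftExample {
  public static void main(String[] args) throws Exception {
    // Assumed metastore endpoint; adjust host/port for a real deployment.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    try {
      // Read the current table, change its schema, and alter with cascade=true
      // so the column change is pushed down to existing partitions.
      Table newTbl = client.get_table("default", "t1");
      newTbl.getSd().getCols().get(0).setComment("updated via cascade");
      client.alter_table_with_cascade("default", "t1", newTbl, true);
    } finally {
      transport.close();
    }
  }
}
]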
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Tue Nov 18 20:14:29 2014
@@ -431,6 +431,22 @@ module ThriftHiveMetastore
return
end
+ def alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+ send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+ recv_alter_table_with_cascade()
+ end
+
+ def send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+ send_message('alter_table_with_cascade', Alter_table_with_cascade_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :cascade => cascade)
+ end
+
+ def recv_alter_table_with_cascade()
+ result = receive_message(Alter_table_with_cascade_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ return
+ end
+
def add_partition(new_part)
send_add_partition(new_part)
return recv_add_partition()
@@ -2299,6 +2315,19 @@ module ThriftHiveMetastore
write_result(result, oprot, 'alter_table_with_environment_context', seqid)
end
+ def process_alter_table_with_cascade(seqid, iprot, oprot)
+ args = read_args(iprot, Alter_table_with_cascade_args)
+ result = Alter_table_with_cascade_result.new()
+ begin
+ @handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade)
+ rescue ::InvalidOperationException => o1
+ result.o1 = o1
+ rescue ::MetaException => o2
+ result.o2 = o2
+ end
+ write_result(result, oprot, 'alter_table_with_cascade', seqid)
+ end
+
def process_add_partition(seqid, iprot, oprot)
args = read_args(iprot, Add_partition_args)
result = Add_partition_result.new()
@@ -4399,6 +4428,46 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Alter_table_with_cascade_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TBL_NAME = 2
+ NEW_TBL = 3
+ CASCADE = 4
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+ NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table},
+ CASCADE => {:type => ::Thrift::Types::BOOL, :name => 'cascade'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Alter_table_with_cascade_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O2 = 2
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Add_partition_args
include ::Thrift::Struct, ::Thrift::Struct_Union
NEW_PART = 1
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java Tue Nov 18 20:14:29 2014
@@ -49,13 +49,38 @@ public interface AlterHandler extends Co
* @throws InvalidOperationException
* thrown if the newTable object is invalid
* @throws MetaException
- * thrown if there is any other erro
+ * thrown if there is any other error
*/
public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
String name, Table newTable) throws InvalidOperationException,
MetaException;
/**
* handles alter table; the changes can be cascaded to partitions if applicable
+ *
+ * @param msdb
+ * object to get metadata
+ * @param wh
+ * Hive Warehouse where table data is stored
+ * @param dbname
+ * database of the table being altered
+ * @param name
+ * original name of the table being altered. same as
+ * <i>newTable.tableName</i> if alter op is not a rename.
+ * @param newTable
+ * new table object
+ * @param cascade
+ * whether the changes should be cascaded to the table's partitions, if applicable
+ * @throws InvalidOperationException
+ * thrown if the newTable object is invalid
+ * @throws MetaException
+ * thrown if there is any other error
+ */
+ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
+ String name, Table newTable, boolean cascade) throws InvalidOperationException,
+ MetaException;
+
+ /**
* handles alter partition
*
* @param msdb
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Tue Nov 18 20:14:29 2014
@@ -63,6 +63,11 @@ public class HiveAlterHandler implements
public void alterTable(RawStore msdb, Warehouse wh, String dbname,
String name, Table newt) throws InvalidOperationException, MetaException {
+ alterTable(msdb, wh, dbname, name, newt, false);
+ }
+
+ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
+ String name, Table newt, boolean cascade) throws InvalidOperationException, MetaException {
if (newt == null) {
throw new InvalidOperationException("New table is invalid: " + newt);
}
@@ -118,6 +123,19 @@ public class HiveAlterHandler implements
oldt.getSd().getCols(), newt.getSd().getCols());
}
+ if (cascade) {
+ //Currently only column-related changes can be cascaded in alter table
+ if (MetaStoreUtils.isCascadeNeededInAlterTable(oldt, newt)) {
+ List<Partition> parts = msdb.getPartitions(dbname, name, -1);
+ for (Partition part : parts) {
+ part.getSd().setCols(newt.getSd().getCols());
+ msdb.alterPartition(dbname, name, part.getValues(), part);
+ }
+ } else {
+ LOG.warn("Alter table does not cascade changes to its partitions.");
+ }
+ }
+
//check that partition keys have not changed, except for virtual views
//however, allow the partition comments to change
boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Nov 18 20:14:29 2014
@@ -3257,7 +3257,15 @@ public class HiveMetaStore extends Thrif
final Table newTable)
throws InvalidOperationException, MetaException {
// Do not set an environment context.
- alter_table_with_environment_context(dbname, name, newTable, null);
+ alter_table_core(dbname, name, newTable, null, false);
+ }
+
+ @Override
+ public void alter_table_with_cascade(final String dbname, final String name,
+ final Table newTable, final boolean cascade)
+ throws InvalidOperationException, MetaException {
+ // Do not set an environment context.
+ alter_table_core(dbname, name, newTable, null, cascade);
}
@Override
@@ -3265,6 +3273,12 @@ public class HiveMetaStore extends Thrif
final String name, final Table newTable,
final EnvironmentContext envContext)
throws InvalidOperationException, MetaException {
+ alter_table_core(dbname, name, newTable, envContext, false);
+ }
+
+ private void alter_table_core(final String dbname, final String name, final Table newTable,
+ final EnvironmentContext envContext, final boolean cascade)
+ throws InvalidOperationException, MetaException {
startFunction("alter_table", ": db=" + dbname + " tbl=" + name
+ " newtbl=" + newTable.getTableName());
@@ -3279,7 +3293,7 @@ public class HiveMetaStore extends Thrif
try {
Table oldt = get_table_core(dbname, name);
firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
- alterHandler.alterTable(getMS(), wh, dbname, name, newTable);
+ alterHandler.alterTable(getMS(), wh, dbname, name, newTable, cascade);
success = true;
for (MetaStoreEventListener listener : listeners) {
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Tue Nov 18 20:14:29 2014
@@ -311,6 +311,12 @@ public class HiveMetaStoreClient impleme
alter_table(dbname, tbl_name, new_tbl, null);
}
+ @Override
+ public void alter_table(String dbname, String tbl_name, Table new_tbl, boolean cascade)
+ throws InvalidOperationException, MetaException, TException {
+ client.alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade);
+ }
+
public void alter_table(String dbname, String tbl_name, Table new_tbl,
EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext);
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Tue Nov 18 20:14:29 2014
@@ -616,6 +616,10 @@ public interface IMetaStoreClient {
void alter_table(String defaultDatabaseName, String tblName,
Table table) throws InvalidOperationException, MetaException, TException;
+ //alter_table_with_cascade
+ void alter_table(String defaultDatabaseName, String tblName, Table table,
+ boolean cascade) throws InvalidOperationException, MetaException, TException;
+
void createDatabase(Database db)
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
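[A minimal sketch of the new IMetaStoreClient overload from a caller's point of view, assuming a reachable metastore configured via HiveConf; the database and table names are illustrative:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class AlterTableCascadeClientExample {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient msc = new HiveMetaStoreClient(new HiveConf());
    Table tbl = msc.getTable("default", "t1");
    // Append a column; cascade=true propagates the schema change to partitions.
    tbl.getSd().getCols().add(new FieldSchema("c2", "string", "added with cascade"));
    msc.alter_table("default", "t1", tbl, true);
    msc.close();
  }
}
]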
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Tue Nov 18 20:14:29 2014
@@ -554,6 +554,26 @@ public class MetaStoreUtils {
}
}
+ static boolean isCascadeNeededInAlterTable(Table oldTable, Table newTable) {
+ List<FieldSchema> oldCols = oldTable.getSd().getCols();
+ List<FieldSchema> newCols = newTable.getSd().getCols();
+
+ //currently cascade only supports add/replace columns and
+ //changing column type/position/name/comments
+ if (oldCols.size() != newCols.size()) {
+ return true;
+ } else {
+ for (int i = 0; i < oldCols.size(); i++) {
+ FieldSchema oldCol = oldCols.get(i);
+ FieldSchema newCol = newCols.get(i);
+ if (!oldCol.equals(newCol)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
/**
* @return true if oldType and newType are compatible.
* Two types are compatible if we have internal functions to cast one to another.
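[The check above drives the cascade decision: any difference in the storage-descriptor column lists (count, name, type, or comment, since the Thrift-generated FieldSchema.equals compares all of these) triggers partition updates. A sketch of the semantics, placed in the same package because the method is package-private:

package org.apache.hadoop.hive.metastore;

import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class CascadeCheckSketch {
  public static void main(String[] args) {
    Table oldTbl = new Table();
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(new ArrayList<FieldSchema>(
        Arrays.asList(new FieldSchema("c1", "string", null))));
    oldTbl.setSd(sd);

    // deepCopy comes from the Thrift-generated bean; identical schemas need no cascade.
    Table newTbl = oldTbl.deepCopy();
    System.out.println(MetaStoreUtils.isCascadeNeededInAlterTable(oldTbl, newTbl)); // false

    // Adding a column makes the lists differ, so partitions must be updated.
    newTbl.getSd().getCols().add(new FieldSchema("c2", "int", null));
    System.out.println(MetaStoreUtils.isCascadeNeededInAlterTable(oldTbl, newTbl)); // true
  }
}
]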
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Tue Nov 18 20:14:29 2014
@@ -423,6 +423,9 @@ public enum ErrorMsg {
"sorted, table {0}", true),
ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED(10299,
"Alter table partition type {0} does not allow partial partition spec", true),
+ ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED(10300,
+ "Alter table partition type {0} does not support cascade", true),
+
//========================== 20000 range starts here ========================//
SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Tue Nov 18 20:14:29 2014
@@ -3281,7 +3281,7 @@ public class DDLTask extends Task<DDLWor
List<Partition> allPartitions = null;
if (alterTbl.getPartSpec() != null) {
- Map<String, String> partSpec = alterTbl.getPartSpec();
+ Map<String, String> partSpec = alterTbl.getPartSpec();
if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) {
allPartitions = new ArrayList<Partition>();
Partition part = db.getPartition(tbl, partSpec, false);
@@ -3321,7 +3321,7 @@ public class DDLTask extends Task<DDLWor
try {
if (allPartitions == null) {
- db.alterTable(alterTbl.getOldName(), tbl);
+ db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade());
} else {
db.alterPartitions(tbl.getTableName(), allPartitions);
}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Tue Nov 18 20:14:29 2014
@@ -443,6 +443,11 @@ public class Hive {
*/
public void alterTable(String tblName, Table newTbl)
throws InvalidOperationException, HiveException {
+ alterTable(tblName, newTbl, false);
+ }
+
+ public void alterTable(String tblName, Table newTbl, boolean cascade)
+ throws InvalidOperationException, HiveException {
String[] names = Utilities.getDbTableName(tblName);
try {
// Remove the DDL_TIME so it gets refreshed
@@ -450,7 +455,7 @@ public class Hive {
newTbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
}
newTbl.checkValidity();
- getMSC().alter_table(names[0], names[1], newTbl.getTTable());
+ getMSC().alter_table(names[0], names[1], newTbl.getTTable(), cascade);
} catch (MetaException e) {
throw new HiveException("Unable to alter table. " + e.getMessage(), e);
} catch (TException e) {
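[At the ql layer the same flag flows through Hive.alterTable. A minimal sketch, assuming a session-initialized Hive object and an existing partitioned table; names are illustrative:

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class AlterTableCascadeQlExample {
  public static void main(String[] args) throws Exception {
    Hive db = Hive.get();
    Table tbl = db.getTable("default.t1");
    tbl.getCols().add(new FieldSchema("c2", "string", null));
    // cascade=true: HiveAlterHandler rewrites each partition's column list too.
    db.alterTable("default.t1", tbl, true);
  }
}
]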
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java Tue Nov 18 20:14:29 2014
@@ -227,6 +227,18 @@ public class SessionHiveMetaStoreClient
@Override
public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl,
+ boolean cascade) throws InvalidOperationException, MetaException, TException {
+ org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name);
+ if (old_tbl != null) {
+ // temp tables do not support partitions, so cascade is not applicable here
+ alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null);
+ return;
+ }
+ super.alter_table(dbname, tbl_name, new_tbl, cascade);
+ }
+
+ @Override
+ public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl,
EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
// First try temp table
org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name);
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Nov 18 20:14:29 2014
@@ -1369,20 +1369,38 @@ public class DDLSemanticAnalyzer extends
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec)
throws SemanticException {
- addInputsOutputsAlterTable(tableName, partSpec, null);
+ addInputsOutputsAlterTable(tableName, partSpec, null, false);
}
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
- AlterTableDesc desc) throws SemanticException {
+ AlterTableDesc desc)throws SemanticException {
+ addInputsOutputsAlterTable(tableName, partSpec, desc, false);
+ }
+
+ private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
+ AlterTableDesc desc, boolean isCascade) throws SemanticException {
+ boolean alterPartitions = partSpec != null && !partSpec.isEmpty();
+ //cascade is specified at the table level and then propagated to the partition level
+ if (isCascade && alterPartitions) {
+ throw new SemanticException(
+ ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, desc.getOp().name());
+ }
+
Table tab = getTable(tableName, true);
// Determine the lock type to acquire
WriteEntity.WriteType writeType = desc == null ? WriteEntity.WriteType.DDL_EXCLUSIVE :
WriteEntity.determineAlterTableWriteType(desc.getOp());
- if (partSpec == null || partSpec.isEmpty()) {
+
+ if (!alterPartitions) {
inputs.add(new ReadEntity(tab));
outputs.add(new WriteEntity(tab, writeType));
- }
- else {
+ //do not need the lock for partitions since they are covered by the table lock
+ if (isCascade) {
+ for (Partition part : getPartitions(tab, partSpec, false)) {
+ outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
+ }
+ }
+ } else {
ReadEntity re = new ReadEntity(tab);
// In the case of altering a table for its partitions we don't need to lock the table
// itself, just the partitions. But the table will have a ReadEntity. So mark that
@@ -2495,32 +2513,36 @@ public class DDLSemanticAnalyzer extends
private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
HashMap<String, String> partSpec) throws SemanticException {
String newComment = null;
- String newType = null;
- newType = getTypeStringFromAST((ASTNode) ast.getChild(2));
boolean first = false;
String flagCol = null;
- ASTNode positionNode = null;
- if (ast.getChildCount() == 5) {
- newComment = unescapeSQLString(ast.getChild(3).getText());
- positionNode = (ASTNode) ast.getChild(4);
- } else if (ast.getChildCount() == 4) {
- if (ast.getChild(3).getType() == HiveParser.StringLiteral) {
- newComment = unescapeSQLString(ast.getChild(3).getText());
- } else {
- positionNode = (ASTNode) ast.getChild(3);
- }
- }
-
- if (positionNode != null) {
- if (positionNode.getChildCount() == 0) {
- first = true;
- } else {
- flagCol = unescapeIdentifier(positionNode.getChild(0).getText());
- }
- }
-
+ boolean isCascade = false;
+ //col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name] [CASCADE|RESTRICT]
String oldColName = ast.getChild(0).getText();
String newColName = ast.getChild(1).getText();
+ String newType = getTypeStringFromAST((ASTNode) ast.getChild(2));
+ int childCount = ast.getChildCount();
+ for (int i = 3; i < childCount; i++) {
+ ASTNode child = (ASTNode)ast.getChild(i);
+ switch (child.getToken().getType()) {
+ case HiveParser.StringLiteral:
+ newComment = unescapeSQLString(child.getText());
+ break;
+ case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
+ flagCol = unescapeIdentifier(child.getChild(0).getText());
+ break;
+ case HiveParser.KW_FIRST:
+ first = true;
+ break;
+ case HiveParser.TOK_CASCADE:
+ isCascade = true;
+ break;
+ case HiveParser.TOK_RESTRICT:
+ break;
+ default:
+ throw new SemanticException("Unsupported token: " + child.getToken()
+ + " for alter table");
+ }
+ }
/* Validate the operation of renaming a column name. */
Table tab = getTable(qualified);
@@ -2536,8 +2558,8 @@ public class DDLSemanticAnalyzer extends
String tblName = getDotName(qualified);
AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec,
unescapeIdentifier(oldColName), unescapeIdentifier(newColName),
- newType, newComment, first, flagCol);
- addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
+ newType, newComment, first, flagCol, isCascade);
+ addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, isCascade);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
@@ -2585,10 +2607,15 @@ public class DDLSemanticAnalyzer extends
String tblName = getDotName(qualified);
List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(0));
+ boolean isCascade = false;
+ if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
+ isCascade = true;
+ }
+
AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols,
- alterType);
+ alterType, isCascade);
- addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
+ addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc, isCascade);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Tue Nov 18 20:14:29 2014
@@ -1043,16 +1043,16 @@ alterStatementSuffixRename[boolean table
alterStatementSuffixAddCol
@init { pushMsg("add column statement", state); }
@after { popMsg(state); }
- : (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN
- -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS columnNameTypeList)
- -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList)
+ : (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN restrictOrCascade?
+ -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS columnNameTypeList restrictOrCascade?)
+ -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList restrictOrCascade?)
;
alterStatementSuffixRenameCol
@init { pushMsg("rename column name", state); }
@after { popMsg(state); }
- : KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition?
- ->^(TOK_ALTERTABLE_RENAMECOL $oldName $newName colType $comment? alterStatementChangeColPosition?)
+ : KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? restrictOrCascade?
+ ->^(TOK_ALTERTABLE_RENAMECOL $oldName $newName colType $comment? alterStatementChangeColPosition? restrictOrCascade?)
;
alterStatementSuffixUpdateStatsCol
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java?rev=1640418&r1=1640417&r2=1640418&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java Tue Nov 18 20:14:29 2014
@@ -112,6 +112,7 @@ public class AlterTableDesc extends DDLD
Table table;
boolean isDropIfExists = false;
boolean isTurnOffSorting = false;
+ boolean isCascade = false;
public AlterTableDesc() {
}
@@ -127,8 +128,8 @@ public class AlterTableDesc extends DDLD
* @param newType
*/
public AlterTableDesc(String tblName, HashMap<String, String> partSpec,
- String oldColName, String newColName,
- String newType, String newComment, boolean first, String afterCol) {
+ String oldColName, String newColName, String newType, String newComment,
+ boolean first, String afterCol, boolean isCascade) {
super();
oldName = tblName;
this.partSpec = partSpec;
@@ -139,6 +140,7 @@ public class AlterTableDesc extends DDLD
this.first = first;
this.afterCol = afterCol;
op = AlterTableTypes.RENAMECOLUMN;
+ this.isCascade = isCascade;
}
/**
@@ -161,11 +163,12 @@ public class AlterTableDesc extends DDLD
* new columns to be added
*/
public AlterTableDesc(String name, HashMap<String, String> partSpec, List<FieldSchema> newCols,
- AlterTableTypes alterType) {
+ AlterTableTypes alterType, boolean isCascade) {
op = alterType;
oldName = name;
this.newCols = new ArrayList<FieldSchema>(newCols);
this.partSpec = partSpec;
+ this.isCascade = isCascade;
}
/**
@@ -720,6 +723,13 @@ public class AlterTableDesc extends DDLD
return isDropIfExists;
}
+ /**
+ * @return isCascade
+ */
+ public boolean getIsCascade() {
+ return isCascade;
+ }
+
public static boolean doesAlterTableTypeSupportPartialPartitionSpec(AlterTableTypes type) {
return alterTableTypesWithPartialSpec.contains(type);
}
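[For completeness, a sketch of how the analyzer-side descriptor carries the flag. The ADDCOLS constant of AlterTableDesc.AlterTableTypes is an assumption here; the diff only shows alterType being passed through:

import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;

public class AlterTableDescCascadeSketch {
  public static void main(String[] args) {
    // partSpec is null for a table-level operation; isCascade=true records CASCADE.
    AlterTableDesc desc = new AlterTableDesc("default.t1", null,
        new ArrayList<FieldSchema>(
            Arrays.asList(new FieldSchema("c2", "string", null))),
        AlterTableTypes.ADDCOLS, true);
    System.out.println(desc.getIsCascade()); // true
  }
}
]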
Added: hive/trunk/ql/src/test/queries/clientpositive/alter_table_cascade.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/alter_table_cascade.q?rev=1640418&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/alter_table_cascade.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/alter_table_cascade.q Tue Nov 18 20:14:29 2014
@@ -0,0 +1,137 @@
+SET hive.exec.dynamic.partition = true;
+SET hive.exec.dynamic.partition.mode = nonstrict;
+
+-- SORT_QUERY_RESULTS
+
+drop table if exists alter_table_src;
+drop table if exists alter_table_cascade;
+
+create table alter_table_src(c1 string, c2 string);
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_src;
+
+create table alter_table_cascade (c1 string) partitioned by (p1 string, p2 string);
+
+insert overwrite table alter_table_cascade partition (p1, p2)
+ select c1, 'abc', '123' from alter_table_src
+ union all
+ select c1, null, '123' from alter_table_src;
+
+show partitions alter_table_cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_cascade where p1='abc';
+select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- add column c2 by replace columns (for HIVE-6131)
+-- reload data to existing partition __HIVE_DEFAULT_PARTITION__
+-- load data to a new partition xyz
+-- querying data (from new or existing partitions) should return non-NULL values for the new column
+alter table alter_table_cascade replace columns (c1 string, c2 string) cascade;
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123');
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_cascade where p1='xyz';
+select * from alter_table_cascade where p1='abc';
+select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- Change c2 to decimal(10,0); the change should cascade to all partitions
+-- the c2 value returned should be in decimal(10,0)
+alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_cascade where p1='xyz';
+select * from alter_table_cascade where p1='abc';
+select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- rename c1 to c2fromc1 and move it to after c2; the change should cascade to all partitions
+alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+
+-- rename c2fromc1 back to c1 and move it to the first position; the change should cascade to all partitions
+alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+
+-- Try out replace columns; the change should cascade to all partitions
+alter table alter_table_cascade replace columns (c1 string) cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_cascade where p1='xyz';
+select * from alter_table_cascade where p1='abc';
+select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- Try adding columns; the change should cascade to all partitions
+alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade;
+describe alter_table_cascade;
+describe alter_table_cascade partition (p1='xyz', p2='123');
+describe alter_table_cascade partition (p1='abc', p2='123');
+describe alter_table_cascade partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_cascade where p1='xyz';
+select * from alter_table_cascade where p1='abc';
+select * from alter_table_cascade where p1='__HIVE_DEFAULT_PARTITION__';
+
+--
+
+drop table if exists alter_table_restrict;
+
+create table alter_table_restrict (c1 string) partitioned by (p1 string, p2 string);
+insert overwrite table alter_table_restrict partition (p1, p2)
+ select c1, 'abc', '123' from alter_table_src
+ union all
+ select c1, null, '123' from alter_table_src;
+
+show partitions alter_table_restrict;
+describe alter_table_restrict;
+describe alter_table_restrict partition (p1='abc', p2='123');
+describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_restrict where p1='abc';
+select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- add column c2 by replace columns (for HIVE-6131) without cascade
+-- only the table column definition changes; the partitions do not
+-- after replace, only the new partition xyz returns values for the newly added column, not the existing partitions abc and __HIVE_DEFAULT_PARTITION__
+alter table alter_table_restrict replace columns (c1 string, c2 string) restrict;
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='abc', p2='123');
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__',p2='123');
+load data local inpath '../../data/files/dec.txt' overwrite into table alter_table_restrict partition (p1='xyz', p2='123');
+describe alter_table_restrict;
+describe alter_table_restrict partition (p1='xyz', p2='123');
+describe alter_table_restrict partition (p1='abc', p2='123');
+describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+select * from alter_table_restrict where p1='xyz';
+select * from alter_table_restrict where p1='abc';
+select * from alter_table_restrict where p1='__HIVE_DEFAULT_PARTITION__';
+
+-- Change c2 to decimal(10,0); the change is limited to the table and new partitions
+alter table alter_table_restrict change c2 c2 decimal(10,0) restrict;
+describe alter_table_restrict;
+describe alter_table_restrict partition (p1='xyz', p2='123');
+describe alter_table_restrict partition (p1='abc', p2='123');
+describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+
+-- Try out replace columns; the change is limited to the table and new partitions
+alter table alter_table_restrict replace columns (c1 string);
+describe alter_table_restrict;
+describe alter_table_restrict partition (p1='xyz', p2='123');
+describe alter_table_restrict partition (p1='abc', p2='123');
+describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');
+
+-- Try adding columns; the change is limited to the table and new partitions
+alter table alter_table_restrict add columns (c2 decimal(14,4));
+describe alter_table_restrict;
+describe alter_table_restrict partition (p1='xyz', p2='123');
+describe alter_table_restrict partition (p1='abc', p2='123');
+describe alter_table_restrict partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123');