You are viewing a plain text version of this content. (The canonical link was a hyperlink lost in the plain-text conversion of this archive.)
Posted to commits@hive.apache.org by jv...@apache.org on 2010/09/21 23:13:03 UTC
svn commit: r999644 [1/2] - in /hadoop/hive/trunk: ./ metastore/
metastore/if/ metastore/src/gen-cpp/
metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen-php/ metastore/src/gen-py/hive_metastore/
metastore/src/java/org/apache/hadoop/hive/metastore/ ...
Author: jvs
Date: Tue Sep 21 21:13:02 2010
New Revision: 999644
URL: http://svn.apache.org/viewvc?rev=999644&view=rev
Log:
HIVE-1609. Support partition filtering in metastore
(Ajay Kidave via jvs)
Added:
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
Modified:
hadoop/hive/trunk/CHANGES.txt
hadoop/hive/trunk/metastore/build.xml
hadoop/hive/trunk/metastore/if/hive_metastore.thrift
hadoop/hive/trunk/metastore/ivy.xml
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
hadoop/hive/trunk/metastore/src/model/package.jdo
hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Tue Sep 21 21:13:02 2010
@@ -74,6 +74,9 @@ Trunk - Unreleased
HIVE-1616. Add ProtocolsBufferStructObjectInspector
(Johan Oskarsson via namit)
+ HIVE-1609. Support partition filtering in metastore
+ (Ajay Kidave via jvs)
+
IMPROVEMENTS
HIVE-1394. Do not update transient_lastDdlTime if the partition is modified by a housekeeping operation
Modified: hadoop/hive/trunk/metastore/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/build.xml?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/build.xml (original)
+++ hadoop/hive/trunk/metastore/build.xml Tue Sep 21 21:13:02 2010
@@ -29,11 +29,29 @@
</exec>
</target>
- <target name="core-compile" depends="init, model-compile,ivy-retrieve">
+ <uptodate property="grammarBuild.notRequired">
+ <srcfiles dir= "${src.dir}/java/org/apache/hadoop/hive/metastore/parser" includes="**/*.g"/>
+ <mapper type="merge" to="${build.dir}/gen-java/org/apache/hadoop/hive/metastore/parser/FilterParser.java"/>
+ </uptodate>
+
+ <target name="build-grammar" unless="grammarBuild.notRequired">
+ <echo>Building Grammar ${src.dir}/java/org/apache/hadoop/hive/metastore/parser/Filter.g ....</echo>
+ <java classname="org.antlr.Tool" classpathref="classpath" fork="false">
+ <arg value="-fo" />
+ <arg value="${build.dir.hive}/metastore/gen-java/org/apache/hadoop/hive/metastore/parser" />
+ <arg value="${src.dir}/java/org/apache/hadoop/hive/metastore/parser/Filter.g" />
+ </java>
+ </target>
+
+ <target name="metastore-init">
+ <mkdir dir="${build.dir}/gen-java/org/apache/hadoop/hive/metastore/parser"/>
+ </target>
+
+ <target name="core-compile" depends="init,metastore-init,build-grammar,model-compile,ivy-retrieve">
<echo message="Compiling: "/>
<javac
encoding="${build.encoding}"
- srcdir="${src.dir}/java:${src.dir}/gen-javabean"
+ srcdir="${src.dir}/java:${src.dir}/gen-javabean:${build.dir}/gen-java"
includes="**/*.java"
destdir="${build.classes}"
debug="${javac.debug}"
Modified: hadoop/hive/trunk/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/if/hive_metastore.thrift?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/if/hive_metastore.thrift (original)
+++ hadoop/hive/trunk/metastore/if/hive_metastore.thrift Tue Sep 21 21:13:02 2010
@@ -228,6 +228,11 @@ service ThriftHiveMetastore extends fb30
2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
throws(1:MetaException o1)
+ // get the partitions matching the given partition filter
+ list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
+ 3:string filter, 4:i16 max_parts=-1)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
// changes the partition to the new partition object. partition is identified from the part values
// in the new_part
void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
Modified: hadoop/hive/trunk/metastore/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/ivy.xml?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/ivy.xml (original)
+++ hadoop/hive/trunk/metastore/ivy.xml Tue Sep 21 21:13:02 2010
@@ -9,7 +9,7 @@
<exclude org="org.apache.geronimo.specs" module="geronimo-jta_1.1_spec"/>
</dependency>
<dependency org="commons-pool" name="commons-pool" rev="1.5.4"/>
- <dependency org="org.datanucleus" name="datanucleus-connectionpool" rev="2.0.1">
+ <dependency org="org.datanucleus" name="datanucleus-connectionpool" rev="2.0.3">
<exclude module="proxool" />
<exclude module="c3p0" />
<exclude module="datanucleus-core" />
@@ -17,8 +17,9 @@
<exclude module="commons-collections" />
<exclude module="commons-pool" />
<exclude module="commons-dbcp" />
+ <exclude org="com.jolbox" module="bonecp"/>
</dependency>
- <dependency org="org.datanucleus" name="datanucleus-core" rev="2.0.3">
+ <dependency org="org.datanucleus" name="datanucleus-core" rev="2.1.1">
<exclude org="javax.jdo" module="jdo2-api"/>
<exclude org="org.apache.geronimo.specs" module="geronimo-jta_1.1_spec"/>
<exclude org="org.eclipse.equinox" module="org.eclipse.equinox.registry"/>
@@ -27,13 +28,13 @@
<exclude org="org.eclipse.osgi" module="org.eclipse.osgi"/>
<exclude org="log4j" module="log4j"/>
</dependency>
- <dependency org="org.datanucleus" name="datanucleus-enhancer" rev="2.0.3">
+ <dependency org="org.datanucleus" name="datanucleus-enhancer" rev="2.1.0-release">
<exclude org="org.datanucleus" module="datanucleus-core"/>
<exclude org="javax.jdo" module="jdo2-api"/>
<exclude org="asm" module="asm"/>
<exclude org="org.apache.ant" module="ant"/>
</dependency>
- <dependency org="org.datanucleus" name="datanucleus-rdbms" rev="2.0.3">
+ <dependency org="org.datanucleus" name="datanucleus-rdbms" rev="2.1.1">
<exclude org="org.datanucleus" module="datanucleus-core"/>
<exclude org="javax.jdo" module="jdo2-api"/>
<exclude org="org.apache.geronimo.specs" module="geronimo-jta_1.1_spec"/>
@@ -41,7 +42,7 @@
<exclude org="org.apache.ant" module="ant"/>
<exclude org="oracle" module="ojdbc14_g"/>
</dependency>
- <dependency org="javax.jdo" name="jdo2-api" rev="2.3-ec">
+ <dependency org="javax.jdo" name="jdo-api" rev="3.0">
<exclude org="javax.transaction" module="jta"/>
<exclude org="org.apache.ant" module="ant"/>
<exclude org="org.apache.geronimo.specs" module="geronimo-jpa_3.0_spec"/>
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp Tue Sep 21 21:13:02 2010
@@ -6438,6 +6438,278 @@ uint32_t ThriftHiveMetastore_get_partiti
return xfer;
}
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->db_name);
+ this->__isset.db_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->filter);
+ this->__isset.filter = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == apache::thrift::protocol::T_I16) {
+ xfer += iprot->readI16(this->max_parts);
+ this->__isset.max_parts = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->db_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("filter", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->filter);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4);
+ xfer += oprot->writeI16(this->max_parts);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->db_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("filter", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString((*(this->filter)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("max_parts", apache::thrift::protocol::T_I16, 4);
+ xfer += oprot->writeI16((*(this->max_parts)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size250;
+ apache::thrift::protocol::TType _etype253;
+ iprot->readListBegin(_etype253, _size250);
+ this->success.resize(_size250);
+ uint32_t _i254;
+ for (_i254 = 0; _i254 < _size250; ++_i254)
+ {
+ xfer += this->success[_i254].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
+ std::vector<Partition> ::const_iterator _iter255;
+ for (_iter255 = this->success.begin(); _iter255 != this->success.end(); ++_iter255)
+ {
+ xfer += (*_iter255).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size256;
+ apache::thrift::protocol::TType _etype259;
+ iprot->readListBegin(_etype259, _size256);
+ (*(this->success)).resize(_size256);
+ uint32_t _i260;
+ for (_i260 = 0; _i260 < _size256; ++_i260)
+ {
+ xfer += (*(this->success))[_i260].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
uint32_t ThriftHiveMetastore_alter_partition_args::read(apache::thrift::protocol::TProtocol* iprot) {
uint32_t xfer = 0;
@@ -6922,14 +7194,14 @@ uint32_t ThriftHiveMetastore_partition_n
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size250;
- apache::thrift::protocol::TType _etype253;
- iprot->readListBegin(_etype253, _size250);
- this->success.resize(_size250);
- uint32_t _i254;
- for (_i254 = 0; _i254 < _size250; ++_i254)
+ uint32_t _size261;
+ apache::thrift::protocol::TType _etype264;
+ iprot->readListBegin(_etype264, _size261);
+ this->success.resize(_size261);
+ uint32_t _i265;
+ for (_i265 = 0; _i265 < _size261; ++_i265)
{
- xfer += iprot->readString(this->success[_i254]);
+ xfer += iprot->readString(this->success[_i265]);
}
iprot->readListEnd();
}
@@ -6968,10 +7240,10 @@ uint32_t ThriftHiveMetastore_partition_n
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
- std::vector<std::string> ::const_iterator _iter255;
- for (_iter255 = this->success.begin(); _iter255 != this->success.end(); ++_iter255)
+ std::vector<std::string> ::const_iterator _iter266;
+ for (_iter266 = this->success.begin(); _iter266 != this->success.end(); ++_iter266)
{
- xfer += oprot->writeString((*_iter255));
+ xfer += oprot->writeString((*_iter266));
}
xfer += oprot->writeListEnd();
}
@@ -7010,14 +7282,14 @@ uint32_t ThriftHiveMetastore_partition_n
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size256;
- apache::thrift::protocol::TType _etype259;
- iprot->readListBegin(_etype259, _size256);
- (*(this->success)).resize(_size256);
- uint32_t _i260;
- for (_i260 = 0; _i260 < _size256; ++_i260)
+ uint32_t _size267;
+ apache::thrift::protocol::TType _etype270;
+ iprot->readListBegin(_etype270, _size267);
+ (*(this->success)).resize(_size267);
+ uint32_t _i271;
+ for (_i271 = 0; _i271 < _size267; ++_i271)
{
- xfer += iprot->readString((*(this->success))[_i260]);
+ xfer += iprot->readString((*(this->success))[_i271]);
}
iprot->readListEnd();
}
@@ -7132,17 +7404,17 @@ uint32_t ThriftHiveMetastore_partition_n
if (ftype == apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size261;
- apache::thrift::protocol::TType _ktype262;
- apache::thrift::protocol::TType _vtype263;
- iprot->readMapBegin(_ktype262, _vtype263, _size261);
- uint32_t _i265;
- for (_i265 = 0; _i265 < _size261; ++_i265)
+ uint32_t _size272;
+ apache::thrift::protocol::TType _ktype273;
+ apache::thrift::protocol::TType _vtype274;
+ iprot->readMapBegin(_ktype273, _vtype274, _size272);
+ uint32_t _i276;
+ for (_i276 = 0; _i276 < _size272; ++_i276)
{
- std::string _key266;
- xfer += iprot->readString(_key266);
- std::string& _val267 = this->success[_key266];
- xfer += iprot->readString(_val267);
+ std::string _key277;
+ xfer += iprot->readString(_key277);
+ std::string& _val278 = this->success[_key277];
+ xfer += iprot->readString(_val278);
}
iprot->readMapEnd();
}
@@ -7181,11 +7453,11 @@ uint32_t ThriftHiveMetastore_partition_n
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(apache::thrift::protocol::T_STRING, apache::thrift::protocol::T_STRING, this->success.size());
- std::map<std::string, std::string> ::const_iterator _iter268;
- for (_iter268 = this->success.begin(); _iter268 != this->success.end(); ++_iter268)
+ std::map<std::string, std::string> ::const_iterator _iter279;
+ for (_iter279 = this->success.begin(); _iter279 != this->success.end(); ++_iter279)
{
- xfer += oprot->writeString(_iter268->first);
- xfer += oprot->writeString(_iter268->second);
+ xfer += oprot->writeString(_iter279->first);
+ xfer += oprot->writeString(_iter279->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7224,17 +7496,17 @@ uint32_t ThriftHiveMetastore_partition_n
if (ftype == apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size269;
- apache::thrift::protocol::TType _ktype270;
- apache::thrift::protocol::TType _vtype271;
- iprot->readMapBegin(_ktype270, _vtype271, _size269);
- uint32_t _i273;
- for (_i273 = 0; _i273 < _size269; ++_i273)
+ uint32_t _size280;
+ apache::thrift::protocol::TType _ktype281;
+ apache::thrift::protocol::TType _vtype282;
+ iprot->readMapBegin(_ktype281, _vtype282, _size280);
+ uint32_t _i284;
+ for (_i284 = 0; _i284 < _size280; ++_i284)
{
- std::string _key274;
- xfer += iprot->readString(_key274);
- std::string& _val275 = (*(this->success))[_key274];
- xfer += iprot->readString(_val275);
+ std::string _key285;
+ xfer += iprot->readString(_key285);
+ std::string& _val286 = (*(this->success))[_key285];
+ xfer += iprot->readString(_val286);
}
iprot->readMapEnd();
}
@@ -8075,14 +8347,14 @@ uint32_t ThriftHiveMetastore_get_indexes
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size276;
- apache::thrift::protocol::TType _etype279;
- iprot->readListBegin(_etype279, _size276);
- this->success.resize(_size276);
- uint32_t _i280;
- for (_i280 = 0; _i280 < _size276; ++_i280)
+ uint32_t _size287;
+ apache::thrift::protocol::TType _etype290;
+ iprot->readListBegin(_etype290, _size287);
+ this->success.resize(_size287);
+ uint32_t _i291;
+ for (_i291 = 0; _i291 < _size287; ++_i291)
{
- xfer += this->success[_i280].read(iprot);
+ xfer += this->success[_i291].read(iprot);
}
iprot->readListEnd();
}
@@ -8129,10 +8401,10 @@ uint32_t ThriftHiveMetastore_get_indexes
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRUCT, this->success.size());
- std::vector<Index> ::const_iterator _iter281;
- for (_iter281 = this->success.begin(); _iter281 != this->success.end(); ++_iter281)
+ std::vector<Index> ::const_iterator _iter292;
+ for (_iter292 = this->success.begin(); _iter292 != this->success.end(); ++_iter292)
{
- xfer += (*_iter281).write(oprot);
+ xfer += (*_iter292).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8175,14 +8447,14 @@ uint32_t ThriftHiveMetastore_get_indexes
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size282;
- apache::thrift::protocol::TType _etype285;
- iprot->readListBegin(_etype285, _size282);
- (*(this->success)).resize(_size282);
- uint32_t _i286;
- for (_i286 = 0; _i286 < _size282; ++_i286)
+ uint32_t _size293;
+ apache::thrift::protocol::TType _etype296;
+ iprot->readListBegin(_etype296, _size293);
+ (*(this->success)).resize(_size293);
+ uint32_t _i297;
+ for (_i297 = 0; _i297 < _size293; ++_i297)
{
- xfer += (*(this->success))[_i286].read(iprot);
+ xfer += (*(this->success))[_i297].read(iprot);
}
iprot->readListEnd();
}
@@ -8333,14 +8605,14 @@ uint32_t ThriftHiveMetastore_get_index_n
if (ftype == apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size287;
- apache::thrift::protocol::TType _etype290;
- iprot->readListBegin(_etype290, _size287);
- this->success.resize(_size287);
- uint32_t _i291;
- for (_i291 = 0; _i291 < _size287; ++_i291)
+ uint32_t _size298;
+ apache::thrift::protocol::TType _etype301;
+ iprot->readListBegin(_etype301, _size298);
+ this->success.resize(_size298);
+ uint32_t _i302;
+ for (_i302 = 0; _i302 < _size298; ++_i302)
{
- xfer += iprot->readString(this->success[_i291]);
+ xfer += iprot->readString(this->success[_i302]);
}
iprot->readListEnd();
}
@@ -8379,10 +8651,10 @@ uint32_t ThriftHiveMetastore_get_index_n
xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(apache::thrift::protocol::T_STRING, this->success.size());
- std::vector<std::string> ::const_iterator _iter292;
- for (_iter292 = this->success.begin(); _iter292 != this->success.end(); ++_iter292)
+ std::vector<std::string> ::const_iterator _iter303;
+ for (_iter303 = this->success.begin(); _iter303 != this->success.end(); ++_iter303)
{
- xfer += oprot->writeString((*_iter292));
+ xfer += oprot->writeString((*_iter303));
}
xfer += oprot->writeListEnd();
}
@@ -8421,14 +8693,14 @@ uint32_t ThriftHiveMetastore_get_index_n
if (ftype == apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size293;
- apache::thrift::protocol::TType _etype296;
- iprot->readListBegin(_etype296, _size293);
- (*(this->success)).resize(_size293);
- uint32_t _i297;
- for (_i297 = 0; _i297 < _size293; ++_i297)
+ uint32_t _size304;
+ apache::thrift::protocol::TType _etype307;
+ iprot->readListBegin(_etype307, _size304);
+ (*(this->success)).resize(_size304);
+ uint32_t _i308;
+ for (_i308 = 0; _i308 < _size304; ++_i308)
{
- xfer += iprot->readString((*(this->success))[_i297]);
+ xfer += iprot->readString((*(this->success))[_i308]);
}
iprot->readListEnd();
}
@@ -10318,6 +10590,75 @@ void ThriftHiveMetastoreClient::recv_get
throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result");
}
+void ThriftHiveMetastoreClient::get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+{
+ send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+ recv_get_partitions_by_filter(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_get_partitions_by_filter_pargs args;
+ args.db_name = &db_name;
+ args.tbl_name = &tbl_name;
+ args.filter = &filter;
+ args.max_parts = &max_parts;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->flush();
+ oprot_->getTransport()->writeEnd();
+}
+
+void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vector<Partition> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == apache::thrift::protocol::T_EXCEPTION) {
+ apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+ }
+ if (fname.compare("get_partitions_by_filter") != 0) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+ }
+ ThriftHiveMetastore_get_partitions_by_filter_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+}
+
void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -11900,6 +12241,40 @@ void ThriftHiveMetastoreProcessor::proce
oprot->getTransport()->writeEnd();
}
+void ThriftHiveMetastoreProcessor::process_get_partitions_by_filter(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
+{
+ ThriftHiveMetastore_get_partitions_by_filter_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+
+ ThriftHiveMetastore_get_partitions_by_filter_result result;
+ try {
+ iface_->get_partitions_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts);
+ result.__isset.success = true;
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (NoSuchObjectException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (const std::exception& e) {
+ apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+ return;
+ }
+
+ oprot->writeMessageBegin("get_partitions_by_filter", apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+}
+
void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
{
ThriftHiveMetastore_alter_partition_args args;
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h Tue Sep 21 21:13:02 2010
@@ -43,6 +43,7 @@ class ThriftHiveMetastoreIf : virtual pu
virtual void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
virtual void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
virtual void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
+ virtual void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0;
virtual void partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name) = 0;
@@ -145,6 +146,9 @@ class ThriftHiveMetastoreNull : virtual
void get_partition_names_ps(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const int16_t /* max_parts */) {
return;
}
+ void get_partitions_by_filter(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */, const int16_t /* max_parts */) {
+ return;
+ }
void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) {
return;
}
@@ -3253,6 +3257,126 @@ class ThriftHiveMetastore_get_partition_
};
+class ThriftHiveMetastore_get_partitions_by_filter_args {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_filter_args() : db_name(""), tbl_name(""), filter(""), max_parts(-1) {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_args() throw() {}
+
+ std::string db_name;
+ std::string tbl_name;
+ std::string filter;
+ int16_t max_parts;
+
+ struct __isset {
+ __isset() : db_name(false), tbl_name(false), filter(false), max_parts(false) {}
+ bool db_name;
+ bool tbl_name;
+ bool filter;
+ bool max_parts;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_args & rhs) const
+ {
+ if (!(db_name == rhs.db_name))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(filter == rhs.filter))
+ return false;
+ if (!(max_parts == rhs.max_parts))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_filter_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_filter_args & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() {}
+
+ const std::string* db_name;
+ const std::string* tbl_name;
+ const std::string* filter;
+ const int16_t* max_parts;
+
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_result {
+ public:
+
+ ThriftHiveMetastore_get_partitions_by_filter_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_result() throw() {}
+
+ std::vector<Partition> success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partitions_by_filter_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partitions_by_filter_result & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partitions_by_filter_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() {}
+
+ std::vector<Partition> * success;
+ MetaException o1;
+ NoSuchObjectException o2;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ } __isset;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+
+};
+
class ThriftHiveMetastore_alter_partition_args {
public:
@@ -4335,6 +4459,9 @@ class ThriftHiveMetastoreClient : virtua
void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
void recv_get_partition_names_ps(std::vector<std::string> & _return);
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
+ void recv_get_partitions_by_filter(std::vector<Partition> & _return);
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part);
void recv_alter_partition();
@@ -4398,6 +4525,7 @@ class ThriftHiveMetastoreProcessor : vir
void process_get_partition_names(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partitions_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partition_names_ps(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
+ void process_get_partitions_by_filter(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_config_value(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_partition_name_to_vals(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
@@ -4439,6 +4567,7 @@ class ThriftHiveMetastoreProcessor : vir
processMap_["get_partition_names"] = &ThriftHiveMetastoreProcessor::process_get_partition_names;
processMap_["get_partitions_ps"] = &ThriftHiveMetastoreProcessor::process_get_partitions_ps;
processMap_["get_partition_names_ps"] = &ThriftHiveMetastoreProcessor::process_get_partition_names_ps;
+ processMap_["get_partitions_by_filter"] = &ThriftHiveMetastoreProcessor::process_get_partitions_by_filter;
processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition;
processMap_["get_config_value"] = &ThriftHiveMetastoreProcessor::process_get_config_value;
processMap_["partition_name_to_vals"] = &ThriftHiveMetastoreProcessor::process_partition_name_to_vals;
@@ -4778,6 +4907,18 @@ class ThriftHiveMetastoreMultiface : vir
}
}
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) {
+ uint32_t sz = ifaces_.size();
+ for (uint32_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_partitions_by_filter(_return, db_name, tbl_name, filter, max_parts);
+ return;
+ } else {
+ ifaces_[i]->get_partitions_by_filter(_return, db_name, tbl_name, filter, max_parts);
+ }
+ }
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
uint32_t sz = ifaces_.size();
for (uint32_t i = 0; i < sz; ++i) {
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp Tue Sep 21 21:13:02 2010
@@ -162,6 +162,11 @@ class ThriftHiveMetastoreHandler : virtu
printf("get_partition_names_ps\n");
}
+ void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) {
+ // Your implementation goes here
+ printf("get_partitions_by_filter\n");
+ }
+
void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
// Your implementation goes here
printf("alter_partition\n");
Modified: hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java?rev=999644&r1=999643&r2=999644&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (original)
+++ hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java Tue Sep 21 21:13:02 2010
@@ -81,6 +81,8 @@ public class ThriftHiveMetastore {
public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, TException;
+ public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException;
public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, TException;
@@ -1227,6 +1229,48 @@ public class ThriftHiveMetastore {
throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
}
+ public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException
+ {
+ send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+ return recv_get_partitions_by_filter();
+ }
+
+ public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws TException
+ {
+ oprot_.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.CALL, seqid_));
+ get_partitions_by_filter_args args = new get_partitions_by_filter_args();
+ args.db_name = db_name;
+ args.tbl_name = tbl_name;
+ args.filter = filter;
+ args.max_parts = max_parts;
+ args.write(oprot_);
+ oprot_.writeMessageEnd();
+ oprot_.getTransport().flush();
+ }
+
+ public List<Partition> recv_get_partitions_by_filter() throws MetaException, NoSuchObjectException, TException
+ {
+ TMessage msg = iprot_.readMessageBegin();
+ if (msg.type == TMessageType.EXCEPTION) {
+ TApplicationException x = TApplicationException.read(iprot_);
+ iprot_.readMessageEnd();
+ throw x;
+ }
+ get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+ result.read(iprot_);
+ iprot_.readMessageEnd();
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+ }
+
public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, TException
{
send_alter_partition(db_name, tbl_name, new_part);
@@ -1614,6 +1658,7 @@ public class ThriftHiveMetastore {
processMap_.put("get_partition_names", new get_partition_names());
processMap_.put("get_partitions_ps", new get_partitions_ps());
processMap_.put("get_partition_names_ps", new get_partition_names_ps());
+ processMap_.put("get_partitions_by_filter", new get_partitions_by_filter());
processMap_.put("alter_partition", new alter_partition());
processMap_.put("get_config_value", new get_config_value());
processMap_.put("partition_name_to_vals", new partition_name_to_vals());
@@ -2493,6 +2538,36 @@ public class ThriftHiveMetastore {
}
+ private class get_partitions_by_filter implements ProcessFunction {
+ public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
+ {
+ get_partitions_by_filter_args args = new get_partitions_by_filter_args();
+ args.read(iprot);
+ iprot.readMessageEnd();
+ get_partitions_by_filter_result result = new get_partitions_by_filter_result();
+ try {
+ result.success = iface_.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts);
+ } catch (MetaException o1) {
+ result.o1 = o1;
+ } catch (NoSuchObjectException o2) {
+ result.o2 = o2;
+ } catch (Throwable th) {
+ LOGGER.error("Internal error processing get_partitions_by_filter", th);
+ TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partitions_by_filter");
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.EXCEPTION, seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ oprot.writeMessageBegin(new TMessage("get_partitions_by_filter", TMessageType.REPLY, seqid));
+ result.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ }
+
+ }
+
private class alter_partition implements ProcessFunction {
public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
{
@@ -19871,21 +19946,25 @@ public class ThriftHiveMetastore {
}
- public static class alter_partition_args implements TBase, java.io.Serializable, Cloneable {
- private static final TStruct STRUCT_DESC = new TStruct("alter_partition_args");
+ public static class get_partitions_by_filter_args implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_filter_args");
private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2);
- private static final TField NEW_PART_FIELD_DESC = new TField("new_part", TType.STRUCT, (short)3);
+ private static final TField FILTER_FIELD_DESC = new TField("filter", TType.STRING, (short)3);
+ private static final TField MAX_PARTS_FIELD_DESC = new TField("max_parts", TType.I16, (short)4);
private String db_name;
public static final int DB_NAME = 1;
private String tbl_name;
public static final int TBL_NAME = 2;
- private Partition new_part;
- public static final int NEW_PART = 3;
+ private String filter;
+ public static final int FILTER = 3;
+ private short max_parts;
+ public static final int MAX_PARTS = 4;
private final Isset __isset = new Isset();
private static final class Isset implements java.io.Serializable {
+ public boolean max_parts = false;
}
public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
@@ -19893,46 +19972,55 @@ public class ThriftHiveMetastore {
new FieldValueMetaData(TType.STRING)));
put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT,
new FieldValueMetaData(TType.STRING)));
- put(NEW_PART, new FieldMetaData("new_part", TFieldRequirementType.DEFAULT,
- new StructMetaData(TType.STRUCT, Partition.class)));
+ put(FILTER, new FieldMetaData("filter", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(MAX_PARTS, new FieldMetaData("max_parts", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.I16)));
}});
static {
- FieldMetaData.addStructMetaDataMap(alter_partition_args.class, metaDataMap);
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap);
}
- public alter_partition_args() {
+ public get_partitions_by_filter_args() {
+ this.max_parts = (short)-1;
+
}
- public alter_partition_args(
+ public get_partitions_by_filter_args(
String db_name,
String tbl_name,
- Partition new_part)
+ String filter,
+ short max_parts)
{
this();
this.db_name = db_name;
this.tbl_name = tbl_name;
- this.new_part = new_part;
+ this.filter = filter;
+ this.max_parts = max_parts;
+ this.__isset.max_parts = true;
}
/**
* Performs a deep copy on <i>other</i>.
*/
- public alter_partition_args(alter_partition_args other) {
+ public get_partitions_by_filter_args(get_partitions_by_filter_args other) {
if (other.isSetDb_name()) {
this.db_name = other.db_name;
}
if (other.isSetTbl_name()) {
this.tbl_name = other.tbl_name;
}
- if (other.isSetNew_part()) {
- this.new_part = new Partition(other.new_part);
+ if (other.isSetFilter()) {
+ this.filter = other.filter;
}
+ __isset.max_parts = other.__isset.max_parts;
+ this.max_parts = other.max_parts;
}
@Override
- public alter_partition_args clone() {
- return new alter_partition_args(this);
+ public get_partitions_by_filter_args clone() {
+ return new get_partitions_by_filter_args(this);
}
public String getDb_name() {
@@ -19969,21 +20057,39 @@ public class ThriftHiveMetastore {
return this.tbl_name != null;
}
- public Partition getNew_part() {
- return this.new_part;
+ public String getFilter() {
+ return this.filter;
}
- public void setNew_part(Partition new_part) {
- this.new_part = new_part;
+ public void setFilter(String filter) {
+ this.filter = filter;
}
- public void unsetNew_part() {
- this.new_part = null;
+ public void unsetFilter() {
+ this.filter = null;
}
- // Returns true if field new_part is set (has been asigned a value) and false otherwise
- public boolean isSetNew_part() {
- return this.new_part != null;
+ // Returns true if field filter is set (has been asigned a value) and false otherwise
+ public boolean isSetFilter() {
+ return this.filter != null;
+ }
+
+ public short getMax_parts() {
+ return this.max_parts;
+ }
+
+ public void setMax_parts(short max_parts) {
+ this.max_parts = max_parts;
+ this.__isset.max_parts = true;
+ }
+
+ public void unsetMax_parts() {
+ this.__isset.max_parts = false;
+ }
+
+ // Returns true if field max_parts is set (has been asigned a value) and false otherwise
+ public boolean isSetMax_parts() {
+ return this.__isset.max_parts;
}
public void setFieldValue(int fieldID, Object value) {
@@ -20004,11 +20110,19 @@ public class ThriftHiveMetastore {
}
break;
- case NEW_PART:
+ case FILTER:
if (value == null) {
- unsetNew_part();
+ unsetFilter();
} else {
- setNew_part((Partition)value);
+ setFilter((String)value);
+ }
+ break;
+
+ case MAX_PARTS:
+ if (value == null) {
+ unsetMax_parts();
+ } else {
+ setMax_parts((Short)value);
}
break;
@@ -20025,8 +20139,11 @@ public class ThriftHiveMetastore {
case TBL_NAME:
return getTbl_name();
- case NEW_PART:
- return getNew_part();
+ case FILTER:
+ return getFilter();
+
+ case MAX_PARTS:
+ return new Short(getMax_parts());
default:
throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
@@ -20040,8 +20157,10 @@ public class ThriftHiveMetastore {
return isSetDb_name();
case TBL_NAME:
return isSetTbl_name();
- case NEW_PART:
- return isSetNew_part();
+ case FILTER:
+ return isSetFilter();
+ case MAX_PARTS:
+ return isSetMax_parts();
default:
throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
}
@@ -20051,12 +20170,12 @@ public class ThriftHiveMetastore {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof alter_partition_args)
- return this.equals((alter_partition_args)that);
+ if (that instanceof get_partitions_by_filter_args)
+ return this.equals((get_partitions_by_filter_args)that);
return false;
}
- public boolean equals(alter_partition_args that) {
+ public boolean equals(get_partitions_by_filter_args that) {
if (that == null)
return false;
@@ -20078,12 +20197,21 @@ public class ThriftHiveMetastore {
return false;
}
- boolean this_present_new_part = true && this.isSetNew_part();
- boolean that_present_new_part = true && that.isSetNew_part();
- if (this_present_new_part || that_present_new_part) {
- if (!(this_present_new_part && that_present_new_part))
+ boolean this_present_filter = true && this.isSetFilter();
+ boolean that_present_filter = true && that.isSetFilter();
+ if (this_present_filter || that_present_filter) {
+ if (!(this_present_filter && that_present_filter))
return false;
- if (!this.new_part.equals(that.new_part))
+ if (!this.filter.equals(that.filter))
+ return false;
+ }
+
+ boolean this_present_max_parts = true;
+ boolean that_present_max_parts = true;
+ if (this_present_max_parts || that_present_max_parts) {
+ if (!(this_present_max_parts && that_present_max_parts))
+ return false;
+ if (this.max_parts != that.max_parts)
return false;
}
@@ -20120,10 +20248,17 @@ public class ThriftHiveMetastore {
TProtocolUtil.skip(iprot, field.type);
}
break;
- case NEW_PART:
- if (field.type == TType.STRUCT) {
- this.new_part = new Partition();
- this.new_part.read(iprot);
+ case FILTER:
+ if (field.type == TType.STRING) {
+ this.filter = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case MAX_PARTS:
+ if (field.type == TType.I16) {
+ this.max_parts = iprot.readI16();
+ this.__isset.max_parts = true;
} else {
TProtocolUtil.skip(iprot, field.type);
}
@@ -20153,18 +20288,21 @@ public class ThriftHiveMetastore {
oprot.writeString(this.tbl_name);
oprot.writeFieldEnd();
}
- if (this.new_part != null) {
- oprot.writeFieldBegin(NEW_PART_FIELD_DESC);
- this.new_part.write(oprot);
+ if (this.filter != null) {
+ oprot.writeFieldBegin(FILTER_FIELD_DESC);
+ oprot.writeString(this.filter);
oprot.writeFieldEnd();
}
+ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC);
+ oprot.writeI16(this.max_parts);
+ oprot.writeFieldEnd();
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("alter_partition_args(");
+ StringBuilder sb = new StringBuilder("get_partitions_by_filter_args(");
boolean first = true;
sb.append("db_name:");
@@ -20183,13 +20321,17 @@ public class ThriftHiveMetastore {
}
first = false;
if (!first) sb.append(", ");
- sb.append("new_part:");
- if (this.new_part == null) {
+ sb.append("filter:");
+ if (this.filter == null) {
sb.append("null");
} else {
- sb.append(this.new_part);
+ sb.append(this.filter);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("max_parts:");
+ sb.append(this.max_parts);
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -20201,14 +20343,17 @@ public class ThriftHiveMetastore {
}
- public static class alter_partition_result implements TBase, java.io.Serializable, Cloneable {
- private static final TStruct STRUCT_DESC = new TStruct("alter_partition_result");
+ public static class get_partitions_by_filter_result implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partitions_by_filter_result");
+ private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.LIST, (short)0);
private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1);
private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2);
- private InvalidOperationException o1;
+ private List<Partition> success;
+ public static final int SUCCESS = 0;
+ private MetaException o1;
public static final int O1 = 1;
- private MetaException o2;
+ private NoSuchObjectException o2;
public static final int O2 = 2;
private final Isset __isset = new Isset();
@@ -20216,6 +20361,9 @@ public class ThriftHiveMetastore {
}
public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT,
+ new ListMetaData(TType.LIST,
+ new StructMetaData(TType.STRUCT, Partition.class))));
put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT,
new FieldValueMetaData(TType.STRUCT)));
put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT,
@@ -20223,17 +20371,19 @@ public class ThriftHiveMetastore {
}});
static {
- FieldMetaData.addStructMetaDataMap(alter_partition_result.class, metaDataMap);
+ FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_result.class, metaDataMap);
}
- public alter_partition_result() {
+ public get_partitions_by_filter_result() {
}
- public alter_partition_result(
- InvalidOperationException o1,
- MetaException o2)
+ public get_partitions_by_filter_result(
+ List<Partition> success,
+ MetaException o1,
+ NoSuchObjectException o2)
{
this();
+ this.success = success;
this.o1 = o1;
this.o2 = o2;
}
@@ -20241,25 +20391,64 @@ public class ThriftHiveMetastore {
/**
* Performs a deep copy on <i>other</i>.
*/
- public alter_partition_result(alter_partition_result other) {
+ public get_partitions_by_filter_result(get_partitions_by_filter_result other) {
+ if (other.isSetSuccess()) {
+ List<Partition> __this__success = new ArrayList<Partition>();
+ for (Partition other_element : other.success) {
+ __this__success.add(new Partition(other_element));
+ }
+ this.success = __this__success;
+ }
if (other.isSetO1()) {
- this.o1 = new InvalidOperationException(other.o1);
+ this.o1 = new MetaException(other.o1);
}
if (other.isSetO2()) {
- this.o2 = new MetaException(other.o2);
+ this.o2 = new NoSuchObjectException(other.o2);
}
}
@Override
- public alter_partition_result clone() {
- return new alter_partition_result(this);
+ public get_partitions_by_filter_result clone() {
+ return new get_partitions_by_filter_result(this);
}
- public InvalidOperationException getO1() {
+ public int getSuccessSize() {
+ return (this.success == null) ? 0 : this.success.size();
+ }
+
+ public java.util.Iterator<Partition> getSuccessIterator() {
+ return (this.success == null) ? null : this.success.iterator();
+ }
+
+ public void addToSuccess(Partition elem) {
+ if (this.success == null) {
+ this.success = new ArrayList<Partition>();
+ }
+ this.success.add(elem);
+ }
+
+ public List<Partition> getSuccess() {
+ return this.success;
+ }
+
+ public void setSuccess(List<Partition> success) {
+ this.success = success;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ // Returns true if field success is set (has been asigned a value) and false otherwise
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public MetaException getO1() {
return this.o1;
}
- public void setO1(InvalidOperationException o1) {
+ public void setO1(MetaException o1) {
this.o1 = o1;
}
@@ -20272,11 +20461,11 @@ public class ThriftHiveMetastore {
return this.o1 != null;
}
- public MetaException getO2() {
+ public NoSuchObjectException getO2() {
return this.o2;
}
- public void setO2(MetaException o2) {
+ public void setO2(NoSuchObjectException o2) {
this.o2 = o2;
}
@@ -20291,9 +20480,657 @@ public class ThriftHiveMetastore {
public void setFieldValue(int fieldID, Object value) {
switch (fieldID) {
- case O1:
+ case SUCCESS:
if (value == null) {
- unsetO1();
+ unsetSuccess();
+ } else {
+ setSuccess((List<Partition>)value);
+ }
+ break;
+
+ case O1:
+ if (value == null) {
+ unsetO1();
+ } else {
+ setO1((MetaException)value);
+ }
+ break;
+
+ case O2:
+ if (value == null) {
+ unsetO2();
+ } else {
+ setO2((NoSuchObjectException)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return getSuccess();
+
+ case O1:
+ return getO1();
+
+ case O2:
+ return getO2();
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return isSetSuccess();
+ case O1:
+ return isSetO1();
+ case O2:
+ return isSetO2();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partitions_by_filter_result)
+ return this.equals((get_partitions_by_filter_result)that);
+ return false;
+ }
+
+ public boolean equals(get_partitions_by_filter_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_o1 = true && this.isSetO1();
+ boolean that_present_o1 = true && that.isSetO1();
+ if (this_present_o1 || that_present_o1) {
+ if (!(this_present_o1 && that_present_o1))
+ return false;
+ if (!this.o1.equals(that.o1))
+ return false;
+ }
+
+ boolean this_present_o2 = true && this.isSetO2();
+ boolean that_present_o2 = true && that.isSetO2();
+ if (this_present_o2 || that_present_o2) {
+ if (!(this_present_o2 && that_present_o2))
+ return false;
+ if (!this.o2.equals(that.o2))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case SUCCESS:
+ if (field.type == TType.LIST) {
+ {
+ TList _list123 = iprot.readListBegin();
+ this.success = new ArrayList<Partition>(_list123.size);
+ for (int _i124 = 0; _i124 < _list123.size; ++_i124)
+ {
+ Partition _elem125;
+ _elem125 = new Partition();
+ _elem125.read(iprot);
+ this.success.add(_elem125);
+ }
+ iprot.readListEnd();
+ }
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O1:
+ if (field.type == TType.STRUCT) {
+ this.o1 = new MetaException();
+ this.o1.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O2:
+ if (field.type == TType.STRUCT) {
+ this.o2 = new NoSuchObjectException();
+ this.o2.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ {
+ oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
+ for (Partition _iter126 : this.success) {
+ _iter126.write(oprot);
+ }
+ oprot.writeListEnd();
+ }
+ oprot.writeFieldEnd();
+ } else if (this.isSetO1()) {
+ oprot.writeFieldBegin(O1_FIELD_DESC);
+ this.o1.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO2()) {
+ oprot.writeFieldBegin(O2_FIELD_DESC);
+ this.o2.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partitions_by_filter_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o1:");
+ if (this.o1 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o1);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o2:");
+ if (this.o2 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o2);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
+ public static class alter_partition_args implements TBase, java.io.Serializable, Cloneable { // Thrift argument struct for the alter_partition RPC: (db_name, tbl_name, new_part)
+ private static final TStruct STRUCT_DESC = new TStruct("alter_partition_args");
+ private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
+ private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2);
+ private static final TField NEW_PART_FIELD_DESC = new TField("new_part", TType.STRUCT, (short)3);
+
+ private String db_name;
+ public static final int DB_NAME = 1;
+ private String tbl_name;
+ public static final int TBL_NAME = 2;
+ private Partition new_part;
+ public static final int NEW_PART = 3;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable { // empty: all three fields are object types, so null alone marks "unset"
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(NEW_PART, new FieldMetaData("new_part", TFieldRequirementType.DEFAULT,
+ new StructMetaData(TType.STRUCT, Partition.class)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(alter_partition_args.class, metaDataMap);
+ }
+
+ public alter_partition_args() {
+ }
+
+ public alter_partition_args(
+ String db_name,
+ String tbl_name,
+ Partition new_part)
+ {
+ this();
+ this.db_name = db_name;
+ this.tbl_name = tbl_name;
+ this.new_part = new_part;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public alter_partition_args(alter_partition_args other) {
+ if (other.isSetDb_name()) {
+ this.db_name = other.db_name;
+ }
+ if (other.isSetTbl_name()) {
+ this.tbl_name = other.tbl_name;
+ }
+ if (other.isSetNew_part()) {
+ this.new_part = new Partition(other.new_part);
+ }
+ }
+
+ @Override
+ public alter_partition_args clone() {
+ return new alter_partition_args(this);
+ }
+
+ public String getDb_name() {
+ return this.db_name;
+ }
+
+ public void setDb_name(String db_name) {
+ this.db_name = db_name;
+ }
+
+ public void unsetDb_name() {
+ this.db_name = null;
+ }
+
+ // Returns true if field db_name is set (has been assigned a value) and false otherwise
+ public boolean isSetDb_name() {
+ return this.db_name != null;
+ }
+
+ public String getTbl_name() {
+ return this.tbl_name;
+ }
+
+ public void setTbl_name(String tbl_name) {
+ this.tbl_name = tbl_name;
+ }
+
+ public void unsetTbl_name() {
+ this.tbl_name = null;
+ }
+
+ // Returns true if field tbl_name is set (has been assigned a value) and false otherwise
+ public boolean isSetTbl_name() {
+ return this.tbl_name != null;
+ }
+
+ public Partition getNew_part() {
+ return this.new_part;
+ }
+
+ public void setNew_part(Partition new_part) {
+ this.new_part = new_part;
+ }
+
+ public void unsetNew_part() {
+ this.new_part = null;
+ }
+
+ // Returns true if field new_part is set (has been assigned a value) and false otherwise
+ public boolean isSetNew_part() {
+ return this.new_part != null;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case DB_NAME:
+ if (value == null) {
+ unsetDb_name();
+ } else {
+ setDb_name((String)value);
+ }
+ break;
+
+ case TBL_NAME:
+ if (value == null) {
+ unsetTbl_name();
+ } else {
+ setTbl_name((String)value);
+ }
+ break;
+
+ case NEW_PART:
+ if (value == null) {
+ unsetNew_part();
+ } else {
+ setNew_part((Partition)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return getDb_name();
+
+ case TBL_NAME:
+ return getTbl_name();
+
+ case NEW_PART:
+ return getNew_part();
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return isSetDb_name();
+ case TBL_NAME:
+ return isSetTbl_name();
+ case NEW_PART:
+ return isSetNew_part();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof alter_partition_args)
+ return this.equals((alter_partition_args)that);
+ return false;
+ }
+
+ public boolean equals(alter_partition_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_db_name = true && this.isSetDb_name();
+ boolean that_present_db_name = true && that.isSetDb_name();
+ if (this_present_db_name || that_present_db_name) {
+ if (!(this_present_db_name && that_present_db_name))
+ return false;
+ if (!this.db_name.equals(that.db_name))
+ return false;
+ }
+
+ boolean this_present_tbl_name = true && this.isSetTbl_name();
+ boolean that_present_tbl_name = true && that.isSetTbl_name();
+ if (this_present_tbl_name || that_present_tbl_name) {
+ if (!(this_present_tbl_name && that_present_tbl_name))
+ return false;
+ if (!this.tbl_name.equals(that.tbl_name))
+ return false;
+ }
+
+ boolean this_present_new_part = true && this.isSetNew_part();
+ boolean that_present_new_part = true && that.isSetNew_part();
+ if (this_present_new_part || that_present_new_part) {
+ if (!(this_present_new_part && that_present_new_part))
+ return false;
+ if (!this.new_part.equals(that.new_part))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0; // NOTE(review): constant hash satisfies the equals/hashCode contract but degrades hashed collections; known old-Thrift generator artifact, left as generated
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case DB_NAME:
+ if (field.type == TType.STRING) {
+ this.db_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case TBL_NAME:
+ if (field.type == TType.STRING) {
+ this.tbl_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case NEW_PART:
+ if (field.type == TType.STRUCT) {
+ this.new_part = new Partition();
+ this.new_part.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (this.db_name != null) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(this.db_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.tbl_name != null) {
+ oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+ oprot.writeString(this.tbl_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.new_part != null) {
+ oprot.writeFieldBegin(NEW_PART_FIELD_DESC);
+ this.new_part.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("alter_partition_args(");
+ boolean first = true;
+
+ sb.append("db_name:");
+ if (this.db_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.db_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tbl_name:");
+ if (this.tbl_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tbl_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("new_part:");
+ if (this.new_part == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.new_part);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
+ public static class alter_partition_result implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("alter_partition_result");
+ private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1);
+ private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2);
+
+ private InvalidOperationException o1;
+ public static final int O1 = 1;
+ private MetaException o2;
+ public static final int O2 = 2;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable {
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(alter_partition_result.class, metaDataMap);
+ }
+
+ public alter_partition_result() {
+ }
+
+ public alter_partition_result(
+ InvalidOperationException o1,
+ MetaException o2)
+ {
+ this();
+ this.o1 = o1;
+ this.o2 = o2;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public alter_partition_result(alter_partition_result other) {
+ if (other.isSetO1()) {
+ this.o1 = new InvalidOperationException(other.o1);
+ }
+ if (other.isSetO2()) {
+ this.o2 = new MetaException(other.o2);
+ }
+ }
+
+ @Override
+ public alter_partition_result clone() {
+ return new alter_partition_result(this);
+ }
+
+ public InvalidOperationException getO1() {
+ return this.o1;
+ }
+
+ public void setO1(InvalidOperationException o1) {
+ this.o1 = o1;
+ }
+
+ public void unsetO1() {
+ this.o1 = null;
+ }
+
+ // Returns true if field o1 is set (has been assigned a value) and false otherwise
+ public boolean isSetO1() {
+ return this.o1 != null;
+ }
+
+ public MetaException getO2() {
+ return this.o2;
+ }
+
+ public void setO2(MetaException o2) {
+ this.o2 = o2;
+ }
+
+ public void unsetO2() {
+ this.o2 = null;
+ }
+
+ // Returns true if field o2 is set (has been assigned a value) and false otherwise
+ public boolean isSetO2() {
+ return this.o2 != null;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case O1:
+ if (value == null) {
+ unsetO1();
} else {
setO1((InvalidOperationException)value);
}
@@ -21380,13 +22217,13 @@ public class ThriftHiveMetastore {
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list123 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list123.size);
- for (int _i124 = 0; _i124 < _list123.size; ++_i124)
+ TList _list127 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list127.size);
+ for (int _i128 = 0; _i128 < _list127.size; ++_i128)
{
- String _elem125;
- _elem125 = iprot.readString();
- this.success.add(_elem125);
+ String _elem129;
+ _elem129 = iprot.readString();
+ this.success.add(_elem129);
}
iprot.readListEnd();
}
@@ -21420,8 +22257,8 @@ public class ThriftHiveMetastore {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter126 : this.success) {
- oprot.writeString(_iter126);
+ for (String _iter130 : this.success) {
+ oprot.writeString(_iter130);
}
oprot.writeListEnd();
}
@@ -21871,15 +22708,15 @@ public class ThriftHiveMetastore {
case SUCCESS:
if (field.type == TType.MAP) {
{
- TMap _map127 = iprot.readMapBegin();
- this.success = new HashMap<String,String>(2*_map127.size);
- for (int _i128 = 0; _i128 < _map127.size; ++_i128)
+ TMap _map131 = iprot.readMapBegin();
+ this.success = new HashMap<String,String>(2*_map131.size);
+ for (int _i132 = 0; _i132 < _map131.size; ++_i132)
{
- String _key129;
- String _val130;
- _key129 = iprot.readString();
- _val130 = iprot.readString();
- this.success.put(_key129, _val130);
+ String _key133;
+ String _val134;
+ _key133 = iprot.readString();
+ _val134 = iprot.readString();
+ this.success.put(_key133, _val134);
}
iprot.readMapEnd();
}
@@ -21913,9 +22750,9 @@ public class ThriftHiveMetastore {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, this.success.size()));
- for (Map.Entry<String, String> _iter131 : this.success.entrySet()) {
- oprot.writeString(_iter131.getKey());
- oprot.writeString(_iter131.getValue());
+ for (Map.Entry<String, String> _iter135 : this.success.entrySet()) {
+ oprot.writeString(_iter135.getKey());
+ oprot.writeString(_iter135.getValue());
}
oprot.writeMapEnd();
}
@@ -24586,14 +25423,14 @@ public class ThriftHiveMetastore {
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list132 = iprot.readListBegin();
- this.success = new ArrayList<Index>(_list132.size);
- for (int _i133 = 0; _i133 < _list132.size; ++_i133)
+ TList _list136 = iprot.readListBegin();
+ this.success = new ArrayList<Index>(_list136.size);
+ for (int _i137 = 0; _i137 < _list136.size; ++_i137)
{
- Index _elem134;
- _elem134 = new Index();
- _elem134.read(iprot);
- this.success.add(_elem134);
+ Index _elem138;
+ _elem138 = new Index();
+ _elem138.read(iprot);
+ this.success.add(_elem138);
}
iprot.readListEnd();
}
@@ -24635,8 +25472,8 @@ public class ThriftHiveMetastore {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRUCT, this.success.size()));
- for (Index _iter135 : this.success) {
- _iter135.write(oprot);
+ for (Index _iter139 : this.success) {
+ _iter139.write(oprot);
}
oprot.writeListEnd();
}
@@ -25230,13 +26067,13 @@ public class ThriftHiveMetastore {
case SUCCESS:
if (field.type == TType.LIST) {
{
- TList _list136 = iprot.readListBegin();
- this.success = new ArrayList<String>(_list136.size);
- for (int _i137 = 0; _i137 < _list136.size; ++_i137)
+ TList _list140 = iprot.readListBegin();
+ this.success = new ArrayList<String>(_list140.size);
+ for (int _i141 = 0; _i141 < _list140.size; ++_i141)
{
- String _elem138;
- _elem138 = iprot.readString();
- this.success.add(_elem138);
+ String _elem142;
+ _elem142 = iprot.readString();
+ this.success.add(_elem142);
}
iprot.readListEnd();
}
@@ -25270,8 +26107,8 @@ public class ThriftHiveMetastore {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new TList(TType.STRING, this.success.size()));
- for (String _iter139 : this.success) {
- oprot.writeString(_iter139);
+ for (String _iter143 : this.success) {
+ oprot.writeString(_iter143);
}
oprot.writeListEnd();
}