You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by zs...@apache.org on 2010/02/05 04:11:43 UTC
svn commit: r906788 - in /hadoop/hive/trunk: ./ metastore/if/
metastore/src/gen-cpp/
metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/
metastore/src/gen-php/ metastore/src/gen-py/hive_metastore/
metastore/src/java/org/apache/hadoop/hive/...
Author: zshao
Date: Fri Feb 5 03:11:42 2010
New Revision: 906788
URL: http://svn.apache.org/viewvc?rev=906788&view=rev
Log:
HIVE-1132. Add metastore API method to get partition by name. (Paul Yang via zshao)
Modified:
hadoop/hive/trunk/CHANGES.txt
hadoop/hive/trunk/metastore/if/hive_metastore.thrift
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h
hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote
hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Fri Feb 5 03:11:42 2010
@@ -55,6 +55,9 @@
HIVE-1103. Add .gitignore file. (Carl Steinbach via zshao)
+ HIVE-1132. Add metastore API method to get partition by name.
+ (Paul Yang via zshao)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/hive/trunk/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/if/hive_metastore.thrift?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/if/hive_metastore.thrift (original)
+++ hadoop/hive/trunk/metastore/if/hive_metastore.thrift Fri Feb 5 03:11:42 2010
@@ -198,6 +198,9 @@
throws(1:NoSuchObjectException o1, 2:MetaException o2)
Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
throws(1:MetaException o1)
+ Partition get_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
+ throws(1:MetaException o1, 2:UnknownTableException o2, 3:NoSuchObjectException o3)
+
// returns all the partitions for this table in reverse chronological order.
// if max parts is given then it will return only that many
list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.cpp Fri Feb 5 03:11:42 2010
@@ -4170,6 +4170,252 @@
return xfer;
}
+uint32_t ThriftHiveMetastore_get_partition_by_name_args::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->db_name);
+ this->__isset.db_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tbl_name);
+ this->__isset.tbl_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->part_name);
+ this->__isset.part_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partition_by_name_args::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_by_name_args");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->db_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tbl_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("part_name", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->part_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partition_by_name_pargs::write(apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_by_name_pargs");
+ xfer += oprot->writeFieldBegin("db_name", apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->db_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("tbl_name", apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->tbl_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("part_name", apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString((*(this->part_name)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partition_by_name_result::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partition_by_name_result::write(apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_by_name_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o2) {
+ xfer += oprot->writeFieldBegin("o2", apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o2.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o3) {
+ xfer += oprot->writeFieldBegin("o3", apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->o3.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_partition_by_name_presult::read(apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o2.read(iprot);
+ this->__isset.o2 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
uint32_t ThriftHiveMetastore_get_partitions_args::read(apache::thrift::protocol::TProtocol* iprot) {
uint32_t xfer = 0;
@@ -6318,6 +6564,77 @@
throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result");
}
+void ThriftHiveMetastoreClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name)
+{
+ send_get_partition_by_name(db_name, tbl_name, part_name);
+ recv_get_partition_by_name(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_partition_by_name", apache::thrift::protocol::T_CALL, cseqid);
+
+ ThriftHiveMetastore_get_partition_by_name_pargs args;
+ args.db_name = &db_name;
+ args.tbl_name = &tbl_name;
+ args.part_name = &part_name;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->flush();
+ oprot_->getTransport()->writeEnd();
+}
+
+void ThriftHiveMetastoreClient::recv_get_partition_by_name(Partition& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == apache::thrift::protocol::T_EXCEPTION) {
+ apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+ }
+ if (fname.compare("get_partition_by_name") != 0) {
+ iprot_->skip(apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::WRONG_METHOD_NAME);
+ }
+ ThriftHiveMetastore_get_partition_by_name_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.o1) {
+ throw result.o1;
+ }
+ if (result.__isset.o2) {
+ throw result.o2;
+ }
+ if (result.__isset.o3) {
+ throw result.o3;
+ }
+ throw apache::thrift::TApplicationException(apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result");
+}
+
void ThriftHiveMetastoreClient::get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
{
send_get_partitions(db_name, tbl_name, max_parts);
@@ -7257,6 +7574,43 @@
oprot->getTransport()->writeEnd();
}
+void ThriftHiveMetastoreProcessor::process_get_partition_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
+{
+ ThriftHiveMetastore_get_partition_by_name_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+
+ ThriftHiveMetastore_get_partition_by_name_result result;
+ try {
+ iface_->get_partition_by_name(result.success, args.db_name, args.tbl_name, args.part_name);
+ result.__isset.success = true;
+ } catch (MetaException &o1) {
+ result.o1 = o1;
+ result.__isset.o1 = true;
+ } catch (UnknownTableException &o2) {
+ result.o2 = o2;
+ result.__isset.o2 = true;
+ } catch (NoSuchObjectException &o3) {
+ result.o3 = o3;
+ result.__isset.o3 = true;
+ } catch (const std::exception& e) {
+ apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_partition_by_name", apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+ return;
+ }
+
+ oprot->writeMessageBegin("get_partition_by_name", apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->flush();
+ oprot->getTransport()->writeEnd();
+}
+
void ThriftHiveMetastoreProcessor::process_get_partitions(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot)
{
ThriftHiveMetastore_get_partitions_args args;
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore.h Fri Feb 5 03:11:42 2010
@@ -34,6 +34,7 @@
virtual void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
virtual bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) = 0;
virtual void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
+ virtual void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0;
virtual void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
virtual void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
@@ -105,6 +106,9 @@
void get_partition(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */) {
return;
}
+ void get_partition_by_name(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */) {
+ return;
+ }
void get_partitions(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_parts */) {
return;
}
@@ -2167,6 +2171,127 @@
};
+class ThriftHiveMetastore_get_partition_by_name_args {
+ public:
+
+ ThriftHiveMetastore_get_partition_by_name_args() : db_name(""), tbl_name(""), part_name("") {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partition_by_name_args() throw() {}
+
+ std::string db_name;
+ std::string tbl_name;
+ std::string part_name;
+
+ struct __isset {
+ __isset() : db_name(false), tbl_name(false), part_name(false) {}
+ bool db_name;
+ bool tbl_name;
+ bool part_name;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partition_by_name_args & rhs) const
+ {
+ if (!(db_name == rhs.db_name))
+ return false;
+ if (!(tbl_name == rhs.tbl_name))
+ return false;
+ if (!(part_name == rhs.part_name))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partition_by_name_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partition_by_name_args & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partition_by_name_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partition_by_name_pargs() throw() {}
+
+ const std::string* db_name;
+ const std::string* tbl_name;
+ const std::string* part_name;
+
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partition_by_name_result {
+ public:
+
+ ThriftHiveMetastore_get_partition_by_name_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_get_partition_by_name_result() throw() {}
+
+ Partition success;
+ MetaException o1;
+ UnknownTableException o2;
+ NoSuchObjectException o3;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ bool o3;
+ } __isset;
+
+ bool operator == (const ThriftHiveMetastore_get_partition_by_name_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o2 == rhs.o2))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_get_partition_by_name_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_get_partition_by_name_result & ) const;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+class ThriftHiveMetastore_get_partition_by_name_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_get_partition_by_name_presult() throw() {}
+
+ Partition* success;
+ MetaException o1;
+ UnknownTableException o2;
+ NoSuchObjectException o3;
+
+ struct __isset {
+ __isset() : success(false), o1(false), o2(false), o3(false) {}
+ bool success;
+ bool o1;
+ bool o2;
+ bool o3;
+ } __isset;
+
+ uint32_t read(apache::thrift::protocol::TProtocol* iprot);
+
+};
+
class ThriftHiveMetastore_get_partitions_args {
public:
@@ -2673,6 +2798,9 @@
void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
void recv_get_partition(Partition& _return);
+ void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
+ void recv_get_partition_by_name(Partition& _return);
void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
void recv_get_partitions(std::vector<Partition> & _return);
@@ -2712,6 +2840,7 @@
void process_append_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_drop_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
+ void process_get_partition_by_name(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partitions(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_get_partition_names(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
void process_alter_partition(int32_t seqid, apache::thrift::protocol::TProtocol* iprot, apache::thrift::protocol::TProtocol* oprot);
@@ -2739,6 +2868,7 @@
processMap_["append_partition"] = &ThriftHiveMetastoreProcessor::process_append_partition;
processMap_["drop_partition"] = &ThriftHiveMetastoreProcessor::process_drop_partition;
processMap_["get_partition"] = &ThriftHiveMetastoreProcessor::process_get_partition;
+ processMap_["get_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_get_partition_by_name;
processMap_["get_partitions"] = &ThriftHiveMetastoreProcessor::process_get_partitions;
processMap_["get_partition_names"] = &ThriftHiveMetastoreProcessor::process_get_partition_names;
processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition;
@@ -2974,6 +3104,18 @@
}
}
+ void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) {
+ uint32_t sz = ifaces_.size();
+ for (uint32_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_partition_by_name(_return, db_name, tbl_name, part_name);
+ return;
+ } else {
+ ifaces_[i]->get_partition_by_name(_return, db_name, tbl_name, part_name);
+ }
+ }
+ }
+
void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) {
uint32_t sz = ifaces_.size();
for (uint32_t i = 0; i < sz; ++i) {
Modified: hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (original)
+++ hadoop/hive/trunk/metastore/src/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp Fri Feb 5 03:11:42 2010
@@ -117,6 +117,11 @@
printf("get_partition\n");
}
+ void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) {
+ // Your implementation goes here
+ printf("get_partition_by_name\n");
+ }
+
void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) {
// Your implementation goes here
printf("get_partitions\n");
Modified: hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (original)
+++ hadoop/hive/trunk/metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java Fri Feb 5 03:11:42 2010
@@ -63,6 +63,8 @@
public Partition get_partition(String db_name, String tbl_name, List<String> part_vals) throws MetaException, TException;
+ public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+
public List<Partition> get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException;
public List<String> get_partition_names(String db_name, String tbl_name, short max_parts) throws MetaException, TException;
@@ -831,6 +833,50 @@
throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result");
}
+ public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, UnknownTableException, NoSuchObjectException, TException
+ {
+ send_get_partition_by_name(db_name, tbl_name, part_name);
+ return recv_get_partition_by_name();
+ }
+
+ public void send_get_partition_by_name(String db_name, String tbl_name, String part_name) throws TException
+ {
+ oprot_.writeMessageBegin(new TMessage("get_partition_by_name", TMessageType.CALL, seqid_));
+ get_partition_by_name_args args = new get_partition_by_name_args();
+ args.db_name = db_name;
+ args.tbl_name = tbl_name;
+ args.part_name = part_name;
+ args.write(oprot_);
+ oprot_.writeMessageEnd();
+ oprot_.getTransport().flush();
+ }
+
+ public Partition recv_get_partition_by_name() throws MetaException, UnknownTableException, NoSuchObjectException, TException
+ {
+ TMessage msg = iprot_.readMessageBegin();
+ if (msg.type == TMessageType.EXCEPTION) {
+ TApplicationException x = TApplicationException.read(iprot_);
+ iprot_.readMessageEnd();
+ throw x;
+ }
+ get_partition_by_name_result result = new get_partition_by_name_result();
+ result.read(iprot_);
+ iprot_.readMessageEnd();
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o2 != null) {
+ throw result.o2;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ throw new TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_by_name failed: unknown result");
+ }
+
public List<Partition> get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException
{
send_get_partitions(db_name, tbl_name, max_parts);
@@ -1011,6 +1057,7 @@
processMap_.put("append_partition", new append_partition());
processMap_.put("drop_partition", new drop_partition());
processMap_.put("get_partition", new get_partition());
+ processMap_.put("get_partition_by_name", new get_partition_by_name());
processMap_.put("get_partitions", new get_partitions());
processMap_.put("get_partition_names", new get_partition_names());
processMap_.put("alter_partition", new alter_partition());
@@ -1612,6 +1659,38 @@
}
+ private class get_partition_by_name implements ProcessFunction {
+ public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
+ {
+ get_partition_by_name_args args = new get_partition_by_name_args();
+ args.read(iprot);
+ iprot.readMessageEnd();
+ get_partition_by_name_result result = new get_partition_by_name_result();
+ try {
+ result.success = iface_.get_partition_by_name(args.db_name, args.tbl_name, args.part_name);
+ } catch (MetaException o1) {
+ result.o1 = o1;
+ } catch (UnknownTableException o2) {
+ result.o2 = o2;
+ } catch (NoSuchObjectException o3) {
+ result.o3 = o3;
+ } catch (Throwable th) {
+ LOGGER.error("Internal error processing get_partition_by_name", th);
+ TApplicationException x = new TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error processing get_partition_by_name");
+ oprot.writeMessageBegin(new TMessage("get_partition_by_name", TMessageType.EXCEPTION, seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ oprot.writeMessageBegin(new TMessage("get_partition_by_name", TMessageType.REPLY, seqid));
+ result.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ }
+
+ }
+
private class get_partitions implements ProcessFunction {
public void process(int seqid, TProtocol iprot, TProtocol oprot) throws TException
{
@@ -12711,6 +12790,733 @@
}
+ public static class get_partition_by_name_args implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partition_by_name_args");
+ private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
+ private static final TField TBL_NAME_FIELD_DESC = new TField("tbl_name", TType.STRING, (short)2);
+ private static final TField PART_NAME_FIELD_DESC = new TField("part_name", TType.STRING, (short)3);
+
+ private String db_name;
+ public static final int DB_NAME = 1;
+ private String tbl_name;
+ public static final int TBL_NAME = 2;
+ private String part_name;
+ public static final int PART_NAME = 3;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable {
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(DB_NAME, new FieldMetaData("db_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(TBL_NAME, new FieldMetaData("tbl_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ put(PART_NAME, new FieldMetaData("part_name", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRING)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(get_partition_by_name_args.class, metaDataMap);
+ }
+
+ public get_partition_by_name_args() {
+ }
+
+ public get_partition_by_name_args(
+ String db_name,
+ String tbl_name,
+ String part_name)
+ {
+ this();
+ this.db_name = db_name;
+ this.tbl_name = tbl_name;
+ this.part_name = part_name;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public get_partition_by_name_args(get_partition_by_name_args other) {
+ if (other.isSetDb_name()) {
+ this.db_name = other.db_name;
+ }
+ if (other.isSetTbl_name()) {
+ this.tbl_name = other.tbl_name;
+ }
+ if (other.isSetPart_name()) {
+ this.part_name = other.part_name;
+ }
+ }
+
+ @Override
+ public get_partition_by_name_args clone() {
+ return new get_partition_by_name_args(this);
+ }
+
+ public String getDb_name() {
+ return this.db_name;
+ }
+
+ public void setDb_name(String db_name) {
+ this.db_name = db_name;
+ }
+
+ public void unsetDb_name() {
+ this.db_name = null;
+ }
+
+ // Returns true if field db_name is set (has been asigned a value) and false otherwise
+ public boolean isSetDb_name() {
+ return this.db_name != null;
+ }
+
+ public String getTbl_name() {
+ return this.tbl_name;
+ }
+
+ public void setTbl_name(String tbl_name) {
+ this.tbl_name = tbl_name;
+ }
+
+ public void unsetTbl_name() {
+ this.tbl_name = null;
+ }
+
+ // Returns true if field tbl_name is set (has been asigned a value) and false otherwise
+ public boolean isSetTbl_name() {
+ return this.tbl_name != null;
+ }
+
+ public String getPart_name() {
+ return this.part_name;
+ }
+
+ public void setPart_name(String part_name) {
+ this.part_name = part_name;
+ }
+
+ public void unsetPart_name() {
+ this.part_name = null;
+ }
+
+ // Returns true if field part_name is set (has been asigned a value) and false otherwise
+ public boolean isSetPart_name() {
+ return this.part_name != null;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case DB_NAME:
+ if (value == null) {
+ unsetDb_name();
+ } else {
+ setDb_name((String)value);
+ }
+ break;
+
+ case TBL_NAME:
+ if (value == null) {
+ unsetTbl_name();
+ } else {
+ setTbl_name((String)value);
+ }
+ break;
+
+ case PART_NAME:
+ if (value == null) {
+ unsetPart_name();
+ } else {
+ setPart_name((String)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return getDb_name();
+
+ case TBL_NAME:
+ return getTbl_name();
+
+ case PART_NAME:
+ return getPart_name();
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case DB_NAME:
+ return isSetDb_name();
+ case TBL_NAME:
+ return isSetTbl_name();
+ case PART_NAME:
+ return isSetPart_name();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partition_by_name_args)
+ return this.equals((get_partition_by_name_args)that);
+ return false;
+ }
+
+ public boolean equals(get_partition_by_name_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_db_name = true && this.isSetDb_name();
+ boolean that_present_db_name = true && that.isSetDb_name();
+ if (this_present_db_name || that_present_db_name) {
+ if (!(this_present_db_name && that_present_db_name))
+ return false;
+ if (!this.db_name.equals(that.db_name))
+ return false;
+ }
+
+ boolean this_present_tbl_name = true && this.isSetTbl_name();
+ boolean that_present_tbl_name = true && that.isSetTbl_name();
+ if (this_present_tbl_name || that_present_tbl_name) {
+ if (!(this_present_tbl_name && that_present_tbl_name))
+ return false;
+ if (!this.tbl_name.equals(that.tbl_name))
+ return false;
+ }
+
+ boolean this_present_part_name = true && this.isSetPart_name();
+ boolean that_present_part_name = true && that.isSetPart_name();
+ if (this_present_part_name || that_present_part_name) {
+ if (!(this_present_part_name && that_present_part_name))
+ return false;
+ if (!this.part_name.equals(that.part_name))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case DB_NAME:
+ if (field.type == TType.STRING) {
+ this.db_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case TBL_NAME:
+ if (field.type == TType.STRING) {
+ this.tbl_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case PART_NAME:
+ if (field.type == TType.STRING) {
+ this.part_name = iprot.readString();
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (this.db_name != null) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(this.db_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.tbl_name != null) {
+ oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+ oprot.writeString(this.tbl_name);
+ oprot.writeFieldEnd();
+ }
+ if (this.part_name != null) {
+ oprot.writeFieldBegin(PART_NAME_FIELD_DESC);
+ oprot.writeString(this.part_name);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partition_by_name_args(");
+ boolean first = true;
+
+ sb.append("db_name:");
+ if (this.db_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.db_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tbl_name:");
+ if (this.tbl_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tbl_name);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("part_name:");
+ if (this.part_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.part_name);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
+ public static class get_partition_by_name_result implements TBase, java.io.Serializable, Cloneable {
+ private static final TStruct STRUCT_DESC = new TStruct("get_partition_by_name_result");
+ private static final TField SUCCESS_FIELD_DESC = new TField("success", TType.STRUCT, (short)0);
+ private static final TField O1_FIELD_DESC = new TField("o1", TType.STRUCT, (short)1);
+ private static final TField O2_FIELD_DESC = new TField("o2", TType.STRUCT, (short)2);
+ private static final TField O3_FIELD_DESC = new TField("o3", TType.STRUCT, (short)3);
+
+ private Partition success;
+ public static final int SUCCESS = 0;
+ private MetaException o1;
+ public static final int O1 = 1;
+ private UnknownTableException o2;
+ public static final int O2 = 2;
+ private NoSuchObjectException o3;
+ public static final int O3 = 3;
+
+ private final Isset __isset = new Isset();
+ private static final class Isset implements java.io.Serializable {
+ }
+
+ public static final Map<Integer, FieldMetaData> metaDataMap = Collections.unmodifiableMap(new HashMap<Integer, FieldMetaData>() {{
+ put(SUCCESS, new FieldMetaData("success", TFieldRequirementType.DEFAULT,
+ new StructMetaData(TType.STRUCT, Partition.class)));
+ put(O1, new FieldMetaData("o1", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ put(O2, new FieldMetaData("o2", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ put(O3, new FieldMetaData("o3", TFieldRequirementType.DEFAULT,
+ new FieldValueMetaData(TType.STRUCT)));
+ }});
+
+ static {
+ FieldMetaData.addStructMetaDataMap(get_partition_by_name_result.class, metaDataMap);
+ }
+
+ public get_partition_by_name_result() {
+ }
+
+ public get_partition_by_name_result(
+ Partition success,
+ MetaException o1,
+ UnknownTableException o2,
+ NoSuchObjectException o3)
+ {
+ this();
+ this.success = success;
+ this.o1 = o1;
+ this.o2 = o2;
+ this.o3 = o3;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public get_partition_by_name_result(get_partition_by_name_result other) {
+ if (other.isSetSuccess()) {
+ this.success = new Partition(other.success);
+ }
+ if (other.isSetO1()) {
+ this.o1 = new MetaException(other.o1);
+ }
+ if (other.isSetO2()) {
+ this.o2 = new UnknownTableException(other.o2);
+ }
+ if (other.isSetO3()) {
+ this.o3 = new NoSuchObjectException(other.o3);
+ }
+ }
+
+ @Override
+ public get_partition_by_name_result clone() {
+ return new get_partition_by_name_result(this);
+ }
+
+ public Partition getSuccess() {
+ return this.success;
+ }
+
+ public void setSuccess(Partition success) {
+ this.success = success;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ // Returns true if field success is set (has been asigned a value) and false otherwise
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public MetaException getO1() {
+ return this.o1;
+ }
+
+ public void setO1(MetaException o1) {
+ this.o1 = o1;
+ }
+
+ public void unsetO1() {
+ this.o1 = null;
+ }
+
+ // Returns true if field o1 is set (has been asigned a value) and false otherwise
+ public boolean isSetO1() {
+ return this.o1 != null;
+ }
+
+ public UnknownTableException getO2() {
+ return this.o2;
+ }
+
+ public void setO2(UnknownTableException o2) {
+ this.o2 = o2;
+ }
+
+ public void unsetO2() {
+ this.o2 = null;
+ }
+
+ // Returns true if field o2 is set (has been asigned a value) and false otherwise
+ public boolean isSetO2() {
+ return this.o2 != null;
+ }
+
+ public NoSuchObjectException getO3() {
+ return this.o3;
+ }
+
+ public void setO3(NoSuchObjectException o3) {
+ this.o3 = o3;
+ }
+
+ public void unsetO3() {
+ this.o3 = null;
+ }
+
+ // Returns true if field o3 is set (has been asigned a value) and false otherwise
+ public boolean isSetO3() {
+ return this.o3 != null;
+ }
+
+ public void setFieldValue(int fieldID, Object value) {
+ switch (fieldID) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((Partition)value);
+ }
+ break;
+
+ case O1:
+ if (value == null) {
+ unsetO1();
+ } else {
+ setO1((MetaException)value);
+ }
+ break;
+
+ case O2:
+ if (value == null) {
+ unsetO2();
+ } else {
+ setO2((UnknownTableException)value);
+ }
+ break;
+
+ case O3:
+ if (value == null) {
+ unsetO3();
+ } else {
+ setO3((NoSuchObjectException)value);
+ }
+ break;
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ public Object getFieldValue(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return getSuccess();
+
+ case O1:
+ return getO1();
+
+ case O2:
+ return getO2();
+
+ case O3:
+ return getO3();
+
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ // Returns true if field corresponding to fieldID is set (has been asigned a value) and false otherwise
+ public boolean isSet(int fieldID) {
+ switch (fieldID) {
+ case SUCCESS:
+ return isSetSuccess();
+ case O1:
+ return isSetO1();
+ case O2:
+ return isSetO2();
+ case O3:
+ return isSetO3();
+ default:
+ throw new IllegalArgumentException("Field " + fieldID + " doesn't exist!");
+ }
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof get_partition_by_name_result)
+ return this.equals((get_partition_by_name_result)that);
+ return false;
+ }
+
+ public boolean equals(get_partition_by_name_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_o1 = true && this.isSetO1();
+ boolean that_present_o1 = true && that.isSetO1();
+ if (this_present_o1 || that_present_o1) {
+ if (!(this_present_o1 && that_present_o1))
+ return false;
+ if (!this.o1.equals(that.o1))
+ return false;
+ }
+
+ boolean this_present_o2 = true && this.isSetO2();
+ boolean that_present_o2 = true && that.isSetO2();
+ if (this_present_o2 || that_present_o2) {
+ if (!(this_present_o2 && that_present_o2))
+ return false;
+ if (!this.o2.equals(that.o2))
+ return false;
+ }
+
+ boolean this_present_o3 = true && this.isSetO3();
+ boolean that_present_o3 = true && that.isSetO3();
+ if (this_present_o3 || that_present_o3) {
+ if (!(this_present_o3 && that_present_o3))
+ return false;
+ if (!this.o3.equals(that.o3))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public void read(TProtocol iprot) throws TException {
+ TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == TType.STOP) {
+ break;
+ }
+ switch (field.id)
+ {
+ case SUCCESS:
+ if (field.type == TType.STRUCT) {
+ this.success = new Partition();
+ this.success.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O1:
+ if (field.type == TType.STRUCT) {
+ this.o1 = new MetaException();
+ this.o1.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O2:
+ if (field.type == TType.STRUCT) {
+ this.o2 = new UnknownTableException();
+ this.o2.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case O3:
+ if (field.type == TType.STRUCT) {
+ this.o3 = new NoSuchObjectException();
+ this.o3.read(iprot);
+ } else {
+ TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ TProtocolUtil.skip(iprot, field.type);
+ break;
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ validate();
+ }
+
+ public void write(TProtocol oprot) throws TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ this.success.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO1()) {
+ oprot.writeFieldBegin(O1_FIELD_DESC);
+ this.o1.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO2()) {
+ oprot.writeFieldBegin(O2_FIELD_DESC);
+ this.o2.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetO3()) {
+ oprot.writeFieldBegin(O3_FIELD_DESC);
+ this.o3.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("get_partition_by_name_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o1:");
+ if (this.o1 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o1);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o2:");
+ if (this.o2 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o2);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("o3:");
+ if (this.o3 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.o3);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws TException {
+ // check for required fields
+ // check that fields of type enum have valid values
+ }
+
+ }
+
public static class get_partitions_args implements TBase, java.io.Serializable, Cloneable {
private static final TStruct STRUCT_DESC = new TStruct("get_partitions_args");
private static final TField DB_NAME_FIELD_DESC = new TField("db_name", TType.STRING, (short)1);
Modified: hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php (original)
+++ hadoop/hive/trunk/metastore/src/gen-php/ThriftHiveMetastore.php Fri Feb 5 03:11:42 2010
@@ -29,6 +29,7 @@
public function append_partition($db_name, $tbl_name, $part_vals);
public function drop_partition($db_name, $tbl_name, $part_vals, $deleteData);
public function get_partition($db_name, $tbl_name, $part_vals);
+ public function get_partition_by_name($db_name, $tbl_name, $part_name);
public function get_partitions($db_name, $tbl_name, $max_parts);
public function get_partition_names($db_name, $tbl_name, $max_parts);
public function alter_partition($db_name, $tbl_name, $new_part);
@@ -1129,6 +1130,68 @@
throw new Exception("get_partition failed: unknown result");
}
+ public function get_partition_by_name($db_name, $tbl_name, $part_name)
+ {
+ $this->send_get_partition_by_name($db_name, $tbl_name, $part_name);
+ return $this->recv_get_partition_by_name();
+ }
+
+ public function send_get_partition_by_name($db_name, $tbl_name, $part_name)
+ {
+ $args = new metastore_ThriftHiveMetastore_get_partition_by_name_args();
+ $args->db_name = $db_name;
+ $args->tbl_name = $tbl_name;
+ $args->part_name = $part_name;
+ $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'get_partition_by_name', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('get_partition_by_name', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_get_partition_by_name()
+ {
+ $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'metastore_ThriftHiveMetastore_get_partition_by_name_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new metastore_ThriftHiveMetastore_get_partition_by_name_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->success !== null) {
+ return $result->success;
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o2 !== null) {
+ throw $result->o2;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ throw new Exception("get_partition_by_name failed: unknown result");
+ }
+
public function get_partitions($db_name, $tbl_name, $max_parts)
{
$this->send_get_partitions($db_name, $tbl_name, $max_parts);
@@ -5441,6 +5504,261 @@
}
+class metastore_ThriftHiveMetastore_get_partition_by_name_args {
+ static $_TSPEC;
+
+ public $db_name = null;
+ public $tbl_name = null;
+ public $part_name = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'db_name',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tbl_name',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'part_name',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['db_name'])) {
+ $this->db_name = $vals['db_name'];
+ }
+ if (isset($vals['tbl_name'])) {
+ $this->tbl_name = $vals['tbl_name'];
+ }
+ if (isset($vals['part_name'])) {
+ $this->part_name = $vals['part_name'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partition_by_name_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->db_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tbl_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->part_name);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_by_name_args');
+ if ($this->db_name !== null) {
+ $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
+ $xfer += $output->writeString($this->db_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tbl_name !== null) {
+ $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
+ $xfer += $output->writeString($this->tbl_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->part_name !== null) {
+ $xfer += $output->writeFieldBegin('part_name', TType::STRING, 3);
+ $xfer += $output->writeString($this->part_name);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class metastore_ThriftHiveMetastore_get_partition_by_name_result {
+ static $_TSPEC;
+
+ public $success = null;
+ public $o1 = null;
+ public $o2 = null;
+ public $o3 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_Partition',
+ ),
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_MetaException',
+ ),
+ 2 => array(
+ 'var' => 'o2',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_UnknownTableException',
+ ),
+ 3 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => 'metastore_NoSuchObjectException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o2'])) {
+ $this->o2 = $vals['o2'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_get_partition_by_name_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new metastore_Partition();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new metastore_MetaException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o2 = new metastore_UnknownTableException();
+ $xfer += $this->o2->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new metastore_NoSuchObjectException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partition_by_name_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o2 !== null) {
+ $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+ $xfer += $this->o2->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class metastore_ThriftHiveMetastore_get_partitions_args {
static $_TSPEC;
Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore-remote Fri Feb 5 03:11:42 2010
@@ -40,6 +40,7 @@
print ' Partition append_partition(string db_name, string tbl_name, part_vals)'
print ' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)'
print ' Partition get_partition(string db_name, string tbl_name, part_vals)'
+ print ' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)'
print ' get_partitions(string db_name, string tbl_name, i16 max_parts)'
print ' get_partition_names(string db_name, string tbl_name, i16 max_parts)'
print ' void alter_partition(string db_name, string tbl_name, Partition new_part)'
@@ -206,6 +207,12 @@
sys.exit(1)
pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),))
+elif cmd == 'get_partition_by_name':
+ if len(args) != 3:
+ print 'get_partition_by_name requires 3 args'
+ sys.exit(1)
+ pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],))
+
elif cmd == 'get_partitions':
if len(args) != 3:
print 'get_partitions requires 3 args'
Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py Fri Feb 5 03:11:42 2010
@@ -165,6 +165,15 @@
"""
pass
+ def get_partition_by_name(self, db_name, tbl_name, part_name):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - part_name
+ """
+ pass
+
def get_partitions(self, db_name, tbl_name, max_parts):
"""
Parameters:
@@ -875,6 +884,46 @@
raise result.o1
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result");
+ def get_partition_by_name(self, db_name, tbl_name, part_name):
+ """
+ Parameters:
+ - db_name
+ - tbl_name
+ - part_name
+ """
+ self.send_get_partition_by_name(db_name, tbl_name, part_name)
+ return self.recv_get_partition_by_name()
+
+ def send_get_partition_by_name(self, db_name, tbl_name, part_name):
+ self._oprot.writeMessageBegin('get_partition_by_name', TMessageType.CALL, self._seqid)
+ args = get_partition_by_name_args()
+ args.db_name = db_name
+ args.tbl_name = tbl_name
+ args.part_name = part_name
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_partition_by_name(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_partition_by_name_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.o1 != None:
+ raise result.o1
+ if result.o2 != None:
+ raise result.o2
+ if result.o3 != None:
+ raise result.o3
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_by_name failed: unknown result");
+
def get_partitions(self, db_name, tbl_name, max_parts):
"""
Parameters:
@@ -1042,6 +1091,7 @@
self._processMap["append_partition"] = Processor.process_append_partition
self._processMap["drop_partition"] = Processor.process_drop_partition
self._processMap["get_partition"] = Processor.process_get_partition
+ self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name
self._processMap["get_partitions"] = Processor.process_get_partitions
self._processMap["get_partition_names"] = Processor.process_get_partition_names
self._processMap["alter_partition"] = Processor.process_alter_partition
@@ -1366,6 +1416,24 @@
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_get_partition_by_name(self, seqid, iprot, oprot):
+ args = get_partition_by_name_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_partition_by_name_result()
+ try:
+ result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name)
+ except MetaException, o1:
+ result.o1 = o1
+ except UnknownTableException, o2:
+ result.o2 = o2
+ except NoSuchObjectException, o3:
+ result.o3 = o3
+ oprot.writeMessageBegin("get_partition_by_name", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_get_partitions(self, seqid, iprot, oprot):
args = get_partitions_args()
args.read(iprot)
@@ -4253,6 +4321,181 @@
def __ne__(self, other):
return not (self == other)
+class get_partition_by_name_args:
+  # Thrift-generated argument struct for get_partition_by_name.
+  # Carries the three string parameters (db_name, tbl_name, part_name)
+  # between client and server; do not edit by hand.
+  """
+  Attributes:
+   - db_name
+   - tbl_name
+   - part_name
+  """
+
+  # Field metadata used by the accelerated (fastbinary) codec:
+  # (field id, type, name, nested spec, default).
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'tbl_name', None, None, ), # 2
+    (3, TType.STRING, 'part_name', None, None, ), # 3
+  )
+
+  def __init__(self, db_name=None, tbl_name=None, part_name=None,):
+    self.db_name = db_name
+    self.tbl_name = tbl_name
+    self.part_name = part_name
+
+  def read(self, iprot):
+    # Deserialize this struct; use the C-accelerated path when available.
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.part_name = iprot.readString();
+        else:
+          # Unknown type for a known field id: skip for forward compatibility.
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    # Serialize this struct; None-valued fields are omitted from the wire.
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_by_name_args')
+    if self.db_name != None:
+      oprot.writeFieldBegin('db_name', TType.STRING, 1)
+      oprot.writeString(self.db_name)
+      oprot.writeFieldEnd()
+    if self.tbl_name != None:
+      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+      oprot.writeString(self.tbl_name)
+      oprot.writeFieldEnd()
+    if self.part_name != None:
+      oprot.writeFieldBegin('part_name', TType.STRING, 3)
+      oprot.writeString(self.part_name)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+
+class get_partition_by_name_result:
+  # Thrift-generated result struct for get_partition_by_name.
+  # Exactly one of 'success' or an exception field (o1/o2/o3) is expected
+  # to be set by the server; do not edit by hand.
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+   - o3
+  """
+
+  # Field 0 is the return value; fields 1-3 are the declared exceptions.
+  thrift_spec = (
+    (0, TType.STRUCT, 'success', (Partition, Partition.thrift_spec), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (UnknownTableException, UnknownTableException.thrift_spec), None, ), # 2
+    (3, TType.STRUCT, 'o3', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 3
+  )
+
+  def __init__(self, success=None, o1=None, o2=None, o3=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+    self.o3 = o3
+
+  def read(self, iprot):
+    # Deserialize this struct; use the C-accelerated path when available.
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.STRUCT:
+          self.success = Partition()
+          self.success.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = UnknownTableException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRUCT:
+          self.o3 = NoSuchObjectException()
+          self.o3.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        # Unknown field id: skip for forward compatibility.
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    # Serialize this struct; None-valued fields are omitted from the wire.
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_by_name_result')
+    if self.success != None:
+      oprot.writeFieldBegin('success', TType.STRUCT, 0)
+      self.success.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o1 != None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 != None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o3 != None:
+      oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+      self.o3.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+
class get_partitions_args:
"""
Attributes:
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Fri Feb 5 03:11:42 2010
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.metastore;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
@@ -802,6 +804,41 @@
}
return toReturn;
}
+
+      /**
+       * Looks up a single partition by its escaped name, e.g.
+       * "ds=2010-02-03/hr=14%3A00" (values are URL-style escaped).
+       *
+       * @param db_name database containing the table
+       * @param tbl_name table containing the partition
+       * @param part_name escaped partition name; decoded via
+       *        Warehouse.makeSpecFromName into a key->value map
+       * @return the matching Partition
+       * @throws UnknownTableException if the table does not exist
+       * @throws NoSuchObjectException if the name omits a partition key
+       *         or no partition matches the decoded values
+       * @throws MetaException on metastore-level errors
+       */
+      public Partition get_partition_by_name(String db_name, String tbl_name,
+          String part_name) throws MetaException, UnknownTableException, NoSuchObjectException, TException {
+        incrementCounter("get_partition_by_name");
+        logStartFunction("get_partition_by_name: db=" + db_name + " tbl_name="
+            + tbl_name + " part_name=" + part_name);
+
+        // Unescape the partition name
+        LinkedHashMap<String, String> hm = Warehouse.makeSpecFromName(part_name);
+
+        // getPartition expects partition values in a list. use info from the
+        // table to put the partition column values in order
+        Table t = getMS().getTable(db_name, tbl_name);
+        if (t == null) {
+          throw new UnknownTableException(db_name + "." + tbl_name
+              + " table not found");
+        }
+
+        List<String> partVals = new ArrayList<String>();
+        // Order the values by the table's partition-key order, not the order
+        // they appeared in the name string.
+        for(FieldSchema field : t.getPartitionKeys()) {
+          String key = field.getName();
+          String val = hm.get(key);
+          if(val == null) {
+            throw new NoSuchObjectException("incomplete partition name - missing " + key);
+          }
+          partVals.add(val);
+        }
+        Partition p = getMS().getPartition(db_name, tbl_name, partVals);
+
+        if(p == null) {
+          throw new NoSuchObjectException(db_name + "." + tbl_name
+              + " partition (" + part_name + ") not found");
+        }
+        return p;
+      }
}
/**
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Fri Feb 5 03:11:42 2010
@@ -544,4 +544,9 @@
return client.get_config_value(name, defaultValue);
}
+  /**
+   * Fetches a partition by its escaped name (e.g. "ds=2010-02-03/hr=14").
+   * Thin delegation to the Thrift client's get_partition_by_name call.
+   */
+  public Partition getPartitionByName(String db, String tableName, String partName)
+      throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+    return client.get_partition_by_name(db, tableName, partName);
+  }
+
}
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Fri Feb 5 03:11:42 2010
@@ -174,6 +174,19 @@
List<String> partVals) throws MetaException, TException;
/**
+   * @param dbName database containing the table
+   * @param tblName table containing the partition
+   * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+   * @return the partition object
+   * @throws MetaException
+   * @throws UnknownTableException
+   * @throws NoSuchObjectException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition_by_name(java.lang.String,
+   *      java.lang.String, java.lang.String)
+   */
+  public Partition getPartitionByName(String dbName, String tblName,
+      String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+
+ /**
* @param tbl_name
* @param db_name
* @param max_parts
Modified: hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=906788&r1=906787&r2=906788&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Fri Feb 5 03:11:42 2010
@@ -32,6 +32,7 @@
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
@@ -88,7 +89,7 @@
String tblName = "comptbl";
String typeName = "Person";
List<String> vals = new ArrayList<String>(2);
- vals.add("2008-07-01");
+ vals.add("2008-07-01 14:13:12");
vals.add("14");
client.dropTable(dbName, tblName);
@@ -149,6 +150,19 @@
Partition part2 = client.getPartition(dbName, tblName, part.getValues());
assertTrue("Partitions are not same", part.equals(part2));
+    String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
+    Partition part3 = client.getPartitionByName(dbName, tblName, partName);
+    // Fix: assert against part3 (the partition fetched by name). The original
+    // line compared part with part2, so getPartitionByName's result was never
+    // actually verified.
+    assertTrue("Partitions are not the same", part.equals(part3));
+
+ boolean exceptionThrown = false;
+ try {
+ String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
+ client.getPartitionByName(dbName, tblName, badPartName);
+ } catch(NoSuchObjectException e) {
+ exceptionThrown = true;
+ }
+ assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
+
FileSystem fs = FileSystem.get(hiveConf);
Path partPath = new Path(part2.getSd().getLocation());