Posted to commits@hive.apache.org by dj...@apache.org on 2018/02/08 08:48:32 UTC
[1/7] hive git commit: HIVE-18350 : load data should rename files consistent with insert statements. (Deepak Jaiswal, reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/master 1faadb074 -> 6e9b63e48
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 4c09bc8..44a2c83 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -132,6 +132,16 @@ struct EventRequestType {
extern const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES;
+struct BucketingVersion {
+ enum type {
+ INVALID_BUCKETING = 0,
+ JAVA_BUCKETING = 1,
+ MURMUR_BUCKETING = 2
+ };
+};
+
+extern const std::map<int, const char*> _BucketingVersion_VALUES_TO_NAMES;
+
struct FunctionType {
enum type {
JAVA = 1
@@ -2372,7 +2382,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj)
}
typedef struct _Table__isset {
- _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false) {}
+ _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), bucketingVersion(true), loadInBucketedTable(true) {}
bool tableName :1;
bool dbName :1;
bool owner :1;
@@ -2389,6 +2399,8 @@ typedef struct _Table__isset {
bool temporary :1;
bool rewriteEnabled :1;
bool creationMetadata :1;
+ bool bucketingVersion :1;
+ bool loadInBucketedTable :1;
} _Table__isset;
class Table {
@@ -2396,7 +2408,9 @@ class Table {
Table(const Table&);
Table& operator=(const Table&);
- Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0) {
+ Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), bucketingVersion((BucketingVersion::type)1), loadInBucketedTable(false) {
+ bucketingVersion = (BucketingVersion::type)1;
+
}
virtual ~Table() throw();
@@ -2416,6 +2430,8 @@ class Table {
bool temporary;
bool rewriteEnabled;
CreationMetadata creationMetadata;
+ BucketingVersion::type bucketingVersion;
+ bool loadInBucketedTable;
_Table__isset __isset;
@@ -2451,6 +2467,10 @@ class Table {
void __set_creationMetadata(const CreationMetadata& val);
+ void __set_bucketingVersion(const BucketingVersion::type val);
+
+ void __set_loadInBucketedTable(const bool val);
+
bool operator == (const Table & rhs) const
{
if (!(tableName == rhs.tableName))
@@ -2493,6 +2513,14 @@ class Table {
return false;
else if (__isset.creationMetadata && !(creationMetadata == rhs.creationMetadata))
return false;
+ if (__isset.bucketingVersion != rhs.__isset.bucketingVersion)
+ return false;
+ else if (__isset.bucketingVersion && !(bucketingVersion == rhs.bucketingVersion))
+ return false;
+ if (__isset.loadInBucketedTable != rhs.__isset.loadInBucketedTable)
+ return false;
+ else if (__isset.loadInBucketedTable && !(loadInBucketedTable == rhs.loadInBucketedTable))
+ return false;
return true;
}
bool operator != (const Table &rhs) const {
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BucketingVersion.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BucketingVersion.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BucketingVersion.java
new file mode 100644
index 0000000..b7de161
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BucketingVersion.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum BucketingVersion implements org.apache.thrift.TEnum {
+ INVALID_BUCKETING(0),
+ JAVA_BUCKETING(1),
+ MURMUR_BUCKETING(2);
+
+ private final int value;
+
+ private BucketingVersion(int value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the integer value of this enum value, as defined in the Thrift IDL.
+ */
+ public int getValue() {
+ return value;
+ }
+
+ /**
+ * Find the enum type by its integer value, as defined in the Thrift IDL.
+ * @return null if the value is not found.
+ */
+ public static BucketingVersion findByValue(int value) {
+ switch (value) {
+ case 0:
+ return INVALID_BUCKETING;
+ case 1:
+ return JAVA_BUCKETING;
+ case 2:
+ return MURMUR_BUCKETING;
+ default:
+ return null;
+ }
+ }
+}
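For reference, a minimal usage sketch (not part of this patch) of the generated enum: findByValue() maps a Thrift wire value back to a constant and returns null for unknown values, while getValue() yields the integer defined in the IDL.

import org.apache.hadoop.hive.metastore.api.BucketingVersion;

public class BucketingVersionExample {
  public static void main(String[] args) {
    // Map a wire value back to the enum constant (2 -> MURMUR_BUCKETING).
    BucketingVersion v = BucketingVersion.findByValue(2);
    System.out.println(v + " has wire value " + v.getValue());

    // Unknown wire values come back as null rather than throwing, so callers
    // reading metadata written by a newer client should guard against it.
    BucketingVersion unknown = BucketingVersion.findByValue(42);
    System.out.println(unknown == null ? "unknown bucketing version" : unknown.toString());
  }
}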
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index a132e5e..068e542 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -54,6 +54,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15);
private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16);
+ private static final org.apache.thrift.protocol.TField BUCKETING_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("bucketingVersion", org.apache.thrift.protocol.TType.I32, (short)17);
+ private static final org.apache.thrift.protocol.TField LOAD_IN_BUCKETED_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("loadInBucketedTable", org.apache.thrift.protocol.TType.BOOL, (short)18);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -77,6 +79,8 @@ import org.slf4j.LoggerFactory;
private boolean temporary; // optional
private boolean rewriteEnabled; // optional
private CreationMetadata creationMetadata; // optional
+ private BucketingVersion bucketingVersion; // optional
+ private boolean loadInBucketedTable; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -95,7 +99,13 @@ import org.slf4j.LoggerFactory;
PRIVILEGES((short)13, "privileges"),
TEMPORARY((short)14, "temporary"),
REWRITE_ENABLED((short)15, "rewriteEnabled"),
- CREATION_METADATA((short)16, "creationMetadata");
+ CREATION_METADATA((short)16, "creationMetadata"),
+ /**
+ *
+ * @see BucketingVersion
+ */
+ BUCKETING_VERSION((short)17, "bucketingVersion"),
+ LOAD_IN_BUCKETED_TABLE((short)18, "loadInBucketedTable");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -142,6 +152,10 @@ import org.slf4j.LoggerFactory;
return REWRITE_ENABLED;
case 16: // CREATION_METADATA
return CREATION_METADATA;
+ case 17: // BUCKETING_VERSION
+ return BUCKETING_VERSION;
+ case 18: // LOAD_IN_BUCKETED_TABLE
+ return LOAD_IN_BUCKETED_TABLE;
default:
return null;
}
@@ -187,8 +201,9 @@ import org.slf4j.LoggerFactory;
private static final int __RETENTION_ISSET_ID = 2;
private static final int __TEMPORARY_ISSET_ID = 3;
private static final int __REWRITEENABLED_ISSET_ID = 4;
+ private static final int __LOADINBUCKETEDTABLE_ISSET_ID = 5;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA};
+ private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.BUCKETING_VERSION,_Fields.LOAD_IN_BUCKETED_TABLE};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -227,6 +242,10 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creationMetadata", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "CreationMetadata")));
+ tmpMap.put(_Fields.BUCKETING_VERSION, new org.apache.thrift.meta_data.FieldMetaData("bucketingVersion", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, BucketingVersion.class)));
+ tmpMap.put(_Fields.LOAD_IN_BUCKETED_TABLE, new org.apache.thrift.meta_data.FieldMetaData("loadInBucketedTable", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
}
@@ -234,6 +253,10 @@ import org.slf4j.LoggerFactory;
public Table() {
this.temporary = false;
+ this.bucketingVersion = org.apache.hadoop.hive.metastore.api.BucketingVersion.JAVA_BUCKETING;
+
+ this.loadInBucketedTable = false;
+
}
public Table(
@@ -316,6 +339,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCreationMetadata()) {
this.creationMetadata = other.creationMetadata;
}
+ if (other.isSetBucketingVersion()) {
+ this.bucketingVersion = other.bucketingVersion;
+ }
+ this.loadInBucketedTable = other.loadInBucketedTable;
}
public Table deepCopy() {
@@ -345,6 +372,10 @@ import org.slf4j.LoggerFactory;
setRewriteEnabledIsSet(false);
this.rewriteEnabled = false;
this.creationMetadata = null;
+ this.bucketingVersion = org.apache.hadoop.hive.metastore.api.BucketingVersion.JAVA_BUCKETING;
+
+ this.loadInBucketedTable = false;
+
}
public String getTableName() {
@@ -736,6 +767,59 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see BucketingVersion
+ */
+ public BucketingVersion getBucketingVersion() {
+ return this.bucketingVersion;
+ }
+
+ /**
+ *
+ * @see BucketingVersion
+ */
+ public void setBucketingVersion(BucketingVersion bucketingVersion) {
+ this.bucketingVersion = bucketingVersion;
+ }
+
+ public void unsetBucketingVersion() {
+ this.bucketingVersion = null;
+ }
+
+ /** Returns true if field bucketingVersion is set (has been assigned a value) and false otherwise */
+ public boolean isSetBucketingVersion() {
+ return this.bucketingVersion != null;
+ }
+
+ public void setBucketingVersionIsSet(boolean value) {
+ if (!value) {
+ this.bucketingVersion = null;
+ }
+ }
+
+ public boolean isLoadInBucketedTable() {
+ return this.loadInBucketedTable;
+ }
+
+ public void setLoadInBucketedTable(boolean loadInBucketedTable) {
+ this.loadInBucketedTable = loadInBucketedTable;
+ setLoadInBucketedTableIsSet(true);
+ }
+
+ public void unsetLoadInBucketedTable() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOADINBUCKETEDTABLE_ISSET_ID);
+ }
+
+ /** Returns true if field loadInBucketedTable is set (has been assigned a value) and false otherwise */
+ public boolean isSetLoadInBucketedTable() {
+ return EncodingUtils.testBit(__isset_bitfield, __LOADINBUCKETEDTABLE_ISSET_ID);
+ }
+
+ public void setLoadInBucketedTableIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOADINBUCKETEDTABLE_ISSET_ID, value);
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_NAME:
@@ -866,6 +950,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case BUCKETING_VERSION:
+ if (value == null) {
+ unsetBucketingVersion();
+ } else {
+ setBucketingVersion((BucketingVersion)value);
+ }
+ break;
+
+ case LOAD_IN_BUCKETED_TABLE:
+ if (value == null) {
+ unsetLoadInBucketedTable();
+ } else {
+ setLoadInBucketedTable((Boolean)value);
+ }
+ break;
+
}
}
@@ -919,6 +1019,12 @@ import org.slf4j.LoggerFactory;
case CREATION_METADATA:
return getCreationMetadata();
+ case BUCKETING_VERSION:
+ return getBucketingVersion();
+
+ case LOAD_IN_BUCKETED_TABLE:
+ return isLoadInBucketedTable();
+
}
throw new IllegalStateException();
}
@@ -962,6 +1068,10 @@ import org.slf4j.LoggerFactory;
return isSetRewriteEnabled();
case CREATION_METADATA:
return isSetCreationMetadata();
+ case BUCKETING_VERSION:
+ return isSetBucketingVersion();
+ case LOAD_IN_BUCKETED_TABLE:
+ return isSetLoadInBucketedTable();
}
throw new IllegalStateException();
}
@@ -1123,6 +1233,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_bucketingVersion = true && this.isSetBucketingVersion();
+ boolean that_present_bucketingVersion = true && that.isSetBucketingVersion();
+ if (this_present_bucketingVersion || that_present_bucketingVersion) {
+ if (!(this_present_bucketingVersion && that_present_bucketingVersion))
+ return false;
+ if (!this.bucketingVersion.equals(that.bucketingVersion))
+ return false;
+ }
+
+ boolean this_present_loadInBucketedTable = true && this.isSetLoadInBucketedTable();
+ boolean that_present_loadInBucketedTable = true && that.isSetLoadInBucketedTable();
+ if (this_present_loadInBucketedTable || that_present_loadInBucketedTable) {
+ if (!(this_present_loadInBucketedTable && that_present_loadInBucketedTable))
+ return false;
+ if (this.loadInBucketedTable != that.loadInBucketedTable)
+ return false;
+ }
+
return true;
}
@@ -1210,6 +1338,16 @@ import org.slf4j.LoggerFactory;
if (present_creationMetadata)
list.add(creationMetadata);
+ boolean present_bucketingVersion = true && (isSetBucketingVersion());
+ list.add(present_bucketingVersion);
+ if (present_bucketingVersion)
+ list.add(bucketingVersion.getValue());
+
+ boolean present_loadInBucketedTable = true && (isSetLoadInBucketedTable());
+ list.add(present_loadInBucketedTable);
+ if (present_loadInBucketedTable)
+ list.add(loadInBucketedTable);
+
return list.hashCode();
}
@@ -1381,6 +1519,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetBucketingVersion()).compareTo(other.isSetBucketingVersion());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetBucketingVersion()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bucketingVersion, other.bucketingVersion);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetLoadInBucketedTable()).compareTo(other.isSetLoadInBucketedTable());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetLoadInBucketedTable()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.loadInBucketedTable, other.loadInBucketedTable);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1516,6 +1674,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetBucketingVersion()) {
+ if (!first) sb.append(", ");
+ sb.append("bucketingVersion:");
+ if (this.bucketingVersion == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.bucketingVersion);
+ }
+ first = false;
+ }
+ if (isSetLoadInBucketedTable()) {
+ if (!first) sb.append(", ");
+ sb.append("loadInBucketedTable:");
+ sb.append(this.loadInBucketedTable);
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1721,6 +1895,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 17: // BUCKETING_VERSION
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.bucketingVersion = org.apache.hadoop.hive.metastore.api.BucketingVersion.findByValue(iprot.readI32());
+ struct.setBucketingVersionIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 18: // LOAD_IN_BUCKETED_TABLE
+ if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+ struct.loadInBucketedTable = iprot.readBool();
+ struct.setLoadInBucketedTableIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1827,6 +2017,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.bucketingVersion != null) {
+ if (struct.isSetBucketingVersion()) {
+ oprot.writeFieldBegin(BUCKETING_VERSION_FIELD_DESC);
+ oprot.writeI32(struct.bucketingVersion.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isSetLoadInBucketedTable()) {
+ oprot.writeFieldBegin(LOAD_IN_BUCKETED_TABLE_FIELD_DESC);
+ oprot.writeBool(struct.loadInBucketedTable);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1893,7 +2095,13 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCreationMetadata()) {
optionals.set(15);
}
- oprot.writeBitSet(optionals, 16);
+ if (struct.isSetBucketingVersion()) {
+ optionals.set(16);
+ }
+ if (struct.isSetLoadInBucketedTable()) {
+ optionals.set(17);
+ }
+ oprot.writeBitSet(optionals, 18);
if (struct.isSetTableName()) {
oprot.writeString(struct.tableName);
}
@@ -1955,12 +2163,18 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCreationMetadata()) {
struct.creationMetadata.write(oprot);
}
+ if (struct.isSetBucketingVersion()) {
+ oprot.writeI32(struct.bucketingVersion.getValue());
+ }
+ if (struct.isSetLoadInBucketedTable()) {
+ oprot.writeBool(struct.loadInBucketedTable);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(16);
+ BitSet incoming = iprot.readBitSet(18);
if (incoming.get(0)) {
struct.tableName = iprot.readString();
struct.setTableNameIsSet(true);
@@ -2049,6 +2263,14 @@ import org.slf4j.LoggerFactory;
struct.creationMetadata.read(iprot);
struct.setCreationMetadataIsSet(true);
}
+ if (incoming.get(16)) {
+ struct.bucketingVersion = org.apache.hadoop.hive.metastore.api.BucketingVersion.findByValue(iprot.readI32());
+ struct.setBucketingVersionIsSet(true);
+ }
+ if (incoming.get(17)) {
+ struct.loadInBucketedTable = iprot.readBool();
+ struct.setLoadInBucketedTableIsSet(true);
+ }
}
}
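A hedged usage sketch (not from the patch) of the two new optional fields on the generated Table bean; the printed defaults follow the generated no-arg constructor shown above (JAVA_BUCKETING and false).

import org.apache.hadoop.hive.metastore.api.BucketingVersion;
import org.apache.hadoop.hive.metastore.api.Table;

public class TableBucketingExample {
  public static void main(String[] args) {
    Table t = new Table();
    // Defaults come from the generated no-arg constructor.
    System.out.println(t.getBucketingVersion());       // JAVA_BUCKETING
    System.out.println(t.isLoadInBucketedTable());      // false

    // Both fields are optional (field ids 17 and 18) with plain setters.
    t.setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
    t.setLoadInBucketedTable(true);
    System.out.println(t.isSetBucketingVersion());      // true
    System.out.println(t.isSetLoadInBucketedTable());   // true
  }
}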
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index a5b578e..687911e 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -142,6 +142,17 @@ final class EventRequestType {
);
}
+final class BucketingVersion {
+ const INVALID_BUCKETING = 0;
+ const JAVA_BUCKETING = 1;
+ const MURMUR_BUCKETING = 2;
+ static public $__names = array(
+ 0 => 'INVALID_BUCKETING',
+ 1 => 'JAVA_BUCKETING',
+ 2 => 'MURMUR_BUCKETING',
+ );
+}
+
final class FunctionType {
const JAVA = 1;
static public $__names = array(
@@ -5042,6 +5053,14 @@ class Table {
* @var \metastore\CreationMetadata
*/
public $creationMetadata = null;
+ /**
+ * @var int
+ */
+ public $bucketingVersion = 1;
+ /**
+ * @var bool
+ */
+ public $loadInBucketedTable = false;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -5126,6 +5145,14 @@ class Table {
'type' => TType::STRUCT,
'class' => '\metastore\CreationMetadata',
),
+ 17 => array(
+ 'var' => 'bucketingVersion',
+ 'type' => TType::I32,
+ ),
+ 18 => array(
+ 'var' => 'loadInBucketedTable',
+ 'type' => TType::BOOL,
+ ),
);
}
if (is_array($vals)) {
@@ -5177,6 +5204,12 @@ class Table {
if (isset($vals['creationMetadata'])) {
$this->creationMetadata = $vals['creationMetadata'];
}
+ if (isset($vals['bucketingVersion'])) {
+ $this->bucketingVersion = $vals['bucketingVersion'];
+ }
+ if (isset($vals['loadInBucketedTable'])) {
+ $this->loadInBucketedTable = $vals['loadInBucketedTable'];
+ }
}
}
@@ -5338,6 +5371,20 @@ class Table {
$xfer += $input->skip($ftype);
}
break;
+ case 17:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->bucketingVersion);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 18:
+ if ($ftype == TType::BOOL) {
+ $xfer += $input->readBool($this->loadInBucketedTable);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -5465,6 +5512,16 @@ class Table {
$xfer += $this->creationMetadata->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->bucketingVersion !== null) {
+ $xfer += $output->writeFieldBegin('bucketingVersion', TType::I32, 17);
+ $xfer += $output->writeI32($this->bucketingVersion);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->loadInBucketedTable !== null) {
+ $xfer += $output->writeFieldBegin('loadInBucketedTable', TType::BOOL, 18);
+ $xfer += $output->writeBool($this->loadInBucketedTable);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 5598859..86c1937 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -211,6 +211,23 @@ class EventRequestType:
"DELETE": 3,
}
+class BucketingVersion:
+ INVALID_BUCKETING = 0
+ JAVA_BUCKETING = 1
+ MURMUR_BUCKETING = 2
+
+ _VALUES_TO_NAMES = {
+ 0: "INVALID_BUCKETING",
+ 1: "JAVA_BUCKETING",
+ 2: "MURMUR_BUCKETING",
+ }
+
+ _NAMES_TO_VALUES = {
+ "INVALID_BUCKETING": 0,
+ "JAVA_BUCKETING": 1,
+ "MURMUR_BUCKETING": 2,
+ }
+
class FunctionType:
JAVA = 1
@@ -3468,6 +3485,8 @@ class Table:
- temporary
- rewriteEnabled
- creationMetadata
+ - bucketingVersion
+ - loadInBucketedTable
"""
thrift_spec = (
@@ -3488,9 +3507,11 @@ class Table:
(14, TType.BOOL, 'temporary', None, False, ), # 14
(15, TType.BOOL, 'rewriteEnabled', None, None, ), # 15
(16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16
+ (17, TType.I32, 'bucketingVersion', None, 1, ), # 17
+ (18, TType.BOOL, 'loadInBucketedTable', None, False, ), # 18
)
- def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None,):
+ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, bucketingVersion=thrift_spec[17][4], loadInBucketedTable=thrift_spec[18][4],):
self.tableName = tableName
self.dbName = dbName
self.owner = owner
@@ -3507,6 +3528,8 @@ class Table:
self.temporary = temporary
self.rewriteEnabled = rewriteEnabled
self.creationMetadata = creationMetadata
+ self.bucketingVersion = bucketingVersion
+ self.loadInBucketedTable = loadInBucketedTable
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -3612,6 +3635,16 @@ class Table:
self.creationMetadata.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 17:
+ if ftype == TType.I32:
+ self.bucketingVersion = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 18:
+ if ftype == TType.BOOL:
+ self.loadInBucketedTable = iprot.readBool()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -3693,6 +3726,14 @@ class Table:
oprot.writeFieldBegin('creationMetadata', TType.STRUCT, 16)
self.creationMetadata.write(oprot)
oprot.writeFieldEnd()
+ if self.bucketingVersion is not None:
+ oprot.writeFieldBegin('bucketingVersion', TType.I32, 17)
+ oprot.writeI32(self.bucketingVersion)
+ oprot.writeFieldEnd()
+ if self.loadInBucketedTable is not None:
+ oprot.writeFieldBegin('loadInBucketedTable', TType.BOOL, 18)
+ oprot.writeBool(self.loadInBucketedTable)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -3718,6 +3759,8 @@ class Table:
value = (value * 31) ^ hash(self.temporary)
value = (value * 31) ^ hash(self.rewriteEnabled)
value = (value * 31) ^ hash(self.creationMetadata)
+ value = (value * 31) ^ hash(self.bucketingVersion)
+ value = (value * 31) ^ hash(self.loadInBucketedTable)
return value
def __repr__(self):
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index bc58cfe..2c3edb7 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -98,6 +98,14 @@ module EventRequestType
VALID_VALUES = Set.new([INSERT, UPDATE, DELETE]).freeze
end
+module BucketingVersion
+ INVALID_BUCKETING = 0
+ JAVA_BUCKETING = 1
+ MURMUR_BUCKETING = 2
+ VALUE_MAP = {0 => "INVALID_BUCKETING", 1 => "JAVA_BUCKETING", 2 => "MURMUR_BUCKETING"}
+ VALID_VALUES = Set.new([INVALID_BUCKETING, JAVA_BUCKETING, MURMUR_BUCKETING]).freeze
+end
+
module FunctionType
JAVA = 1
VALUE_MAP = {1 => "JAVA"}
@@ -810,6 +818,8 @@ class Table
TEMPORARY = 14
REWRITEENABLED = 15
CREATIONMETADATA = 16
+ BUCKETINGVERSION = 17
+ LOADINBUCKETEDTABLE = 18
FIELDS = {
TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -827,12 +837,17 @@ class Table
PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true},
REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true},
- CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true}
+ CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true},
+ BUCKETINGVERSION => {:type => ::Thrift::Types::I32, :name => 'bucketingVersion', :default => 1, :optional => true, :enum_class => ::BucketingVersion},
+ LOADINBUCKETEDTABLE => {:type => ::Thrift::Types::BOOL, :name => 'loadInBucketedTable', :default => false, :optional => true}
}
def struct_fields; FIELDS; end
def validate
+ unless @bucketingVersion.nil? || ::BucketingVersion::VALID_VALUES.include?(@bucketingVersion)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field bucketingVersion!')
+ end
end
::Thrift::Struct.generate_accessors self
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index d58ed67..7003fa8 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1688,6 +1688,8 @@ public class ObjectStore implements RawStore, Configurable {
convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
t.setRewriteEnabled(mtbl.isRewriteEnabled());
+ t.setBucketingVersion(mtbl.getBucketingVersion());
+ t.setLoadInBucketedTable(mtbl.isLoadInBucketedTable());
return t;
}
@@ -1726,7 +1728,8 @@ public class ObjectStore implements RawStore, Configurable {
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
- tableType);
+ tableType,
+ tbl.getBucketingVersion(), tbl.isLoadInBucketedTable());
}
private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index a38a125..d97826b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.metastore.model;
+import org.apache.hadoop.hive.metastore.api.BucketingVersion;
+
import java.util.List;
import java.util.Map;
@@ -36,6 +38,8 @@ public class MTable {
private String viewExpandedText;
private boolean rewriteEnabled;
private String tableType;
+ private BucketingVersion bucketingVersion;
+ private boolean loadInBucketedTable;
public MTable() {}
@@ -56,7 +60,8 @@ public class MTable {
public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner,
int createTime, int lastAccessTime, int retention, List<MFieldSchema> partitionKeys,
Map<String, String> parameters, String viewOriginalText, String viewExpandedText,
- boolean rewriteEnabled, String tableType) {
+ boolean rewriteEnabled,
+ String tableType, BucketingVersion bucketingVersion, boolean loadInBucketedTable) {
this.tableName = tableName;
this.database = database;
this.sd = sd;
@@ -70,6 +75,8 @@ public class MTable {
this.viewExpandedText = viewExpandedText;
this.rewriteEnabled = rewriteEnabled;
this.tableType = tableType;
+ this.bucketingVersion = bucketingVersion;
+ this.loadInBucketedTable = loadInBucketedTable;
}
/**
@@ -253,4 +260,32 @@ public class MTable {
public String getTableType() {
return tableType;
}
+
+ /**
+ * @param bucketingVersion used in bucketed table
+ */
+ public void setBucketingVersion(BucketingVersion bucketingVersion) {
+ this.bucketingVersion = bucketingVersion;
+ }
+
+ /**
+ * @return the bucketingVersion
+ */
+ public BucketingVersion getBucketingVersion() {
+ return bucketingVersion;
+ }
+ /**
+ * @param loadInBucketedTable
+ */
+ public void setLoadInBucketedTable(boolean loadInBucketedTable) {
+ this.loadInBucketedTable = loadInBucketedTable;
+ }
+
+ /**
+ * @return the loadInBucketedTable flag
+ */
+ public boolean isLoadInBucketedTable() {
+ return loadInBucketedTable;
+ }
+
}
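For illustration only, a sketch of the widened MTable constructor; the null MDatabase/MStorageDescriptor arguments and empty collections are placeholders, not values the metastore would actually persist.

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.BucketingVersion;
import org.apache.hadoop.hive.metastore.model.MTable;

public class MTableExample {
  public static void main(String[] args) {
    // The two new trailing arguments carry the bucketing hash version and the
    // load-data-into-bucketed-table flag added by this patch.
    MTable m = new MTable("t1", null, null, "hive", 0, 0, 0,
        Collections.emptyList(), Collections.emptyMap(), null, null,
        false, "MANAGED_TABLE",
        BucketingVersion.JAVA_BUCKETING, false);
    System.out.println(m.getBucketingVersion() + " / " + m.isLoadInBucketedTable());
  }
}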
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 371b975..cf4f64f 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -310,6 +310,13 @@ struct StorageDescriptor {
12: optional bool storedAsSubDirectories // stored as subdirectories or not
}
+// Hash version for bucketing table
+enum BucketingVersion {
+ INVALID_BUCKETING = 0,
+ JAVA_BUCKETING = 1,
+ MURMUR_BUCKETING = 2,
+}
+
// table information
struct Table {
1: string tableName, // name of the table
@@ -327,7 +334,9 @@ struct Table {
13: optional PrincipalPrivilegeSet privileges,
14: optional bool temporary=false,
15: optional bool rewriteEnabled, // rewrite enabled or not
- 16: optional CreationMetadata creationMetadata // only for MVs, it stores table names used and txn list at MV creation
+ 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
+ 17: optional BucketingVersion bucketingVersion = BucketingVersion.JAVA_BUCKETING, // For bucketed tables only. Default: 1 for existing tables, 2 for new tables.
+ 18: optional bool loadInBucketedTable = false // For bucketed table only. Default : false. true if user loads data using “load data” command.
}
struct Partition {
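Since both new fields are optional (ids 17 and 18) with IDL defaults, readers that predate this change skip them, and readers of older data fall back to the defaults when the fields are absent. Below is a hedged round-trip sketch using the standard libthrift in-memory transport (TMemoryBuffer) and TBinaryProtocol.

import org.apache.hadoop.hive.metastore.api.BucketingVersion;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class TableRoundTrip {
  public static void main(String[] args) throws Exception {
    Table original = new Table();
    original.setTableName("t1");
    original.setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
    original.setLoadInBucketedTable(true);

    // Serialize and deserialize through an in-memory transport.
    TMemoryBuffer buffer = new TMemoryBuffer(1024);
    original.write(new TBinaryProtocol(buffer));
    Table copy = new Table();
    copy.read(new TBinaryProtocol(buffer));

    System.out.println(copy.getBucketingVersion() + " / " + copy.isLoadInBucketedTable());
  }
}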
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index bd61df6..09bed5d 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -29,21 +29,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
import org.apache.hadoop.hive.metastore.ObjectStore;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -225,6 +211,8 @@ public class TestCachedStore {
Table tbl1 =
new Table(tblName1, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams,
null, null, TableType.MANAGED_TABLE.toString());
+ tbl1.setBucketingVersion(BucketingVersion.JAVA_BUCKETING);
+ tbl1.setLoadInBucketedTable(false);
cachedStore.createTable(tbl1);
tbl1 = cachedStore.getTable(dbName, tblName1);
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 00f38ee..f10be72 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -21,18 +21,7 @@ package org.apache.hadoop.hive.metastore.client;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
@@ -224,6 +213,9 @@ public class TestTablesCreateDropAlterTruncate {
public void testCreateGetDeleteTable() throws Exception {
// Try to create a table with all of the parameters set
Table table = getTableWithAllParametersSet();
+ // Set the new optional parameters, which getTableWithAllParametersSet does not set
+ table.setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
+ table.setLoadInBucketedTable(false);
client.createTable(table);
Table createdTable = client.getTable(table.getDbName(), table.getTableName());
// The createTime will be set on the server side, so the comparison should skip it
@@ -684,6 +676,9 @@ public class TestTablesCreateDropAlterTruncate {
// Partition keys can not be set, but getTableWithAllParametersSet is added one, so remove for
// this test
newTable.setPartitionKeys(originalTable.getPartitionKeys());
+ // Set the optional bucketingVersion and loadInBucketedTable with default values
+ newTable.setBucketingVersion(BucketingVersion.JAVA_BUCKETING);
+ newTable.setLoadInBucketedTable(false);
client.alter_table(originalDatabase, originalTableName, newTable);
Table alteredTable = client.getTable(originalDatabase, originalTableName);
[2/7] hive git commit: HIVE-18350 : load data should rename files consistent with insert statements. (Deepak Jaiswal, reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Posted by dj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index aadf8f1..2169265 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -149,6 +149,18 @@ const char* _kEventRequestTypeNames[] = {
};
const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kEventRequestTypeValues, _kEventRequestTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+int _kBucketingVersionValues[] = {
+ BucketingVersion::INVALID_BUCKETING,
+ BucketingVersion::JAVA_BUCKETING,
+ BucketingVersion::MURMUR_BUCKETING
+};
+const char* _kBucketingVersionNames[] = {
+ "INVALID_BUCKETING",
+ "JAVA_BUCKETING",
+ "MURMUR_BUCKETING"
+};
+const std::map<int, const char*> _BucketingVersion_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kBucketingVersionValues, _kBucketingVersionNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
int _kFunctionTypeValues[] = {
FunctionType::JAVA
};
@@ -4945,6 +4957,16 @@ void Table::__set_creationMetadata(const CreationMetadata& val) {
__isset.creationMetadata = true;
}
+void Table::__set_bucketingVersion(const BucketingVersion::type val) {
+ this->bucketingVersion = val;
+__isset.bucketingVersion = true;
+}
+
+void Table::__set_loadInBucketedTable(const bool val) {
+ this->loadInBucketedTable = val;
+__isset.loadInBucketedTable = true;
+}
+
uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -5121,6 +5143,24 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 17:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast223;
+ xfer += iprot->readI32(ecast223);
+ this->bucketingVersion = (BucketingVersion::type)ecast223;
+ this->__isset.bucketingVersion = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 18:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->loadInBucketedTable);
+ this->__isset.loadInBucketedTable = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -5169,10 +5209,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
- std::vector<FieldSchema> ::const_iterator _iter223;
- for (_iter223 = this->partitionKeys.begin(); _iter223 != this->partitionKeys.end(); ++_iter223)
+ std::vector<FieldSchema> ::const_iterator _iter224;
+ for (_iter224 = this->partitionKeys.begin(); _iter224 != this->partitionKeys.end(); ++_iter224)
{
- xfer += (*_iter223).write(oprot);
+ xfer += (*_iter224).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5181,11 +5221,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter224;
- for (_iter224 = this->parameters.begin(); _iter224 != this->parameters.end(); ++_iter224)
+ std::map<std::string, std::string> ::const_iterator _iter225;
+ for (_iter225 = this->parameters.begin(); _iter225 != this->parameters.end(); ++_iter225)
{
- xfer += oprot->writeString(_iter224->first);
- xfer += oprot->writeString(_iter224->second);
+ xfer += oprot->writeString(_iter225->first);
+ xfer += oprot->writeString(_iter225->second);
}
xfer += oprot->writeMapEnd();
}
@@ -5223,6 +5263,16 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += this->creationMetadata.write(oprot);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.bucketingVersion) {
+ xfer += oprot->writeFieldBegin("bucketingVersion", ::apache::thrift::protocol::T_I32, 17);
+ xfer += oprot->writeI32((int32_t)this->bucketingVersion);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.loadInBucketedTable) {
+ xfer += oprot->writeFieldBegin("loadInBucketedTable", ::apache::thrift::protocol::T_BOOL, 18);
+ xfer += oprot->writeBool(this->loadInBucketedTable);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -5246,29 +5296,12 @@ void swap(Table &a, Table &b) {
swap(a.temporary, b.temporary);
swap(a.rewriteEnabled, b.rewriteEnabled);
swap(a.creationMetadata, b.creationMetadata);
+ swap(a.bucketingVersion, b.bucketingVersion);
+ swap(a.loadInBucketedTable, b.loadInBucketedTable);
swap(a.__isset, b.__isset);
}
-Table::Table(const Table& other225) {
- tableName = other225.tableName;
- dbName = other225.dbName;
- owner = other225.owner;
- createTime = other225.createTime;
- lastAccessTime = other225.lastAccessTime;
- retention = other225.retention;
- sd = other225.sd;
- partitionKeys = other225.partitionKeys;
- parameters = other225.parameters;
- viewOriginalText = other225.viewOriginalText;
- viewExpandedText = other225.viewExpandedText;
- tableType = other225.tableType;
- privileges = other225.privileges;
- temporary = other225.temporary;
- rewriteEnabled = other225.rewriteEnabled;
- creationMetadata = other225.creationMetadata;
- __isset = other225.__isset;
-}
-Table& Table::operator=(const Table& other226) {
+Table::Table(const Table& other226) {
tableName = other226.tableName;
dbName = other226.dbName;
owner = other226.owner;
@@ -5285,7 +5318,30 @@ Table& Table::operator=(const Table& other226) {
temporary = other226.temporary;
rewriteEnabled = other226.rewriteEnabled;
creationMetadata = other226.creationMetadata;
+ bucketingVersion = other226.bucketingVersion;
+ loadInBucketedTable = other226.loadInBucketedTable;
__isset = other226.__isset;
+}
+Table& Table::operator=(const Table& other227) {
+ tableName = other227.tableName;
+ dbName = other227.dbName;
+ owner = other227.owner;
+ createTime = other227.createTime;
+ lastAccessTime = other227.lastAccessTime;
+ retention = other227.retention;
+ sd = other227.sd;
+ partitionKeys = other227.partitionKeys;
+ parameters = other227.parameters;
+ viewOriginalText = other227.viewOriginalText;
+ viewExpandedText = other227.viewExpandedText;
+ tableType = other227.tableType;
+ privileges = other227.privileges;
+ temporary = other227.temporary;
+ rewriteEnabled = other227.rewriteEnabled;
+ creationMetadata = other227.creationMetadata;
+ bucketingVersion = other227.bucketingVersion;
+ loadInBucketedTable = other227.loadInBucketedTable;
+ __isset = other227.__isset;
return *this;
}
void Table::printTo(std::ostream& out) const {
@@ -5307,6 +5363,8 @@ void Table::printTo(std::ostream& out) const {
out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "<null>"));
out << ", " << "rewriteEnabled="; (__isset.rewriteEnabled ? (out << to_string(rewriteEnabled)) : (out << "<null>"));
out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "<null>"));
+ out << ", " << "bucketingVersion="; (__isset.bucketingVersion ? (out << to_string(bucketingVersion)) : (out << "<null>"));
+ out << ", " << "loadInBucketedTable="; (__isset.loadInBucketedTable ? (out << to_string(loadInBucketedTable)) : (out << "<null>"));
out << ")";
}
@@ -5373,14 +5431,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size227;
- ::apache::thrift::protocol::TType _etype230;
- xfer += iprot->readListBegin(_etype230, _size227);
- this->values.resize(_size227);
- uint32_t _i231;
- for (_i231 = 0; _i231 < _size227; ++_i231)
+ uint32_t _size228;
+ ::apache::thrift::protocol::TType _etype231;
+ xfer += iprot->readListBegin(_etype231, _size228);
+ this->values.resize(_size228);
+ uint32_t _i232;
+ for (_i232 = 0; _i232 < _size228; ++_i232)
{
- xfer += iprot->readString(this->values[_i231]);
+ xfer += iprot->readString(this->values[_i232]);
}
xfer += iprot->readListEnd();
}
@@ -5433,17 +5491,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size232;
- ::apache::thrift::protocol::TType _ktype233;
- ::apache::thrift::protocol::TType _vtype234;
- xfer += iprot->readMapBegin(_ktype233, _vtype234, _size232);
- uint32_t _i236;
- for (_i236 = 0; _i236 < _size232; ++_i236)
+ uint32_t _size233;
+ ::apache::thrift::protocol::TType _ktype234;
+ ::apache::thrift::protocol::TType _vtype235;
+ xfer += iprot->readMapBegin(_ktype234, _vtype235, _size233);
+ uint32_t _i237;
+ for (_i237 = 0; _i237 < _size233; ++_i237)
{
- std::string _key237;
- xfer += iprot->readString(_key237);
- std::string& _val238 = this->parameters[_key237];
- xfer += iprot->readString(_val238);
+ std::string _key238;
+ xfer += iprot->readString(_key238);
+ std::string& _val239 = this->parameters[_key238];
+ xfer += iprot->readString(_val239);
}
xfer += iprot->readMapEnd();
}
@@ -5480,10 +5538,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter239;
- for (_iter239 = this->values.begin(); _iter239 != this->values.end(); ++_iter239)
+ std::vector<std::string> ::const_iterator _iter240;
+ for (_iter240 = this->values.begin(); _iter240 != this->values.end(); ++_iter240)
{
- xfer += oprot->writeString((*_iter239));
+ xfer += oprot->writeString((*_iter240));
}
xfer += oprot->writeListEnd();
}
@@ -5512,11 +5570,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter240;
- for (_iter240 = this->parameters.begin(); _iter240 != this->parameters.end(); ++_iter240)
+ std::map<std::string, std::string> ::const_iterator _iter241;
+ for (_iter241 = this->parameters.begin(); _iter241 != this->parameters.end(); ++_iter241)
{
- xfer += oprot->writeString(_iter240->first);
- xfer += oprot->writeString(_iter240->second);
+ xfer += oprot->writeString(_iter241->first);
+ xfer += oprot->writeString(_iter241->second);
}
xfer += oprot->writeMapEnd();
}
@@ -5545,18 +5603,7 @@ void swap(Partition &a, Partition &b) {
swap(a.__isset, b.__isset);
}
-Partition::Partition(const Partition& other241) {
- values = other241.values;
- dbName = other241.dbName;
- tableName = other241.tableName;
- createTime = other241.createTime;
- lastAccessTime = other241.lastAccessTime;
- sd = other241.sd;
- parameters = other241.parameters;
- privileges = other241.privileges;
- __isset = other241.__isset;
-}
-Partition& Partition::operator=(const Partition& other242) {
+Partition::Partition(const Partition& other242) {
values = other242.values;
dbName = other242.dbName;
tableName = other242.tableName;
@@ -5566,6 +5613,17 @@ Partition& Partition::operator=(const Partition& other242) {
parameters = other242.parameters;
privileges = other242.privileges;
__isset = other242.__isset;
+}
+Partition& Partition::operator=(const Partition& other243) {
+ values = other243.values;
+ dbName = other243.dbName;
+ tableName = other243.tableName;
+ createTime = other243.createTime;
+ lastAccessTime = other243.lastAccessTime;
+ sd = other243.sd;
+ parameters = other243.parameters;
+ privileges = other243.privileges;
+ __isset = other243.__isset;
return *this;
}
void Partition::printTo(std::ostream& out) const {
@@ -5637,14 +5695,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size243;
- ::apache::thrift::protocol::TType _etype246;
- xfer += iprot->readListBegin(_etype246, _size243);
- this->values.resize(_size243);
- uint32_t _i247;
- for (_i247 = 0; _i247 < _size243; ++_i247)
+ uint32_t _size244;
+ ::apache::thrift::protocol::TType _etype247;
+ xfer += iprot->readListBegin(_etype247, _size244);
+ this->values.resize(_size244);
+ uint32_t _i248;
+ for (_i248 = 0; _i248 < _size244; ++_i248)
{
- xfer += iprot->readString(this->values[_i247]);
+ xfer += iprot->readString(this->values[_i248]);
}
xfer += iprot->readListEnd();
}
@@ -5681,17 +5739,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size248;
- ::apache::thrift::protocol::TType _ktype249;
- ::apache::thrift::protocol::TType _vtype250;
- xfer += iprot->readMapBegin(_ktype249, _vtype250, _size248);
- uint32_t _i252;
- for (_i252 = 0; _i252 < _size248; ++_i252)
+ uint32_t _size249;
+ ::apache::thrift::protocol::TType _ktype250;
+ ::apache::thrift::protocol::TType _vtype251;
+ xfer += iprot->readMapBegin(_ktype250, _vtype251, _size249);
+ uint32_t _i253;
+ for (_i253 = 0; _i253 < _size249; ++_i253)
{
- std::string _key253;
- xfer += iprot->readString(_key253);
- std::string& _val254 = this->parameters[_key253];
- xfer += iprot->readString(_val254);
+ std::string _key254;
+ xfer += iprot->readString(_key254);
+ std::string& _val255 = this->parameters[_key254];
+ xfer += iprot->readString(_val255);
}
xfer += iprot->readMapEnd();
}
@@ -5728,10 +5786,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter255;
- for (_iter255 = this->values.begin(); _iter255 != this->values.end(); ++_iter255)
+ std::vector<std::string> ::const_iterator _iter256;
+ for (_iter256 = this->values.begin(); _iter256 != this->values.end(); ++_iter256)
{
- xfer += oprot->writeString((*_iter255));
+ xfer += oprot->writeString((*_iter256));
}
xfer += oprot->writeListEnd();
}
@@ -5752,11 +5810,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter256;
- for (_iter256 = this->parameters.begin(); _iter256 != this->parameters.end(); ++_iter256)
+ std::map<std::string, std::string> ::const_iterator _iter257;
+ for (_iter257 = this->parameters.begin(); _iter257 != this->parameters.end(); ++_iter257)
{
- xfer += oprot->writeString(_iter256->first);
- xfer += oprot->writeString(_iter256->second);
+ xfer += oprot->writeString(_iter257->first);
+ xfer += oprot->writeString(_iter257->second);
}
xfer += oprot->writeMapEnd();
}
@@ -5783,16 +5841,7 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other257) {
- values = other257.values;
- createTime = other257.createTime;
- lastAccessTime = other257.lastAccessTime;
- relativePath = other257.relativePath;
- parameters = other257.parameters;
- privileges = other257.privileges;
- __isset = other257.__isset;
-}
-PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other258) {
+PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other258) {
values = other258.values;
createTime = other258.createTime;
lastAccessTime = other258.lastAccessTime;
@@ -5800,6 +5849,15 @@ PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& othe
parameters = other258.parameters;
privileges = other258.privileges;
__isset = other258.__isset;
+}
+PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other259) {
+ values = other259.values;
+ createTime = other259.createTime;
+ lastAccessTime = other259.lastAccessTime;
+ relativePath = other259.relativePath;
+ parameters = other259.parameters;
+ privileges = other259.privileges;
+ __isset = other259.__isset;
return *this;
}
void PartitionWithoutSD::printTo(std::ostream& out) const {
@@ -5852,14 +5910,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size259;
- ::apache::thrift::protocol::TType _etype262;
- xfer += iprot->readListBegin(_etype262, _size259);
- this->partitions.resize(_size259);
- uint32_t _i263;
- for (_i263 = 0; _i263 < _size259; ++_i263)
+ uint32_t _size260;
+ ::apache::thrift::protocol::TType _etype263;
+ xfer += iprot->readListBegin(_etype263, _size260);
+ this->partitions.resize(_size260);
+ uint32_t _i264;
+ for (_i264 = 0; _i264 < _size260; ++_i264)
{
- xfer += this->partitions[_i263].read(iprot);
+ xfer += this->partitions[_i264].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5896,10 +5954,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<PartitionWithoutSD> ::const_iterator _iter264;
- for (_iter264 = this->partitions.begin(); _iter264 != this->partitions.end(); ++_iter264)
+ std::vector<PartitionWithoutSD> ::const_iterator _iter265;
+ for (_iter265 = this->partitions.begin(); _iter265 != this->partitions.end(); ++_iter265)
{
- xfer += (*_iter264).write(oprot);
+ xfer += (*_iter265).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5921,15 +5979,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other265) {
- partitions = other265.partitions;
- sd = other265.sd;
- __isset = other265.__isset;
-}
-PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other266) {
+PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other266) {
partitions = other266.partitions;
sd = other266.sd;
__isset = other266.__isset;
+}
+PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other267) {
+ partitions = other267.partitions;
+ sd = other267.sd;
+ __isset = other267.__isset;
return *this;
}
void PartitionSpecWithSharedSD::printTo(std::ostream& out) const {
@@ -5974,14 +6032,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size267;
- ::apache::thrift::protocol::TType _etype270;
- xfer += iprot->readListBegin(_etype270, _size267);
- this->partitions.resize(_size267);
- uint32_t _i271;
- for (_i271 = 0; _i271 < _size267; ++_i271)
+ uint32_t _size268;
+ ::apache::thrift::protocol::TType _etype271;
+ xfer += iprot->readListBegin(_etype271, _size268);
+ this->partitions.resize(_size268);
+ uint32_t _i272;
+ for (_i272 = 0; _i272 < _size268; ++_i272)
{
- xfer += this->partitions[_i271].read(iprot);
+ xfer += this->partitions[_i272].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -6010,10 +6068,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter272;
- for (_iter272 = this->partitions.begin(); _iter272 != this->partitions.end(); ++_iter272)
+ std::vector<Partition> ::const_iterator _iter273;
+ for (_iter273 = this->partitions.begin(); _iter273 != this->partitions.end(); ++_iter273)
{
- xfer += (*_iter272).write(oprot);
+ xfer += (*_iter273).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6030,13 +6088,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) {
swap(a.__isset, b.__isset);
}
-PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other273) {
- partitions = other273.partitions;
- __isset = other273.__isset;
-}
-PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other274) {
+PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other274) {
partitions = other274.partitions;
__isset = other274.__isset;
+}
+PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other275) {
+ partitions = other275.partitions;
+ __isset = other275.__isset;
return *this;
}
void PartitionListComposingSpec::printTo(std::ostream& out) const {
@@ -6188,21 +6246,21 @@ void swap(PartitionSpec &a, PartitionSpec &b) {
swap(a.__isset, b.__isset);
}
-PartitionSpec::PartitionSpec(const PartitionSpec& other275) {
- dbName = other275.dbName;
- tableName = other275.tableName;
- rootPath = other275.rootPath;
- sharedSDPartitionSpec = other275.sharedSDPartitionSpec;
- partitionList = other275.partitionList;
- __isset = other275.__isset;
-}
-PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other276) {
+PartitionSpec::PartitionSpec(const PartitionSpec& other276) {
dbName = other276.dbName;
tableName = other276.tableName;
rootPath = other276.rootPath;
sharedSDPartitionSpec = other276.sharedSDPartitionSpec;
partitionList = other276.partitionList;
__isset = other276.__isset;
+}
+PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other277) {
+ dbName = other277.dbName;
+ tableName = other277.tableName;
+ rootPath = other277.rootPath;
+ sharedSDPartitionSpec = other277.sharedSDPartitionSpec;
+ partitionList = other277.partitionList;
+ __isset = other277.__isset;
return *this;
}
void PartitionSpec::printTo(std::ostream& out) const {
@@ -6350,17 +6408,17 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size277;
- ::apache::thrift::protocol::TType _ktype278;
- ::apache::thrift::protocol::TType _vtype279;
- xfer += iprot->readMapBegin(_ktype278, _vtype279, _size277);
- uint32_t _i281;
- for (_i281 = 0; _i281 < _size277; ++_i281)
+ uint32_t _size278;
+ ::apache::thrift::protocol::TType _ktype279;
+ ::apache::thrift::protocol::TType _vtype280;
+ xfer += iprot->readMapBegin(_ktype279, _vtype280, _size278);
+ uint32_t _i282;
+ for (_i282 = 0; _i282 < _size278; ++_i282)
{
- std::string _key282;
- xfer += iprot->readString(_key282);
- std::string& _val283 = this->parameters[_key282];
- xfer += iprot->readString(_val283);
+ std::string _key283;
+ xfer += iprot->readString(_key283);
+ std::string& _val284 = this->parameters[_key283];
+ xfer += iprot->readString(_val284);
}
xfer += iprot->readMapEnd();
}
@@ -6429,11 +6487,11 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter284;
- for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284)
+ std::map<std::string, std::string> ::const_iterator _iter285;
+ for (_iter285 = this->parameters.begin(); _iter285 != this->parameters.end(); ++_iter285)
{
- xfer += oprot->writeString(_iter284->first);
- xfer += oprot->writeString(_iter284->second);
+ xfer += oprot->writeString(_iter285->first);
+ xfer += oprot->writeString(_iter285->second);
}
xfer += oprot->writeMapEnd();
}
@@ -6463,20 +6521,7 @@ void swap(Index &a, Index &b) {
swap(a.__isset, b.__isset);
}
-Index::Index(const Index& other285) {
- indexName = other285.indexName;
- indexHandlerClass = other285.indexHandlerClass;
- dbName = other285.dbName;
- origTableName = other285.origTableName;
- createTime = other285.createTime;
- lastAccessTime = other285.lastAccessTime;
- indexTableName = other285.indexTableName;
- sd = other285.sd;
- parameters = other285.parameters;
- deferredRebuild = other285.deferredRebuild;
- __isset = other285.__isset;
-}
-Index& Index::operator=(const Index& other286) {
+Index::Index(const Index& other286) {
indexName = other286.indexName;
indexHandlerClass = other286.indexHandlerClass;
dbName = other286.dbName;
@@ -6488,6 +6533,19 @@ Index& Index::operator=(const Index& other286) {
parameters = other286.parameters;
deferredRebuild = other286.deferredRebuild;
__isset = other286.__isset;
+}
+Index& Index::operator=(const Index& other287) {
+ indexName = other287.indexName;
+ indexHandlerClass = other287.indexHandlerClass;
+ dbName = other287.dbName;
+ origTableName = other287.origTableName;
+ createTime = other287.createTime;
+ lastAccessTime = other287.lastAccessTime;
+ indexTableName = other287.indexTableName;
+ sd = other287.sd;
+ parameters = other287.parameters;
+ deferredRebuild = other287.deferredRebuild;
+ __isset = other287.__isset;
return *this;
}
void Index::printTo(std::ostream& out) const {
@@ -6638,19 +6696,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other287) {
- numTrues = other287.numTrues;
- numFalses = other287.numFalses;
- numNulls = other287.numNulls;
- bitVectors = other287.bitVectors;
- __isset = other287.__isset;
-}
-BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other288) {
+BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other288) {
numTrues = other288.numTrues;
numFalses = other288.numFalses;
numNulls = other288.numNulls;
bitVectors = other288.bitVectors;
__isset = other288.__isset;
+}
+BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other289) {
+ numTrues = other289.numTrues;
+ numFalses = other289.numFalses;
+ numNulls = other289.numNulls;
+ bitVectors = other289.bitVectors;
+ __isset = other289.__isset;
return *this;
}
void BooleanColumnStatsData::printTo(std::ostream& out) const {
@@ -6813,21 +6871,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other289) {
- lowValue = other289.lowValue;
- highValue = other289.highValue;
- numNulls = other289.numNulls;
- numDVs = other289.numDVs;
- bitVectors = other289.bitVectors;
- __isset = other289.__isset;
-}
-DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other290) {
+DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other290) {
lowValue = other290.lowValue;
highValue = other290.highValue;
numNulls = other290.numNulls;
numDVs = other290.numDVs;
bitVectors = other290.bitVectors;
__isset = other290.__isset;
+}
+DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other291) {
+ lowValue = other291.lowValue;
+ highValue = other291.highValue;
+ numNulls = other291.numNulls;
+ numDVs = other291.numDVs;
+ bitVectors = other291.bitVectors;
+ __isset = other291.__isset;
return *this;
}
void DoubleColumnStatsData::printTo(std::ostream& out) const {
@@ -6991,21 +7049,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other291) {
- lowValue = other291.lowValue;
- highValue = other291.highValue;
- numNulls = other291.numNulls;
- numDVs = other291.numDVs;
- bitVectors = other291.bitVectors;
- __isset = other291.__isset;
-}
-LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other292) {
+LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other292) {
lowValue = other292.lowValue;
highValue = other292.highValue;
numNulls = other292.numNulls;
numDVs = other292.numDVs;
bitVectors = other292.bitVectors;
__isset = other292.__isset;
+}
+LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other293) {
+ lowValue = other293.lowValue;
+ highValue = other293.highValue;
+ numNulls = other293.numNulls;
+ numDVs = other293.numDVs;
+ bitVectors = other293.bitVectors;
+ __isset = other293.__isset;
return *this;
}
void LongColumnStatsData::printTo(std::ostream& out) const {
@@ -7171,21 +7229,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other293) {
- maxColLen = other293.maxColLen;
- avgColLen = other293.avgColLen;
- numNulls = other293.numNulls;
- numDVs = other293.numDVs;
- bitVectors = other293.bitVectors;
- __isset = other293.__isset;
-}
-StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other294) {
+StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other294) {
maxColLen = other294.maxColLen;
avgColLen = other294.avgColLen;
numNulls = other294.numNulls;
numDVs = other294.numDVs;
bitVectors = other294.bitVectors;
__isset = other294.__isset;
+}
+StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other295) {
+ maxColLen = other295.maxColLen;
+ avgColLen = other295.avgColLen;
+ numNulls = other295.numNulls;
+ numDVs = other295.numDVs;
+ bitVectors = other295.bitVectors;
+ __isset = other295.__isset;
return *this;
}
void StringColumnStatsData::printTo(std::ostream& out) const {
@@ -7331,19 +7389,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other295) {
- maxColLen = other295.maxColLen;
- avgColLen = other295.avgColLen;
- numNulls = other295.numNulls;
- bitVectors = other295.bitVectors;
- __isset = other295.__isset;
-}
-BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other296) {
+BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other296) {
maxColLen = other296.maxColLen;
avgColLen = other296.avgColLen;
numNulls = other296.numNulls;
bitVectors = other296.bitVectors;
__isset = other296.__isset;
+}
+BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other297) {
+ maxColLen = other297.maxColLen;
+ avgColLen = other297.avgColLen;
+ numNulls = other297.numNulls;
+ bitVectors = other297.bitVectors;
+ __isset = other297.__isset;
return *this;
}
void BinaryColumnStatsData::printTo(std::ostream& out) const {
@@ -7448,13 +7506,13 @@ void swap(Decimal &a, Decimal &b) {
swap(a.scale, b.scale);
}
-Decimal::Decimal(const Decimal& other297) {
- unscaled = other297.unscaled;
- scale = other297.scale;
-}
-Decimal& Decimal::operator=(const Decimal& other298) {
+Decimal::Decimal(const Decimal& other298) {
unscaled = other298.unscaled;
scale = other298.scale;
+}
+Decimal& Decimal::operator=(const Decimal& other299) {
+ unscaled = other299.unscaled;
+ scale = other299.scale;
return *this;
}
void Decimal::printTo(std::ostream& out) const {
@@ -7615,21 +7673,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other299) {
- lowValue = other299.lowValue;
- highValue = other299.highValue;
- numNulls = other299.numNulls;
- numDVs = other299.numDVs;
- bitVectors = other299.bitVectors;
- __isset = other299.__isset;
-}
-DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other300) {
+DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other300) {
lowValue = other300.lowValue;
highValue = other300.highValue;
numNulls = other300.numNulls;
numDVs = other300.numDVs;
bitVectors = other300.bitVectors;
__isset = other300.__isset;
+}
+DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other301) {
+ lowValue = other301.lowValue;
+ highValue = other301.highValue;
+ numNulls = other301.numNulls;
+ numDVs = other301.numDVs;
+ bitVectors = other301.bitVectors;
+ __isset = other301.__isset;
return *this;
}
void DecimalColumnStatsData::printTo(std::ostream& out) const {
@@ -7715,11 +7773,11 @@ void swap(Date &a, Date &b) {
swap(a.daysSinceEpoch, b.daysSinceEpoch);
}
-Date::Date(const Date& other301) {
- daysSinceEpoch = other301.daysSinceEpoch;
-}
-Date& Date::operator=(const Date& other302) {
+Date::Date(const Date& other302) {
daysSinceEpoch = other302.daysSinceEpoch;
+}
+Date& Date::operator=(const Date& other303) {
+ daysSinceEpoch = other303.daysSinceEpoch;
return *this;
}
void Date::printTo(std::ostream& out) const {
@@ -7879,21 +7937,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other303) {
- lowValue = other303.lowValue;
- highValue = other303.highValue;
- numNulls = other303.numNulls;
- numDVs = other303.numDVs;
- bitVectors = other303.bitVectors;
- __isset = other303.__isset;
-}
-DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other304) {
+DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other304) {
lowValue = other304.lowValue;
highValue = other304.highValue;
numNulls = other304.numNulls;
numDVs = other304.numDVs;
bitVectors = other304.bitVectors;
__isset = other304.__isset;
+}
+DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other305) {
+ lowValue = other305.lowValue;
+ highValue = other305.highValue;
+ numNulls = other305.numNulls;
+ numDVs = other305.numDVs;
+ bitVectors = other305.bitVectors;
+ __isset = other305.__isset;
return *this;
}
void DateColumnStatsData::printTo(std::ostream& out) const {
@@ -8079,17 +8137,7 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other305) {
- booleanStats = other305.booleanStats;
- longStats = other305.longStats;
- doubleStats = other305.doubleStats;
- stringStats = other305.stringStats;
- binaryStats = other305.binaryStats;
- decimalStats = other305.decimalStats;
- dateStats = other305.dateStats;
- __isset = other305.__isset;
-}
-ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other306) {
+ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other306) {
booleanStats = other306.booleanStats;
longStats = other306.longStats;
doubleStats = other306.doubleStats;
@@ -8098,6 +8146,16 @@ ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData
decimalStats = other306.decimalStats;
dateStats = other306.dateStats;
__isset = other306.__isset;
+}
+ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other307) {
+ booleanStats = other307.booleanStats;
+ longStats = other307.longStats;
+ doubleStats = other307.doubleStats;
+ stringStats = other307.stringStats;
+ binaryStats = other307.binaryStats;
+ decimalStats = other307.decimalStats;
+ dateStats = other307.dateStats;
+ __isset = other307.__isset;
return *this;
}
void ColumnStatisticsData::printTo(std::ostream& out) const {
@@ -8225,15 +8283,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) {
swap(a.statsData, b.statsData);
}
-ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other307) {
- colName = other307.colName;
- colType = other307.colType;
- statsData = other307.statsData;
-}
-ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other308) {
+ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other308) {
colName = other308.colName;
colType = other308.colType;
statsData = other308.statsData;
+}
+ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other309) {
+ colName = other309.colName;
+ colType = other309.colType;
+ statsData = other309.statsData;
return *this;
}
void ColumnStatisticsObj::printTo(std::ostream& out) const {
@@ -8396,21 +8454,21 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other309) {
- isTblLevel = other309.isTblLevel;
- dbName = other309.dbName;
- tableName = other309.tableName;
- partName = other309.partName;
- lastAnalyzed = other309.lastAnalyzed;
- __isset = other309.__isset;
-}
-ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other310) {
+ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other310) {
isTblLevel = other310.isTblLevel;
dbName = other310.dbName;
tableName = other310.tableName;
partName = other310.partName;
lastAnalyzed = other310.lastAnalyzed;
__isset = other310.__isset;
+}
+ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other311) {
+ isTblLevel = other311.isTblLevel;
+ dbName = other311.dbName;
+ tableName = other311.tableName;
+ partName = other311.partName;
+ lastAnalyzed = other311.lastAnalyzed;
+ __isset = other311.__isset;
return *this;
}
void ColumnStatisticsDesc::printTo(std::ostream& out) const {
@@ -8472,14 +8530,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->statsObj.clear();
- uint32_t _size311;
- ::apache::thrift::protocol::TType _etype314;
- xfer += iprot->readListBegin(_etype314, _size311);
- this->statsObj.resize(_size311);
- uint32_t _i315;
- for (_i315 = 0; _i315 < _size311; ++_i315)
+ uint32_t _size312;
+ ::apache::thrift::protocol::TType _etype315;
+ xfer += iprot->readListBegin(_etype315, _size312);
+ this->statsObj.resize(_size312);
+ uint32_t _i316;
+ for (_i316 = 0; _i316 < _size312; ++_i316)
{
- xfer += this->statsObj[_i315].read(iprot);
+ xfer += this->statsObj[_i316].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8516,10 +8574,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter316;
- for (_iter316 = this->statsObj.begin(); _iter316 != this->statsObj.end(); ++_iter316)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter317;
+ for (_iter317 = this->statsObj.begin(); _iter317 != this->statsObj.end(); ++_iter317)
{
- xfer += (*_iter316).write(oprot);
+ xfer += (*_iter317).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8536,13 +8594,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) {
swap(a.statsObj, b.statsObj);
}
-ColumnStatistics::ColumnStatistics(const ColumnStatistics& other317) {
- statsDesc = other317.statsDesc;
- statsObj = other317.statsObj;
-}
-ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other318) {
+ColumnStatistics::ColumnStatistics(const ColumnStatistics& other318) {
statsDesc = other318.statsDesc;
statsObj = other318.statsObj;
+}
+ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other319) {
+ statsDesc = other319.statsDesc;
+ statsObj = other319.statsObj;
return *this;
}
void ColumnStatistics::printTo(std::ostream& out) const {
@@ -8593,14 +8651,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size319;
- ::apache::thrift::protocol::TType _etype322;
- xfer += iprot->readListBegin(_etype322, _size319);
- this->colStats.resize(_size319);
- uint32_t _i323;
- for (_i323 = 0; _i323 < _size319; ++_i323)
+ uint32_t _size320;
+ ::apache::thrift::protocol::TType _etype323;
+ xfer += iprot->readListBegin(_etype323, _size320);
+ this->colStats.resize(_size320);
+ uint32_t _i324;
+ for (_i324 = 0; _i324 < _size320; ++_i324)
{
- xfer += this->colStats[_i323].read(iprot);
+ xfer += this->colStats[_i324].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8641,10 +8699,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter324;
- for (_iter324 = this->colStats.begin(); _iter324 != this->colStats.end(); ++_iter324)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter325;
+ for (_iter325 = this->colStats.begin(); _iter325 != this->colStats.end(); ++_iter325)
{
- xfer += (*_iter324).write(oprot);
+ xfer += (*_iter325).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8665,13 +8723,13 @@ void swap(AggrStats &a, AggrStats &b) {
swap(a.partsFound, b.partsFound);
}
-AggrStats::AggrStats(const AggrStats& other325) {
- colStats = other325.colStats;
- partsFound = other325.partsFound;
-}
-AggrStats& AggrStats::operator=(const AggrStats& other326) {
+AggrStats::AggrStats(const AggrStats& other326) {
colStats = other326.colStats;
partsFound = other326.partsFound;
+}
+AggrStats& AggrStats::operator=(const AggrStats& other327) {
+ colStats = other327.colStats;
+ partsFound = other327.partsFound;
return *this;
}
void AggrStats::printTo(std::ostream& out) const {
@@ -8722,14 +8780,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size327;
- ::apache::thrift::protocol::TType _etype330;
- xfer += iprot->readListBegin(_etype330, _size327);
- this->colStats.resize(_size327);
- uint32_t _i331;
- for (_i331 = 0; _i331 < _size327; ++_i331)
+ uint32_t _size328;
+ ::apache::thrift::protocol::TType _etype331;
+ xfer += iprot->readListBegin(_etype331, _size328);
+ this->colStats.resize(_size328);
+ uint32_t _i332;
+ for (_i332 = 0; _i332 < _size328; ++_i332)
{
- xfer += this->colStats[_i331].read(iprot);
+ xfer += this->colStats[_i332].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8768,10 +8826,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatistics> ::const_iterator _iter332;
- for (_iter332 = this->colStats.begin(); _iter332 != this->colStats.end(); ++_iter332)
+ std::vector<ColumnStatistics> ::const_iterator _iter333;
+ for (_iter333 = this->colStats.begin(); _iter333 != this->colStats.end(); ++_iter333)
{
- xfer += (*_iter332).write(oprot);
+ xfer += (*_iter333).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8794,15 +8852,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) {
swap(a.__isset, b.__isset);
}
-SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other333) {
- colStats = other333.colStats;
- needMerge = other333.needMerge;
- __isset = other333.__isset;
-}
-SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other334) {
+SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other334) {
colStats = other334.colStats;
needMerge = other334.needMerge;
__isset = other334.__isset;
+}
+SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other335) {
+ colStats = other335.colStats;
+ needMerge = other335.needMerge;
+ __isset = other335.__isset;
return *this;
}
void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
@@ -8851,14 +8909,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->fieldSchemas.clear();
- uint32_t _size335;
- ::apache::thrift::protocol::TType _etype338;
- xfer += iprot->readListBegin(_etype338, _size335);
- this->fieldSchemas.resize(_size335);
- uint32_t _i339;
- for (_i339 = 0; _i339 < _size335; ++_i339)
+ uint32_t _size336;
+ ::apache::thrift::protocol::TType _etype339;
+ xfer += iprot->readListBegin(_etype339, _size336);
+ this->fieldSchemas.resize(_size336);
+ uint32_t _i340;
+ for (_i340 = 0; _i340 < _size336; ++_i340)
{
- xfer += this->fieldSchemas[_i339].read(iprot);
+ xfer += this->fieldSchemas[_i340].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8871,17 +8929,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size340;
- ::apache::thrift::protocol::TType _ktype341;
- ::apache::thrift::protocol::TType _vtype342;
- xfer += iprot->readMapBegin(_ktype341, _vtype342, _size340);
- uint32_t _i344;
- for (_i344 = 0; _i344 < _size340; ++_i344)
+ uint32_t _size341;
+ ::apache::thrift::protocol::TType _ktype342;
+ ::apache::thrift::protocol::TType _vtype343;
+ xfer += iprot->readMapBegin(_ktype342, _vtype343, _size341);
+ uint32_t _i345;
+ for (_i345 = 0; _i345 < _size341; ++_i345)
{
- std::string _key345;
- xfer += iprot->readString(_key345);
- std::string& _val346 = this->properties[_key345];
- xfer += iprot->readString(_val346);
+ std::string _key346;
+ xfer += iprot->readString(_key346);
+ std::string& _val347 = this->properties[_key346];
+ xfer += iprot->readString(_val347);
}
xfer += iprot->readMapEnd();
}
@@ -8910,10 +8968,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size()));
- std::vector<FieldSchema> ::const_iterator _iter347;
- for (_iter347 = this->fieldSchemas.begin(); _iter347 != this->fieldSchemas.end(); ++_iter347)
+ std::vector<FieldSchema> ::const_iterator _iter348;
+ for (_iter348 = this->fieldSchemas.begin(); _iter348 != this->fieldSchemas.end(); ++_iter348)
{
- xfer += (*_iter347).write(oprot);
+ xfer += (*_iter348).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8922,11 +8980,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter348;
- for (_iter348 = this->properties.begin(); _iter348 != this->properties.end(); ++_iter348)
+ std::map<std::string, std::string> ::const_iterator _iter349;
+ for (_iter349 = this->properties.begin(); _iter349 != this->properties.end(); ++_iter349)
{
- xfer += oprot->writeString(_iter348->first);
- xfer += oprot->writeString(_iter348->second);
+ xfer += oprot->writeString(_iter349->first);
+ xfer += oprot->writeString(_iter349->second);
}
xfer += oprot->writeMapEnd();
}
@@ -8944,15 +9002,15 @@ void swap(Schema &a, Schema &b) {
swap(a.__isset, b.__isset);
}
-Schema::Schema(const Schema& other349) {
- fieldSchemas = other349.fieldSchemas;
- properties = other349.properties;
- __isset = other349.__isset;
-}
-Schema& Schema::operator=(const Schema& other350) {
+Schema::Schema(const Schema& other350) {
fieldSchemas = other350.fieldSchemas;
properties = other350.properties;
__isset = other350.__isset;
+}
+Schema& Schema::operator=(const Schema& other351) {
+ fieldSchemas = other351.fieldSchemas;
+ properties = other351.properties;
+ __isset = other351.__isset;
return *this;
}
void Schema::printTo(std::ostream& out) const {
@@ -8997,17 +9055,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size351;
- ::apache::thrift::protocol::TType _ktype352;
- ::apache::thrift::protocol::TType _vtype353;
- xfer += iprot->readMapBegin(_ktype352, _vtype353, _size351);
- uint32_t _i355;
- for (_i355 = 0; _i355 < _size351; ++_i355)
+ uint32_t _size352;
+ ::apache::thrift::protocol::TType _ktype353;
+ ::apache::thrift::protocol::TType _vtype354;
+ xfer += iprot->readMapBegin(_ktype353, _vtype354, _size352);
+ uint32_t _i356;
+ for (_i356 = 0; _i356 < _size352; ++_i356)
{
- std::string _key356;
- xfer += iprot->readString(_key356);
- std::string& _val357 = this->properties[_key356];
- xfer += iprot->readString(_val357);
+ std::string _key357;
+ xfer += iprot->readString(_key357);
+ std::string& _val358 = this->properties[_key357];
+ xfer += iprot->readString(_val358);
}
xfer += iprot->readMapEnd();
}
@@ -9036,11 +9094,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter358;
- for (_iter358 = this->properties.begin(); _iter358 != this->properties.end(); ++_iter358)
+ std::map<std::string, std::string> ::const_iterator _iter359;
+ for (_iter359 = this->properties.begin(); _iter359 != this->properties.end(); ++_iter359)
{
- xfer += oprot->writeString(_iter358->first);
- xfer += oprot->writeString(_iter358->second);
+ xfer += oprot->writeString(_iter359->first);
+ xfer += oprot->writeString(_iter359->second);
}
xfer += oprot->writeMapEnd();
}
@@ -9057,13 +9115,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) {
swap(a.__isset, b.__isset);
}
-EnvironmentContext::EnvironmentContext(const EnvironmentContext& other359) {
- properties = other359.properties;
- __isset = other359.__isset;
-}
-EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other360) {
+EnvironmentContext::EnvironmentContext(const EnvironmentContext& other360) {
properties = other360.properties;
__isset = other360.__isset;
+}
+EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other361) {
+ properties = other361.properties;
+ __isset = other361.__isset;
return *this;
}
void EnvironmentContext::printTo(std::ostream& out) const {
@@ -9165,13 +9223,13 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other361) {
- db_name = other361.db_name;
- tbl_name = other361.tbl_name;
-}
-PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other362) {
+PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other362) {
db_name = other362.db_name;
tbl_name = other362.tbl_name;
+}
+PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other363) {
+ db_name = other363.db_name;
+ tbl_name = other363.tbl_name;
return *this;
}
void PrimaryKeysRequest::printTo(std::ostream& out) const {
@@ -9217,14 +9275,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size363;
- ::apache::thrift::protocol::TType _etype366;
- xfer += iprot->readListBegin(_etype366, _size363);
- this->primaryKeys.resize(_size363);
- uint32_t _i367;
- for (_i367 = 0; _i367 < _size363; ++_i367)
+ uint32_t _size364;
+ ::apache::thrift::protocol::TType _etype367;
+ xfer += iprot->readListBegin(_etype367, _size364);
+ this->primaryKeys.resize(_size364);
+ uint32_t _i368;
+ for (_i368 = 0; _i368 < _size364; ++_i368)
{
- xfer += this->primaryKeys[_i367].read(iprot);
+ xfer += this->primaryKeys[_i368].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9255,10 +9313,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter368;
- for (_iter368 = this->primaryKeys.begin(); _iter368 != this->primaryKeys.end(); ++_iter368)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter369;
+ for (_iter369 = this->primaryKeys.begin(); _iter369 != this->primaryKeys.end(); ++_iter369)
{
- xfer += (*_iter368).write(oprot);
+ xfer += (*_iter369).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9274,11 +9332,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) {
swap(a.primaryKeys, b.primaryKeys);
}
-PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other369) {
- primaryKeys = other369.primaryKeys;
-}
-PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other370) {
+PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other370) {
primaryKeys = other370.primaryKeys;
+}
+PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other371) {
+ primaryKeys = other371.primaryKeys;
return *this;
}
void PrimaryKeysResponse::printTo(std::ostream& out) const {
@@ -9409,19 +9467,19 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) {
swap(a.__isset, b.__isset);
}
-ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other371) {
- parent_db_name = other371.parent_db_name;
- parent_tbl_name = other371.parent_tbl_name;
- foreign_db_name = other371.foreign_db_name;
- foreign_tbl_name = other371.foreign_tbl_name;
- __isset = other371.__isset;
-}
-ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other372) {
+ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other372) {
parent_db_name = other372.parent_db_name;
parent_tbl_name = other372.parent_tbl_name;
foreign_db_name = other372.foreign_db_name;
foreign_tbl_name = other372.foreign_tbl_name;
__isset = other372.__isset;
+}
+ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other373) {
+ parent_db_name = other373.parent_db_name;
+ parent_tbl_name = other373.parent_tbl_name;
+ foreign_db_name = other373.foreign_db_name;
+ foreign_tbl_name = other373.foreign_tbl_name;
+ __isset = other373.__isset;
return *this;
}
void ForeignKeysRequest::printTo(std::ostream& out) const {
@@ -9469,14 +9527,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size373;
- ::apache::thrift::protocol::TType _etype376;
- xfer += iprot->readListBegin(_etype376, _size373);
- this->foreignKeys.resize(_size373);
- uint32_t _i377;
- for (_i377 = 0; _i377 < _size373; ++_i377)
+ uint32_t _size374;
+ ::apache::thrift::protocol::TType _etype377;
+ xfer += iprot->readListBegin(_etype377, _size374);
+ this->foreignKeys.resize(_size374);
+ uint32_t _i378;
+ for (_i378 = 0; _i378 < _size374; ++_i378)
{
- xfer += this->foreignKeys[_i377].read(iprot);
+ xfer += this->foreignKeys[_i378].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9507,10 +9565,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter378;
- for (_iter378 = this->foreignKeys.begin(); _iter378 != this->foreignKeys.end(); ++_iter378)
+ std::vector<SQLForeignKey> ::const_iterator _iter379;
+ for (_iter379 = this->foreignKeys.begin(); _iter379 != this->foreignKeys.end(); ++_iter379)
{
- xfer += (*_iter378).write(oprot);
+ xfer += (*_iter379).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9526,11 +9584,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) {
swap(a.foreignKeys, b.foreignKeys);
}
-ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other379) {
- foreignKeys = other379.foreignKeys;
-}
-ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other380) {
+ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other380) {
foreignKeys = other380.foreignKeys;
+}
+ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other381) {
+ foreignKeys = other381.foreignKeys;
return *this;
}
void ForeignKeysResponse::printTo(std::ostream& out) const {
@@ -9632,13 +9690,13 @@ void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other381) {
- db_name = other381.db_name;
- tbl_name = other381.tbl_name;
-}
-UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other382) {
+UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other382) {
db_name = other382.db_name;
tbl_name = other382.tbl_name;
+}
+UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other383) {
+ db_name = other383.db_name;
+ tbl_name = other383.tbl_name;
return *this;
}
void UniqueConstraintsRequest::printTo(std::ostream& out) const {
@@ -9684,14 +9742,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size383;
- ::apache::thrift::protocol::TType _etype386;
- xfer += iprot->readListBegin(_etype386, _size383);
- this->uniqueConstraints.resize(_size383);
- uint32_t _i387;
- for (_i387 = 0; _i387 < _size383; ++_i387)
+ uint32_t _size384;
+ ::apache::thrift::protocol::TType _etype387;
+ xfer += iprot->readListBegin(_etype387, _size384);
+ this->uniqueConstraints.resize(_size384);
+ uint32_t _i388;
+ for (_i388 = 0; _i388 < _size384; ++_i388)
{
- xfer += this->uniqueConstraints[_i387].read(iprot);
+ xfer += this->uniqueConstraints[_i388].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9722,10 +9780,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter388;
- for (_iter388 = this->uniqueConstraints.begin(); _iter388 != this->uniqueConstraints.end(); ++_iter388)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter389;
+ for (_iter389 = this->uniqueConstraints.begin(); _iter389 != this->uniqueConstraints.end(); ++_iter389)
{
- xfer += (*_iter388).write(oprot);
+ xfer += (*_iter389).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9741,11 +9799,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) {
swap(a.uniqueConstraints, b.uniqueConstraints);
}
-UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other389) {
- uniqueConstraints = other389.uniqueConstraints;
-}
-UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other390) {
+UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other390) {
uniqueConstraints = other390.uniqueConstraints;
+}
+UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other391) {
+ uniqueConstraints = other391.uniqueConstraints;
return *this;
}
void UniqueConstraintsResponse::printTo(std::ostream& out) const {
@@ -9847,13 +9905,13 @@ void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other391) {
- db_name = other391.db_name;
- tbl_name = other391.tbl_name;
-}
-NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other392) {
+NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other392) {
db_name = other392.db_name;
tbl_name = other392.tbl_name;
+}
+NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other393) {
+ db_name = other393.db_name;
+ tbl_name = other393.tbl_name;
return *this;
}
void NotNullConstraintsRequest::printTo(std::ostream& out) const {
@@ -9899,14 +9957,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size393;
- ::apache::thrift::protocol::TType _etype396;
- xfer += iprot->readListBegin(_etype396, _size393);
- this->notNullConstraints.resize(_size393);
- uint32_t _i397;
- for (_i397 = 0; _i397 < _size393; ++_i397)
+ uint32_t _size394;
+ ::apache::thrift::protocol::TType _etype397;
+ xfer += iprot->readListBegin(_etype397, _size394);
+ this->notNullConstraints.resize(_size394);
+ uint32_t _i398;
+ for (_i398 = 0; _i398 < _size394; ++_i398)
{
- xfer += this->notNullConstraints[_i397].read(iprot);
+ xfer += this->notNullConstraints[_i398].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9937,10 +9995,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter398;
- for (_iter398 = this->notNullConstraints.begin(); _iter398 != this->notNullConstraints.end(); ++_iter398)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter399;
+ for (_iter399 = this->notNullConstraints.begin(); _iter399 != this->notNullConstraints.end(); ++_iter399)
{
- xfer += (*_iter398).write(oprot);
+ xfer += (*_iter399).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9956,11 +10014,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) {
swap(a.notNullConstraints, b.notNullConstraints);
}
-NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other399) {
- notNullConstraints = other399.notNullConstraints;
-}
-NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other400) {
+NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other400) {
notNullConstraints = other400.notNullConstraints;
+}
+NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other401) {
+ notNullConstraints = other401.notNullConstraints;
return *this;
}
void NotNullConstraintsResponse::printTo(std::ostream& out) const {
@@ -10082,15 +10140,15 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) {
swap(a.constraintname, b.constraintname);
}
-DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other401) {
- dbname = other401.dbname;
- tablename = other401.tablename;
- constraintname = other401.constraintname;
-}
-DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other402) {
+DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other402) {
dbname = other402.dbname;
tablename = other402.tablename;
constraintname = other402.constraintname;
+}
+DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other403) {
+ dbname = other403.dbname;
+ tablename = other403.tablename;
+ constraintname = other403.constraintname;
return *this;
}
void DropConstraintRequest::printTo(std::ostream& out) const {
@@ -10137,14 +10195,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeyCols.clear();
- uint32_t _size403;
- ::apache::thrift::protocol::TType _etype406;
- xfer += iprot->readListBegin(_etype406, _size403);
- this->primaryKeyCols.resize(_size403);
- uint32_t _i407;
- for (_i407 = 0; _i407 < _size403; ++_i407)
+ uint32_t _size404;
+ ::apache::thrift::protocol::TType _etype407;
+ xfer += iprot->readListBegin(_etype407, _size404);
+ this->primaryKeyCols.resize(_size404);
+ uint32_t _i408;
+ for (_i408 = 0; _i408 < _size404; ++_i408)
{
- xfer += this->primaryKeyCols[_i407].read(iprot);
+ xfer += this->primaryKeyCols[_i408].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10175,10 +10233,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeFieldBegin("primaryKeyCols", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeyCols.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter408;
- for (_iter408 = this->primaryKeyCols.begin(); _iter408 != this->primaryKeyCols.end(); ++_iter408)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter409;
+ for (_iter409 = this->primaryKeyCols.begin(); _iter409 != this->primaryKeyCols.end(); ++_iter409)
{
- xfer += (*_iter408).write(oprot);
+ xfer += (*_iter409).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10194,11 +10252,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) {
swap(a.primaryKeyCols, b.primaryKeyCols);
}
-AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other409) {
- primaryKeyCols = other409.primaryKeyCols;
-}
-AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other410) {
+AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other410) {
primaryKeyCols = other410.primaryKeyCols;
+}
+AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other411) {
+ primaryKeyCols = other411.primaryKeyCols;
return *this;
}
void AddPrimaryKeyRequest::printTo(std::ostream& out) const {
@@ -10243,14 +10301,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeyCols.clear();
- uint32_t _size411;
- ::apache::thrift::protocol::TType _etype414;
- xfer += iprot->readListBegin(_etype414, _size411);
- this->foreignKeyCols.resize(_size411);
- uint32_t _i415;
- for (_i415 = 0; _i415 < _size411; ++_i415)
+ uint32_t _size412;
+ ::apache::thrift::protocol::TType _etype415;
+ xfer += iprot->readListBegin(_etype415, _size412);
+ this->foreignKeyCols.resize(_size412);
+ uint32_t _i416;
+ for (_i416 = 0; _i416 < _size412; ++_i416)
{
- xfer += this->foreignKeyCols[_i415].read(iprot);
+ xfer += this->foreignKeyCols[_i416].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10281,10 +10339,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeyCols.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter416;
- for (_iter416 = this->foreignKeyCols.begin(); _iter416 != this->foreignKeyCols.end(); ++_iter416)
+ std::vector<SQLForeignKey> ::const_iterator _iter417;
+ for (_iter417 = this->foreignKeyCols.begin(); _iter417 != this->foreignKeyCols.end(); ++_iter417)
{
- xfer += (*_iter416).write(oprot);
+ xfer += (*_iter417).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10300,11 +10358,11 @@ void swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) {
swap(a.foreignKeyCols, b.foreignKeyCols);
}
-AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other417) {
- foreignKeyCols = other417.foreignKeyCols;
-}
-AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other418) {
+AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other418) {
foreignKeyCols = other418.foreignKeyCols;
+}
+AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other419) {
+ foreignKeyCols = other419.foreignKeyCols;
return *this;
}
void AddForeignKeyRequest::printTo(std::ostream& out) const {
@@ -10349,14 +10407,14 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraintCols.clear();
- uint32_t _size419;
- ::apache::thrift::protocol::TType _etype422;
- xfer += iprot->readListBegin(_etype422, _size419);
- this->uniqueConstraintCols.resize(_size419);
- uint32_t _i423;
- for (_i423 = 0; _i423 < _size419; ++_i423)
+ uint32_t _size420;
+ ::apache::thrift::protocol::TType _etype423;
+ xfer += iprot->readListBegin(_etype423, _size420);
+ this->uniqueConstraintCols.resize(_size420);
+ uint32_t _i424;
+ for (_i424 = 0; _i424 < _size420; ++_i424)
{
- xfer += this->uniqueConstraintCols[_i423].read(iprot);
+ xfer += this->uniqueConstraintCols[_i424].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10387,10 +10445,10 @@ uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraintCols.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter424;
- for (_iter424 = this->uniqueConstraintCols.begin(); _iter424 != this->uniqueConstraintCols.end(); ++_iter424)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter425;
+ for (_iter425 = this->uniqueConstraintCols.begin(); _iter425 != this->uniqueConstraintCols.end(); ++_iter425)
{
- xfer += (*_iter424).write(oprot);
+ xfer += (*_iter425).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10406,11 +10464,11 @@ void swap(AddUniqueConstraintRequest &a, AddUniqueConstraintRequest &b) {
swap(a.uniqueConstraintCols, b.uniqueConstraintCols);
}
-AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other425) {
- uniqueConstraintCols = other425.uniqueConstraintCols;
-}
-AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other426) {
+AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other426) {
uniqueConstraintCols = other426.uniqueConstraintCols;
+}
+AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other427) {
+ uniqueConstraintCols = other427.uniqueConstraintCols;
return *this;
}
void AddUniqueConstraintRequest::printTo(std::ostream& out) const {
@@ -10455,14 +10513,14 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraintCols.clear();
- uint32_t _size427;
- ::apache::thrift::protocol::TType _etype430;
- xfer += iprot->readListBegin(_etype430, _size427);
- this->notNullConstraintCols.resize(_size427);
- uint32_t _i431;
- for (_i431 = 0; _i431 < _size427; ++_i431)
+ uint32_t _size428;
+ ::apache::thrift::protocol::TType _etype431;
+ xfer += iprot->readListBegin(_etype431, _size428);
+ this->notNullConstraintCols.resize(_size428);
+ uint32_t _i432;
+ for (_i432 = 0; _i432 < _size428; ++_i432)
{
- xfer += this->notNullConstraintCols[_i431].read(iprot);
+ xfer += this->notNullConstraintCols[_i432].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10493,10 +10551,10 @@ uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtoco
xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraintCols.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter432;
- for (_iter432 = this->notNullConstraintCols.begin(); _iter432 != this->notNullConstraintCols.end(); ++_iter432)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter433;
+ for (_iter433 = this->notNullConstraintCols.begin(); _iter433 != this->notNullConstraintCols.end(); ++_iter433)
{
- xfer += (*_iter432).write(oprot);
+ xfer += (*_iter433).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10512,11 +10570,11 @@ void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) {
swap(a.notNullConstraintCols, b.notNullConstraintCols);
}
-AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other433) {
- notNullConstraintCols = other433.notNullConstraintCols;
-}
-AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other434) {
+AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other434) {
notNullConstraintCols = other434.notNullConstraintCols;
+}
+AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other435) {
+ notNullConstraintCols = other435.notNullConstraintCols;
return *this;
}
void AddNotNullConstraintRequest::printTo(std::ostream& out) const {
@@ -10566,14 +10624,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size435;
- ::apache::thrift::protocol::TType _etype438;
- xfer += iprot->readListBegin(_etype438, _size435);
- this->partitions.resize(_size435);
- uint32_t _i439;
- for (_i439 = 0; _i439 < _size435; ++_i439)
+ uint32_t _size436;
+ ::apache::thrift::protocol::TType _etype439;
+ xfer += iprot->readListBegin(_etype439, _size436);
+ this->partitions.resize(_size436);
+ uint32_t _i440;
+ for (_i440 = 0; _i440 < _size436; ++_i440)
{
- xfer += this->partitions[_i439].read(iprot);
+ xfer += this->partitions[_i440].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10614,10 +10672,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter440;
- for (_iter440 = this->partitions.begin(); _iter440 != this->partitions.end(); ++_iter440)
+ std::vector<Partition> ::const_iterator _iter441;
+ for (_iter441 = this->partitions.begin(); _iter441 != this->partitions.end(); ++_iter441)
{
- xfer += (*_iter440).write(oprot);
+ xfer += (*_iter441).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10638,13 +10696,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) {
swap(a.hasUnknownPartitions, b.hasUnknownPartitions);
}
-PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other441) {
- partitions = other441.partitions;
- hasUnknownPartitions = other441.hasUnknownPartitions;
-}
-PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other442) {
+PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other442) {
partitions = other442.partitions;
hasUnknownPartitions = other442.hasUnknownPartitions;
+}
+PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other443) {
+ partitions = other443.partitions;
+ hasUnknownPartitions = other443.hasUnknownPartitions;
return *this;
}
void PartitionsByExprResult::printTo(std::ostream& out) const {
@@ -10806,21 +10864,21 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) {
swap(a.__isset, b.__isset);
}
-PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other443) {
- dbName = other443.dbName;
- tblName = other443.tblName;
- expr = other443.expr;
- defaultPartitionName = other443.defaultPartitionName;
- maxParts = other443.maxParts;
- __isset = other443.__isset;
-}
-PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other444) {
+PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other444) {
dbName = other444.dbName;
tblName = other444.tblName;
expr = other444.expr;
defaultPartitionName = other444.defaultPartitionName;
maxParts = other444.maxParts;
__isset = other444.__isset;
+}
+PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other445) {
+ dbName = other445.dbName;
+ tblName = other445.tblName;
+ expr = other445.expr;
+ defaultPartitionName = other445.defaultPartitionName;
+ maxParts = other445.maxParts;
+ __isset = other445.__isset;
return *this;
}
void PartitionsByExprRequest::printTo(std::ostream& out) const {
@@ -10869,14 +10927,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tableStats.clear();
- uint32_t _size445;
- ::apache::thrift::protocol::TType _etype448;
- xfer += iprot->readListBegin(_etype448, _size445);
- this->tableStats.resize(_size445);
- uint32_t _i449;
- for (_i449 = 0; _i449 < _size445; ++_i449)
+ uint32_t _size446;
+ ::apache::thrift::protocol::TType _etype449;
+ xfer += iprot->readListBegin(_etype449, _size446);
+ this->tableStats.resize(_size446);
+ uint32_t _i450;
+ for (_i450 = 0; _i450 < _size446; ++_i450)
{
- xfer += this->tableStats[_i449].read(iprot);
+ xfer += this->tableStats[_i450].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10907,10 +10965,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tableStats.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter450;
- for (_iter450 = this->tableStats.begin(); _iter450 != this->tableStats.end(); ++_iter450)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter451;
+ for (_iter451 = this->tableStats.begin(); _iter451 != this->tableStats.end(); ++_iter451)
{
- xfer += (*_iter450).write(oprot);
+ xfer += (*_iter451).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10926,11 +10984,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) {
swap(a.tableStats, b.tableStats);
}
-TableStatsResult::TableStatsResult(const TableStatsResult& other451) {
- tableStats = other451.tableStats;
-}
-TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other452) {
+TableStatsResult::TableStatsResult(const TableStatsResult& other452) {
tableStats = other452.tableStats;
+}
+TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other453) {
+ tableStats = other453.tableStats;
return *this;
}
void TableStatsResult::printTo(std::ostream& out) const {
@@ -10975,26 +11033,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partStats.clear();
- uint32_t _size453;
- ::apache::thrift::protocol::TType _ktype454;
- ::apache::thrift::protocol::TType _vtype455;
- xfer += iprot->readMapBegin(_ktype454, _vtype455, _size453);
- uint32_t _i457;
- for (_i457 = 0; _i457 < _size453; ++_i457)
+ uint32_t _size454;
+ ::apache::thrift::protocol::TType _ktype455;
+ ::apache::thrift::protocol::TType _vtype456;
+ xfer += iprot->readMapBegin(_ktype455, _vtype456, _size454);
+ uint32_t _i458;
+ for (_i458 = 0; _i458 < _size
<TRUNCATED>
[3/7] hive git commit: HIVE-18350 : load data should rename files consistent with insert statements. (Deepak Jaiswal, reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Posted by dj...@apache.org.
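For readers skimming this generated-Thrift portion of the patch: the substance of HIVE-18350 is that LOAD DATA should lay files down in the table (or partition) directory using the same bucket-style file names that INSERT statements produce (000000_0, 000001_0, ...), rather than keeping whatever names the source files happened to have. The sketch below only illustrates that naming convention; it is not the code from this patch, and the helper names and the %06d_0 format are assumptions made for the example.

import java.util.List;

// Illustrative sketch (not the Hive implementation): plan insert-style
// target names for files being loaded, so the file at index N ends up
// with the zero-padded bucket name it would have had after an INSERT.
public class LoadDataRenameSketch {

  // e.g. index 0 -> "000000_0", index 12 -> "000012_0" (assumed format).
  static String insertStyleName(int fileIndex) {
    return String.format("%06d_0", fileIndex);
  }

  // Map each source file, in order, to the name an INSERT would have used.
  static String[] planRenames(List<String> sourceFiles) {
    String[] targets = new String[sourceFiles.size()];
    for (int i = 0; i < sourceFiles.size(); i++) {
      targets[i] = insertStyleName(i);
    }
    return targets;
  }

  public static void main(String[] args) {
    List<String> files = List.of("kv1.txt", "kv2.txt");
    String[] targets = planRenames(files);
    for (int i = 0; i < files.size(); i++) {
      System.out.println(files.get(i) + " -> " + targets[i]);
    }
  }
}

The C++ changes that follow are mechanical regenerations of the metastore Thrift bindings (renumbered temporary variables and the new Table fields), not hand-written logic.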
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index af0fd6b..615c024 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1049;
- ::apache::thrift::protocol::TType _etype1052;
- xfer += iprot->readListBegin(_etype1052, _size1049);
- this->success.resize(_size1049);
- uint32_t _i1053;
- for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
+ uint32_t _size1050;
+ ::apache::thrift::protocol::TType _etype1053;
+ xfer += iprot->readListBegin(_etype1053, _size1050);
+ this->success.resize(_size1050);
+ uint32_t _i1054;
+ for (_i1054 = 0; _i1054 < _size1050; ++_i1054)
{
- xfer += iprot->readString(this->success[_i1053]);
+ xfer += iprot->readString(this->success[_i1054]);
}
xfer += iprot->readListEnd();
}
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1054;
- for (_iter1054 = this->success.begin(); _iter1054 != this->success.end(); ++_iter1054)
+ std::vector<std::string> ::const_iterator _iter1055;
+ for (_iter1055 = this->success.begin(); _iter1055 != this->success.end(); ++_iter1055)
{
- xfer += oprot->writeString((*_iter1054));
+ xfer += oprot->writeString((*_iter1055));
}
xfer += oprot->writeListEnd();
}
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1055;
- ::apache::thrift::protocol::TType _etype1058;
- xfer += iprot->readListBegin(_etype1058, _size1055);
- (*(this->success)).resize(_size1055);
- uint32_t _i1059;
- for (_i1059 = 0; _i1059 < _size1055; ++_i1059)
+ uint32_t _size1056;
+ ::apache::thrift::protocol::TType _etype1059;
+ xfer += iprot->readListBegin(_etype1059, _size1056);
+ (*(this->success)).resize(_size1056);
+ uint32_t _i1060;
+ for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
{
- xfer += iprot->readString((*(this->success))[_i1059]);
+ xfer += iprot->readString((*(this->success))[_i1060]);
}
xfer += iprot->readListEnd();
}
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1060;
- ::apache::thrift::protocol::TType _etype1063;
- xfer += iprot->readListBegin(_etype1063, _size1060);
- this->success.resize(_size1060);
- uint32_t _i1064;
- for (_i1064 = 0; _i1064 < _size1060; ++_i1064)
+ uint32_t _size1061;
+ ::apache::thrift::protocol::TType _etype1064;
+ xfer += iprot->readListBegin(_etype1064, _size1061);
+ this->success.resize(_size1061);
+ uint32_t _i1065;
+ for (_i1065 = 0; _i1065 < _size1061; ++_i1065)
{
- xfer += iprot->readString(this->success[_i1064]);
+ xfer += iprot->readString(this->success[_i1065]);
}
xfer += iprot->readListEnd();
}
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1065;
- for (_iter1065 = this->success.begin(); _iter1065 != this->success.end(); ++_iter1065)
+ std::vector<std::string> ::const_iterator _iter1066;
+ for (_iter1066 = this->success.begin(); _iter1066 != this->success.end(); ++_iter1066)
{
- xfer += oprot->writeString((*_iter1065));
+ xfer += oprot->writeString((*_iter1066));
}
xfer += oprot->writeListEnd();
}
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1066;
- ::apache::thrift::protocol::TType _etype1069;
- xfer += iprot->readListBegin(_etype1069, _size1066);
- (*(this->success)).resize(_size1066);
- uint32_t _i1070;
- for (_i1070 = 0; _i1070 < _size1066; ++_i1070)
+ uint32_t _size1067;
+ ::apache::thrift::protocol::TType _etype1070;
+ xfer += iprot->readListBegin(_etype1070, _size1067);
+ (*(this->success)).resize(_size1067);
+ uint32_t _i1071;
+ for (_i1071 = 0; _i1071 < _size1067; ++_i1071)
{
- xfer += iprot->readString((*(this->success))[_i1070]);
+ xfer += iprot->readString((*(this->success))[_i1071]);
}
xfer += iprot->readListEnd();
}
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1071;
- ::apache::thrift::protocol::TType _ktype1072;
- ::apache::thrift::protocol::TType _vtype1073;
- xfer += iprot->readMapBegin(_ktype1072, _vtype1073, _size1071);
- uint32_t _i1075;
- for (_i1075 = 0; _i1075 < _size1071; ++_i1075)
+ uint32_t _size1072;
+ ::apache::thrift::protocol::TType _ktype1073;
+ ::apache::thrift::protocol::TType _vtype1074;
+ xfer += iprot->readMapBegin(_ktype1073, _vtype1074, _size1072);
+ uint32_t _i1076;
+ for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
{
- std::string _key1076;
- xfer += iprot->readString(_key1076);
- Type& _val1077 = this->success[_key1076];
- xfer += _val1077.read(iprot);
+ std::string _key1077;
+ xfer += iprot->readString(_key1077);
+ Type& _val1078 = this->success[_key1077];
+ xfer += _val1078.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Type> ::const_iterator _iter1078;
- for (_iter1078 = this->success.begin(); _iter1078 != this->success.end(); ++_iter1078)
+ std::map<std::string, Type> ::const_iterator _iter1079;
+ for (_iter1079 = this->success.begin(); _iter1079 != this->success.end(); ++_iter1079)
{
- xfer += oprot->writeString(_iter1078->first);
- xfer += _iter1078->second.write(oprot);
+ xfer += oprot->writeString(_iter1079->first);
+ xfer += _iter1079->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1079;
- ::apache::thrift::protocol::TType _ktype1080;
- ::apache::thrift::protocol::TType _vtype1081;
- xfer += iprot->readMapBegin(_ktype1080, _vtype1081, _size1079);
- uint32_t _i1083;
- for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
+ uint32_t _size1080;
+ ::apache::thrift::protocol::TType _ktype1081;
+ ::apache::thrift::protocol::TType _vtype1082;
+ xfer += iprot->readMapBegin(_ktype1081, _vtype1082, _size1080);
+ uint32_t _i1084;
+ for (_i1084 = 0; _i1084 < _size1080; ++_i1084)
{
- std::string _key1084;
- xfer += iprot->readString(_key1084);
- Type& _val1085 = (*(this->success))[_key1084];
- xfer += _val1085.read(iprot);
+ std::string _key1085;
+ xfer += iprot->readString(_key1085);
+ Type& _val1086 = (*(this->success))[_key1085];
+ xfer += _val1086.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1086;
- ::apache::thrift::protocol::TType _etype1089;
- xfer += iprot->readListBegin(_etype1089, _size1086);
- this->success.resize(_size1086);
- uint32_t _i1090;
- for (_i1090 = 0; _i1090 < _size1086; ++_i1090)
+ uint32_t _size1087;
+ ::apache::thrift::protocol::TType _etype1090;
+ xfer += iprot->readListBegin(_etype1090, _size1087);
+ this->success.resize(_size1087);
+ uint32_t _i1091;
+ for (_i1091 = 0; _i1091 < _size1087; ++_i1091)
{
- xfer += this->success[_i1090].read(iprot);
+ xfer += this->success[_i1091].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1091;
- for (_iter1091 = this->success.begin(); _iter1091 != this->success.end(); ++_iter1091)
+ std::vector<FieldSchema> ::const_iterator _iter1092;
+ for (_iter1092 = this->success.begin(); _iter1092 != this->success.end(); ++_iter1092)
{
- xfer += (*_iter1091).write(oprot);
+ xfer += (*_iter1092).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1092;
- ::apache::thrift::protocol::TType _etype1095;
- xfer += iprot->readListBegin(_etype1095, _size1092);
- (*(this->success)).resize(_size1092);
- uint32_t _i1096;
- for (_i1096 = 0; _i1096 < _size1092; ++_i1096)
+ uint32_t _size1093;
+ ::apache::thrift::protocol::TType _etype1096;
+ xfer += iprot->readListBegin(_etype1096, _size1093);
+ (*(this->success)).resize(_size1093);
+ uint32_t _i1097;
+ for (_i1097 = 0; _i1097 < _size1093; ++_i1097)
{
- xfer += (*(this->success))[_i1096].read(iprot);
+ xfer += (*(this->success))[_i1097].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1097;
- ::apache::thrift::protocol::TType _etype1100;
- xfer += iprot->readListBegin(_etype1100, _size1097);
- this->success.resize(_size1097);
- uint32_t _i1101;
- for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
+ uint32_t _size1098;
+ ::apache::thrift::protocol::TType _etype1101;
+ xfer += iprot->readListBegin(_etype1101, _size1098);
+ this->success.resize(_size1098);
+ uint32_t _i1102;
+ for (_i1102 = 0; _i1102 < _size1098; ++_i1102)
{
- xfer += this->success[_i1101].read(iprot);
+ xfer += this->success[_i1102].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1102;
- for (_iter1102 = this->success.begin(); _iter1102 != this->success.end(); ++_iter1102)
+ std::vector<FieldSchema> ::const_iterator _iter1103;
+ for (_iter1103 = this->success.begin(); _iter1103 != this->success.end(); ++_iter1103)
{
- xfer += (*_iter1102).write(oprot);
+ xfer += (*_iter1103).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1103;
- ::apache::thrift::protocol::TType _etype1106;
- xfer += iprot->readListBegin(_etype1106, _size1103);
- (*(this->success)).resize(_size1103);
- uint32_t _i1107;
- for (_i1107 = 0; _i1107 < _size1103; ++_i1107)
+ uint32_t _size1104;
+ ::apache::thrift::protocol::TType _etype1107;
+ xfer += iprot->readListBegin(_etype1107, _size1104);
+ (*(this->success)).resize(_size1104);
+ uint32_t _i1108;
+ for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
{
- xfer += (*(this->success))[_i1107].read(iprot);
+ xfer += (*(this->success))[_i1108].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1108;
- ::apache::thrift::protocol::TType _etype1111;
- xfer += iprot->readListBegin(_etype1111, _size1108);
- this->success.resize(_size1108);
- uint32_t _i1112;
- for (_i1112 = 0; _i1112 < _size1108; ++_i1112)
+ uint32_t _size1109;
+ ::apache::thrift::protocol::TType _etype1112;
+ xfer += iprot->readListBegin(_etype1112, _size1109);
+ this->success.resize(_size1109);
+ uint32_t _i1113;
+ for (_i1113 = 0; _i1113 < _size1109; ++_i1113)
{
- xfer += this->success[_i1112].read(iprot);
+ xfer += this->success[_i1113].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1113;
- for (_iter1113 = this->success.begin(); _iter1113 != this->success.end(); ++_iter1113)
+ std::vector<FieldSchema> ::const_iterator _iter1114;
+ for (_iter1114 = this->success.begin(); _iter1114 != this->success.end(); ++_iter1114)
{
- xfer += (*_iter1113).write(oprot);
+ xfer += (*_iter1114).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1114;
- ::apache::thrift::protocol::TType _etype1117;
- xfer += iprot->readListBegin(_etype1117, _size1114);
- (*(this->success)).resize(_size1114);
- uint32_t _i1118;
- for (_i1118 = 0; _i1118 < _size1114; ++_i1118)
+ uint32_t _size1115;
+ ::apache::thrift::protocol::TType _etype1118;
+ xfer += iprot->readListBegin(_etype1118, _size1115);
+ (*(this->success)).resize(_size1115);
+ uint32_t _i1119;
+ for (_i1119 = 0; _i1119 < _size1115; ++_i1119)
{
- xfer += (*(this->success))[_i1118].read(iprot);
+ xfer += (*(this->success))[_i1119].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1119;
- ::apache::thrift::protocol::TType _etype1122;
- xfer += iprot->readListBegin(_etype1122, _size1119);
- this->success.resize(_size1119);
- uint32_t _i1123;
- for (_i1123 = 0; _i1123 < _size1119; ++_i1123)
+ uint32_t _size1120;
+ ::apache::thrift::protocol::TType _etype1123;
+ xfer += iprot->readListBegin(_etype1123, _size1120);
+ this->success.resize(_size1120);
+ uint32_t _i1124;
+ for (_i1124 = 0; _i1124 < _size1120; ++_i1124)
{
- xfer += this->success[_i1123].read(iprot);
+ xfer += this->success[_i1124].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1124;
- for (_iter1124 = this->success.begin(); _iter1124 != this->success.end(); ++_iter1124)
+ std::vector<FieldSchema> ::const_iterator _iter1125;
+ for (_iter1125 = this->success.begin(); _iter1125 != this->success.end(); ++_iter1125)
{
- xfer += (*_iter1124).write(oprot);
+ xfer += (*_iter1125).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1125;
- ::apache::thrift::protocol::TType _etype1128;
- xfer += iprot->readListBegin(_etype1128, _size1125);
- (*(this->success)).resize(_size1125);
- uint32_t _i1129;
- for (_i1129 = 0; _i1129 < _size1125; ++_i1129)
+ uint32_t _size1126;
+ ::apache::thrift::protocol::TType _etype1129;
+ xfer += iprot->readListBegin(_etype1129, _size1126);
+ (*(this->success)).resize(_size1126);
+ uint32_t _i1130;
+ for (_i1130 = 0; _i1130 < _size1126; ++_i1130)
{
- xfer += (*(this->success))[_i1129].read(iprot);
+ xfer += (*(this->success))[_i1130].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size1130;
- ::apache::thrift::protocol::TType _etype1133;
- xfer += iprot->readListBegin(_etype1133, _size1130);
- this->primaryKeys.resize(_size1130);
- uint32_t _i1134;
- for (_i1134 = 0; _i1134 < _size1130; ++_i1134)
+ uint32_t _size1131;
+ ::apache::thrift::protocol::TType _etype1134;
+ xfer += iprot->readListBegin(_etype1134, _size1131);
+ this->primaryKeys.resize(_size1131);
+ uint32_t _i1135;
+ for (_i1135 = 0; _i1135 < _size1131; ++_i1135)
{
- xfer += this->primaryKeys[_i1134].read(iprot);
+ xfer += this->primaryKeys[_i1135].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size1135;
- ::apache::thrift::protocol::TType _etype1138;
- xfer += iprot->readListBegin(_etype1138, _size1135);
- this->foreignKeys.resize(_size1135);
- uint32_t _i1139;
- for (_i1139 = 0; _i1139 < _size1135; ++_i1139)
+ uint32_t _size1136;
+ ::apache::thrift::protocol::TType _etype1139;
+ xfer += iprot->readListBegin(_etype1139, _size1136);
+ this->foreignKeys.resize(_size1136);
+ uint32_t _i1140;
+ for (_i1140 = 0; _i1140 < _size1136; ++_i1140)
{
- xfer += this->foreignKeys[_i1139].read(iprot);
+ xfer += this->foreignKeys[_i1140].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size1140;
- ::apache::thrift::protocol::TType _etype1143;
- xfer += iprot->readListBegin(_etype1143, _size1140);
- this->uniqueConstraints.resize(_size1140);
- uint32_t _i1144;
- for (_i1144 = 0; _i1144 < _size1140; ++_i1144)
+ uint32_t _size1141;
+ ::apache::thrift::protocol::TType _etype1144;
+ xfer += iprot->readListBegin(_etype1144, _size1141);
+ this->uniqueConstraints.resize(_size1141);
+ uint32_t _i1145;
+ for (_i1145 = 0; _i1145 < _size1141; ++_i1145)
{
- xfer += this->uniqueConstraints[_i1144].read(iprot);
+ xfer += this->uniqueConstraints[_i1145].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size1145;
- ::apache::thrift::protocol::TType _etype1148;
- xfer += iprot->readListBegin(_etype1148, _size1145);
- this->notNullConstraints.resize(_size1145);
- uint32_t _i1149;
- for (_i1149 = 0; _i1149 < _size1145; ++_i1149)
+ uint32_t _size1146;
+ ::apache::thrift::protocol::TType _etype1149;
+ xfer += iprot->readListBegin(_etype1149, _size1146);
+ this->notNullConstraints.resize(_size1146);
+ uint32_t _i1150;
+ for (_i1150 = 0; _i1150 < _size1146; ++_i1150)
{
- xfer += this->notNullConstraints[_i1149].read(iprot);
+ xfer += this->notNullConstraints[_i1150].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1150;
- for (_iter1150 = this->primaryKeys.begin(); _iter1150 != this->primaryKeys.end(); ++_iter1150)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1151;
+ for (_iter1151 = this->primaryKeys.begin(); _iter1151 != this->primaryKeys.end(); ++_iter1151)
{
- xfer += (*_iter1150).write(oprot);
+ xfer += (*_iter1151).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1151;
- for (_iter1151 = this->foreignKeys.begin(); _iter1151 != this->foreignKeys.end(); ++_iter1151)
+ std::vector<SQLForeignKey> ::const_iterator _iter1152;
+ for (_iter1152 = this->foreignKeys.begin(); _iter1152 != this->foreignKeys.end(); ++_iter1152)
{
- xfer += (*_iter1151).write(oprot);
+ xfer += (*_iter1152).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1152;
- for (_iter1152 = this->uniqueConstraints.begin(); _iter1152 != this->uniqueConstraints.end(); ++_iter1152)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1153;
+ for (_iter1153 = this->uniqueConstraints.begin(); _iter1153 != this->uniqueConstraints.end(); ++_iter1153)
{
- xfer += (*_iter1152).write(oprot);
+ xfer += (*_iter1153).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1153;
- for (_iter1153 = this->notNullConstraints.begin(); _iter1153 != this->notNullConstraints.end(); ++_iter1153)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1154;
+ for (_iter1154 = this->notNullConstraints.begin(); _iter1154 != this->notNullConstraints.end(); ++_iter1154)
{
- xfer += (*_iter1153).write(oprot);
+ xfer += (*_iter1154).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1154;
- for (_iter1154 = (*(this->primaryKeys)).begin(); _iter1154 != (*(this->primaryKeys)).end(); ++_iter1154)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1155;
+ for (_iter1155 = (*(this->primaryKeys)).begin(); _iter1155 != (*(this->primaryKeys)).end(); ++_iter1155)
{
- xfer += (*_iter1154).write(oprot);
+ xfer += (*_iter1155).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1155;
- for (_iter1155 = (*(this->foreignKeys)).begin(); _iter1155 != (*(this->foreignKeys)).end(); ++_iter1155)
+ std::vector<SQLForeignKey> ::const_iterator _iter1156;
+ for (_iter1156 = (*(this->foreignKeys)).begin(); _iter1156 != (*(this->foreignKeys)).end(); ++_iter1156)
{
- xfer += (*_iter1155).write(oprot);
+ xfer += (*_iter1156).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1156;
- for (_iter1156 = (*(this->uniqueConstraints)).begin(); _iter1156 != (*(this->uniqueConstraints)).end(); ++_iter1156)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1157;
+ for (_iter1157 = (*(this->uniqueConstraints)).begin(); _iter1157 != (*(this->uniqueConstraints)).end(); ++_iter1157)
{
- xfer += (*_iter1156).write(oprot);
+ xfer += (*_iter1157).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1157;
- for (_iter1157 = (*(this->notNullConstraints)).begin(); _iter1157 != (*(this->notNullConstraints)).end(); ++_iter1157)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1158;
+ for (_iter1158 = (*(this->notNullConstraints)).begin(); _iter1158 != (*(this->notNullConstraints)).end(); ++_iter1158)
{
- xfer += (*_iter1157).write(oprot);
+ xfer += (*_iter1158).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partNames.clear();
- uint32_t _size1158;
- ::apache::thrift::protocol::TType _etype1161;
- xfer += iprot->readListBegin(_etype1161, _size1158);
- this->partNames.resize(_size1158);
- uint32_t _i1162;
- for (_i1162 = 0; _i1162 < _size1158; ++_i1162)
+ uint32_t _size1159;
+ ::apache::thrift::protocol::TType _etype1162;
+ xfer += iprot->readListBegin(_etype1162, _size1159);
+ this->partNames.resize(_size1159);
+ uint32_t _i1163;
+ for (_i1163 = 0; _i1163 < _size1159; ++_i1163)
{
- xfer += iprot->readString(this->partNames[_i1162]);
+ xfer += iprot->readString(this->partNames[_i1163]);
}
xfer += iprot->readListEnd();
}
@@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
- std::vector<std::string> ::const_iterator _iter1163;
- for (_iter1163 = this->partNames.begin(); _iter1163 != this->partNames.end(); ++_iter1163)
+ std::vector<std::string> ::const_iterator _iter1164;
+ for (_iter1164 = this->partNames.begin(); _iter1164 != this->partNames.end(); ++_iter1164)
{
- xfer += oprot->writeString((*_iter1163));
+ xfer += oprot->writeString((*_iter1164));
}
xfer += oprot->writeListEnd();
}
@@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
- std::vector<std::string> ::const_iterator _iter1164;
- for (_iter1164 = (*(this->partNames)).begin(); _iter1164 != (*(this->partNames)).end(); ++_iter1164)
+ std::vector<std::string> ::const_iterator _iter1165;
+ for (_iter1165 = (*(this->partNames)).begin(); _iter1165 != (*(this->partNames)).end(); ++_iter1165)
{
- xfer += oprot->writeString((*_iter1164));
+ xfer += oprot->writeString((*_iter1165));
}
xfer += oprot->writeListEnd();
}
@@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1165;
- ::apache::thrift::protocol::TType _etype1168;
- xfer += iprot->readListBegin(_etype1168, _size1165);
- this->success.resize(_size1165);
- uint32_t _i1169;
- for (_i1169 = 0; _i1169 < _size1165; ++_i1169)
+ uint32_t _size1166;
+ ::apache::thrift::protocol::TType _etype1169;
+ xfer += iprot->readListBegin(_etype1169, _size1166);
+ this->success.resize(_size1166);
+ uint32_t _i1170;
+ for (_i1170 = 0; _i1170 < _size1166; ++_i1170)
{
- xfer += iprot->readString(this->success[_i1169]);
+ xfer += iprot->readString(this->success[_i1170]);
}
xfer += iprot->readListEnd();
}
@@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1170;
- for (_iter1170 = this->success.begin(); _iter1170 != this->success.end(); ++_iter1170)
+ std::vector<std::string> ::const_iterator _iter1171;
+ for (_iter1171 = this->success.begin(); _iter1171 != this->success.end(); ++_iter1171)
{
- xfer += oprot->writeString((*_iter1170));
+ xfer += oprot->writeString((*_iter1171));
}
xfer += oprot->writeListEnd();
}
@@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1171;
- ::apache::thrift::protocol::TType _etype1174;
- xfer += iprot->readListBegin(_etype1174, _size1171);
- (*(this->success)).resize(_size1171);
- uint32_t _i1175;
- for (_i1175 = 0; _i1175 < _size1171; ++_i1175)
+ uint32_t _size1172;
+ ::apache::thrift::protocol::TType _etype1175;
+ xfer += iprot->readListBegin(_etype1175, _size1172);
+ (*(this->success)).resize(_size1172);
+ uint32_t _i1176;
+ for (_i1176 = 0; _i1176 < _size1172; ++_i1176)
{
- xfer += iprot->readString((*(this->success))[_i1175]);
+ xfer += iprot->readString((*(this->success))[_i1176]);
}
xfer += iprot->readListEnd();
}
@@ -7075,14 +7075,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1176;
- ::apache::thrift::protocol::TType _etype1179;
- xfer += iprot->readListBegin(_etype1179, _size1176);
- this->success.resize(_size1176);
- uint32_t _i1180;
- for (_i1180 = 0; _i1180 < _size1176; ++_i1180)
+ uint32_t _size1177;
+ ::apache::thrift::protocol::TType _etype1180;
+ xfer += iprot->readListBegin(_etype1180, _size1177);
+ this->success.resize(_size1177);
+ uint32_t _i1181;
+ for (_i1181 = 0; _i1181 < _size1177; ++_i1181)
{
- xfer += iprot->readString(this->success[_i1180]);
+ xfer += iprot->readString(this->success[_i1181]);
}
xfer += iprot->readListEnd();
}
@@ -7121,10 +7121,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1181;
- for (_iter1181 = this->success.begin(); _iter1181 != this->success.end(); ++_iter1181)
+ std::vector<std::string> ::const_iterator _iter1182;
+ for (_iter1182 = this->success.begin(); _iter1182 != this->success.end(); ++_iter1182)
{
- xfer += oprot->writeString((*_iter1181));
+ xfer += oprot->writeString((*_iter1182));
}
xfer += oprot->writeListEnd();
}
@@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1182;
- ::apache::thrift::protocol::TType _etype1185;
- xfer += iprot->readListBegin(_etype1185, _size1182);
- (*(this->success)).resize(_size1182);
- uint32_t _i1186;
- for (_i1186 = 0; _i1186 < _size1182; ++_i1186)
+ uint32_t _size1183;
+ ::apache::thrift::protocol::TType _etype1186;
+ xfer += iprot->readListBegin(_etype1186, _size1183);
+ (*(this->success)).resize(_size1183);
+ uint32_t _i1187;
+ for (_i1187 = 0; _i1187 < _size1183; ++_i1187)
{
- xfer += iprot->readString((*(this->success))[_i1186]);
+ xfer += iprot->readString((*(this->success))[_i1187]);
}
xfer += iprot->readListEnd();
}
@@ -7314,14 +7314,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1187;
- ::apache::thrift::protocol::TType _etype1190;
- xfer += iprot->readListBegin(_etype1190, _size1187);
- this->success.resize(_size1187);
- uint32_t _i1191;
- for (_i1191 = 0; _i1191 < _size1187; ++_i1191)
+ uint32_t _size1188;
+ ::apache::thrift::protocol::TType _etype1191;
+ xfer += iprot->readListBegin(_etype1191, _size1188);
+ this->success.resize(_size1188);
+ uint32_t _i1192;
+ for (_i1192 = 0; _i1192 < _size1188; ++_i1192)
{
- xfer += iprot->readString(this->success[_i1191]);
+ xfer += iprot->readString(this->success[_i1192]);
}
xfer += iprot->readListEnd();
}
@@ -7360,10 +7360,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1192;
- for (_iter1192 = this->success.begin(); _iter1192 != this->success.end(); ++_iter1192)
+ std::vector<std::string> ::const_iterator _iter1193;
+ for (_iter1193 = this->success.begin(); _iter1193 != this->success.end(); ++_iter1193)
{
- xfer += oprot->writeString((*_iter1192));
+ xfer += oprot->writeString((*_iter1193));
}
xfer += oprot->writeListEnd();
}
@@ -7408,14 +7408,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1193;
- ::apache::thrift::protocol::TType _etype1196;
- xfer += iprot->readListBegin(_etype1196, _size1193);
- (*(this->success)).resize(_size1193);
- uint32_t _i1197;
- for (_i1197 = 0; _i1197 < _size1193; ++_i1197)
+ uint32_t _size1194;
+ ::apache::thrift::protocol::TType _etype1197;
+ xfer += iprot->readListBegin(_etype1197, _size1194);
+ (*(this->success)).resize(_size1194);
+ uint32_t _i1198;
+ for (_i1198 = 0; _i1198 < _size1194; ++_i1198)
{
- xfer += iprot->readString((*(this->success))[_i1197]);
+ xfer += iprot->readString((*(this->success))[_i1198]);
}
xfer += iprot->readListEnd();
}
@@ -7490,14 +7490,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_types.clear();
- uint32_t _size1198;
- ::apache::thrift::protocol::TType _etype1201;
- xfer += iprot->readListBegin(_etype1201, _size1198);
- this->tbl_types.resize(_size1198);
- uint32_t _i1202;
- for (_i1202 = 0; _i1202 < _size1198; ++_i1202)
+ uint32_t _size1199;
+ ::apache::thrift::protocol::TType _etype1202;
+ xfer += iprot->readListBegin(_etype1202, _size1199);
+ this->tbl_types.resize(_size1199);
+ uint32_t _i1203;
+ for (_i1203 = 0; _i1203 < _size1199; ++_i1203)
{
- xfer += iprot->readString(this->tbl_types[_i1202]);
+ xfer += iprot->readString(this->tbl_types[_i1203]);
}
xfer += iprot->readListEnd();
}
@@ -7534,10 +7534,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
- std::vector<std::string> ::const_iterator _iter1203;
- for (_iter1203 = this->tbl_types.begin(); _iter1203 != this->tbl_types.end(); ++_iter1203)
+ std::vector<std::string> ::const_iterator _iter1204;
+ for (_iter1204 = this->tbl_types.begin(); _iter1204 != this->tbl_types.end(); ++_iter1204)
{
- xfer += oprot->writeString((*_iter1203));
+ xfer += oprot->writeString((*_iter1204));
}
xfer += oprot->writeListEnd();
}
@@ -7569,10 +7569,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
- std::vector<std::string> ::const_iterator _iter1204;
- for (_iter1204 = (*(this->tbl_types)).begin(); _iter1204 != (*(this->tbl_types)).end(); ++_iter1204)
+ std::vector<std::string> ::const_iterator _iter1205;
+ for (_iter1205 = (*(this->tbl_types)).begin(); _iter1205 != (*(this->tbl_types)).end(); ++_iter1205)
{
- xfer += oprot->writeString((*_iter1204));
+ xfer += oprot->writeString((*_iter1205));
}
xfer += oprot->writeListEnd();
}
@@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1205;
- ::apache::thrift::protocol::TType _etype1208;
- xfer += iprot->readListBegin(_etype1208, _size1205);
- this->success.resize(_size1205);
- uint32_t _i1209;
- for (_i1209 = 0; _i1209 < _size1205; ++_i1209)
+ uint32_t _size1206;
+ ::apache::thrift::protocol::TType _etype1209;
+ xfer += iprot->readListBegin(_etype1209, _size1206);
+ this->success.resize(_size1206);
+ uint32_t _i1210;
+ for (_i1210 = 0; _i1210 < _size1206; ++_i1210)
{
- xfer += this->success[_i1209].read(iprot);
+ xfer += this->success[_i1210].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<TableMeta> ::const_iterator _iter1210;
- for (_iter1210 = this->success.begin(); _iter1210 != this->success.end(); ++_iter1210)
+ std::vector<TableMeta> ::const_iterator _iter1211;
+ for (_iter1211 = this->success.begin(); _iter1211 != this->success.end(); ++_iter1211)
{
- xfer += (*_iter1210).write(oprot);
+ xfer += (*_iter1211).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1211;
- ::apache::thrift::protocol::TType _etype1214;
- xfer += iprot->readListBegin(_etype1214, _size1211);
- (*(this->success)).resize(_size1211);
- uint32_t _i1215;
- for (_i1215 = 0; _i1215 < _size1211; ++_i1215)
+ uint32_t _size1212;
+ ::apache::thrift::protocol::TType _etype1215;
+ xfer += iprot->readListBegin(_etype1215, _size1212);
+ (*(this->success)).resize(_size1212);
+ uint32_t _i1216;
+ for (_i1216 = 0; _i1216 < _size1212; ++_i1216)
{
- xfer += (*(this->success))[_i1215].read(iprot);
+ xfer += (*(this->success))[_i1216].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7852,14 +7852,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1216;
- ::apache::thrift::protocol::TType _etype1219;
- xfer += iprot->readListBegin(_etype1219, _size1216);
- this->success.resize(_size1216);
- uint32_t _i1220;
- for (_i1220 = 0; _i1220 < _size1216; ++_i1220)
+ uint32_t _size1217;
+ ::apache::thrift::protocol::TType _etype1220;
+ xfer += iprot->readListBegin(_etype1220, _size1217);
+ this->success.resize(_size1217);
+ uint32_t _i1221;
+ for (_i1221 = 0; _i1221 < _size1217; ++_i1221)
{
- xfer += iprot->readString(this->success[_i1220]);
+ xfer += iprot->readString(this->success[_i1221]);
}
xfer += iprot->readListEnd();
}
@@ -7898,10 +7898,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1221;
- for (_iter1221 = this->success.begin(); _iter1221 != this->success.end(); ++_iter1221)
+ std::vector<std::string> ::const_iterator _iter1222;
+ for (_iter1222 = this->success.begin(); _iter1222 != this->success.end(); ++_iter1222)
{
- xfer += oprot->writeString((*_iter1221));
+ xfer += oprot->writeString((*_iter1222));
}
xfer += oprot->writeListEnd();
}
@@ -7946,14 +7946,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1222;
- ::apache::thrift::protocol::TType _etype1225;
- xfer += iprot->readListBegin(_etype1225, _size1222);
- (*(this->success)).resize(_size1222);
- uint32_t _i1226;
- for (_i1226 = 0; _i1226 < _size1222; ++_i1226)
+ uint32_t _size1223;
+ ::apache::thrift::protocol::TType _etype1226;
+ xfer += iprot->readListBegin(_etype1226, _size1223);
+ (*(this->success)).resize(_size1223);
+ uint32_t _i1227;
+ for (_i1227 = 0; _i1227 < _size1223; ++_i1227)
{
- xfer += iprot->readString((*(this->success))[_i1226]);
+ xfer += iprot->readString((*(this->success))[_i1227]);
}
xfer += iprot->readListEnd();
}
@@ -8263,14 +8263,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1227;
- ::apache::thrift::protocol::TType _etype1230;
- xfer += iprot->readListBegin(_etype1230, _size1227);
- this->tbl_names.resize(_size1227);
- uint32_t _i1231;
- for (_i1231 = 0; _i1231 < _size1227; ++_i1231)
+ uint32_t _size1228;
+ ::apache::thrift::protocol::TType _etype1231;
+ xfer += iprot->readListBegin(_etype1231, _size1228);
+ this->tbl_names.resize(_size1228);
+ uint32_t _i1232;
+ for (_i1232 = 0; _i1232 < _size1228; ++_i1232)
{
- xfer += iprot->readString(this->tbl_names[_i1231]);
+ xfer += iprot->readString(this->tbl_names[_i1232]);
}
xfer += iprot->readListEnd();
}
@@ -8303,10 +8303,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1232;
- for (_iter1232 = this->tbl_names.begin(); _iter1232 != this->tbl_names.end(); ++_iter1232)
+ std::vector<std::string> ::const_iterator _iter1233;
+ for (_iter1233 = this->tbl_names.begin(); _iter1233 != this->tbl_names.end(); ++_iter1233)
{
- xfer += oprot->writeString((*_iter1232));
+ xfer += oprot->writeString((*_iter1233));
}
xfer += oprot->writeListEnd();
}
@@ -8334,10 +8334,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1233;
- for (_iter1233 = (*(this->tbl_names)).begin(); _iter1233 != (*(this->tbl_names)).end(); ++_iter1233)
+ std::vector<std::string> ::const_iterator _iter1234;
+ for (_iter1234 = (*(this->tbl_names)).begin(); _iter1234 != (*(this->tbl_names)).end(); ++_iter1234)
{
- xfer += oprot->writeString((*_iter1233));
+ xfer += oprot->writeString((*_iter1234));
}
xfer += oprot->writeListEnd();
}
@@ -8378,14 +8378,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1234;
- ::apache::thrift::protocol::TType _etype1237;
- xfer += iprot->readListBegin(_etype1237, _size1234);
- this->success.resize(_size1234);
- uint32_t _i1238;
- for (_i1238 = 0; _i1238 < _size1234; ++_i1238)
+ uint32_t _size1235;
+ ::apache::thrift::protocol::TType _etype1238;
+ xfer += iprot->readListBegin(_etype1238, _size1235);
+ this->success.resize(_size1235);
+ uint32_t _i1239;
+ for (_i1239 = 0; _i1239 < _size1235; ++_i1239)
{
- xfer += this->success[_i1238].read(iprot);
+ xfer += this->success[_i1239].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8416,10 +8416,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Table> ::const_iterator _iter1239;
- for (_iter1239 = this->success.begin(); _iter1239 != this->success.end(); ++_iter1239)
+ std::vector<Table> ::const_iterator _iter1240;
+ for (_iter1240 = this->success.begin(); _iter1240 != this->success.end(); ++_iter1240)
{
- xfer += (*_iter1239).write(oprot);
+ xfer += (*_iter1240).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8460,14 +8460,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1240;
- ::apache::thrift::protocol::TType _etype1243;
- xfer += iprot->readListBegin(_etype1243, _size1240);
- (*(this->success)).resize(_size1240);
- uint32_t _i1244;
- for (_i1244 = 0; _i1244 < _size1240; ++_i1244)
+ uint32_t _size1241;
+ ::apache::thrift::protocol::TType _etype1244;
+ xfer += iprot->readListBegin(_etype1244, _size1241);
+ (*(this->success)).resize(_size1241);
+ uint32_t _i1245;
+ for (_i1245 = 0; _i1245 < _size1241; ++_i1245)
{
- xfer += (*(this->success))[_i1244].read(iprot);
+ xfer += (*(this->success))[_i1245].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9000,14 +9000,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1245;
- ::apache::thrift::protocol::TType _etype1248;
- xfer += iprot->readListBegin(_etype1248, _size1245);
- this->tbl_names.resize(_size1245);
- uint32_t _i1249;
- for (_i1249 = 0; _i1249 < _size1245; ++_i1249)
+ uint32_t _size1246;
+ ::apache::thrift::protocol::TType _etype1249;
+ xfer += iprot->readListBegin(_etype1249, _size1246);
+ this->tbl_names.resize(_size1246);
+ uint32_t _i1250;
+ for (_i1250 = 0; _i1250 < _size1246; ++_i1250)
{
- xfer += iprot->readString(this->tbl_names[_i1249]);
+ xfer += iprot->readString(this->tbl_names[_i1250]);
}
xfer += iprot->readListEnd();
}
@@ -9040,10 +9040,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(:
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1250;
- for (_iter1250 = this->tbl_names.begin(); _iter1250 != this->tbl_names.end(); ++_iter1250)
+ std::vector<std::string> ::const_iterator _iter1251;
+ for (_iter1251 = this->tbl_names.begin(); _iter1251 != this->tbl_names.end(); ++_iter1251)
{
- xfer += oprot->writeString((*_iter1250));
+ xfer += oprot->writeString((*_iter1251));
}
xfer += oprot->writeListEnd();
}
@@ -9071,10 +9071,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write(
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1251;
- for (_iter1251 = (*(this->tbl_names)).begin(); _iter1251 != (*(this->tbl_names)).end(); ++_iter1251)
+ std::vector<std::string> ::const_iterator _iter1252;
+ for (_iter1252 = (*(this->tbl_names)).begin(); _iter1252 != (*(this->tbl_names)).end(); ++_iter1252)
{
- xfer += oprot->writeString((*_iter1251));
+ xfer += oprot->writeString((*_iter1252));
}
xfer += oprot->writeListEnd();
}
@@ -9115,17 +9115,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read(
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1252;
- ::apache::thrift::protocol::TType _ktype1253;
- ::apache::thrift::protocol::TType _vtype1254;
- xfer += iprot->readMapBegin(_ktype1253, _vtype1254, _size1252);
- uint32_t _i1256;
- for (_i1256 = 0; _i1256 < _size1252; ++_i1256)
+ uint32_t _size1253;
+ ::apache::thrift::protocol::TType _ktype1254;
+ ::apache::thrift::protocol::TType _vtype1255;
+ xfer += iprot->readMapBegin(_ktype1254, _vtype1255, _size1253);
+ uint32_t _i1257;
+ for (_i1257 = 0; _i1257 < _size1253; ++_i1257)
{
- std::string _key1257;
- xfer += iprot->readString(_key1257);
- Materialization& _val1258 = this->success[_key1257];
- xfer += _val1258.read(iprot);
+ std::string _key1258;
+ xfer += iprot->readString(_key1258);
+ Materialization& _val1259 = this->success[_key1258];
+ xfer += _val1259.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -9180,11 +9180,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Materialization> ::const_iterator _iter1259;
- for (_iter1259 = this->success.begin(); _iter1259 != this->success.end(); ++_iter1259)
+ std::map<std::string, Materialization> ::const_iterator _iter1260;
+ for (_iter1260 = this->success.begin(); _iter1260 != this->success.end(); ++_iter1260)
{
- xfer += oprot->writeString(_iter1259->first);
- xfer += _iter1259->second.write(oprot);
+ xfer += oprot->writeString(_iter1260->first);
+ xfer += _iter1260->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -9237,17 +9237,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1260;
- ::apache::thrift::protocol::TType _ktype1261;
- ::apache::thrift::protocol::TType _vtype1262;
- xfer += iprot->readMapBegin(_ktype1261, _vtype1262, _size1260);
- uint32_t _i1264;
- for (_i1264 = 0; _i1264 < _size1260; ++_i1264)
+ uint32_t _size1261;
+ ::apache::thrift::protocol::TType _ktype1262;
+ ::apache::thrift::protocol::TType _vtype1263;
+ xfer += iprot->readMapBegin(_ktype1262, _vtype1263, _size1261);
+ uint32_t _i1265;
+ for (_i1265 = 0; _i1265 < _size1261; ++_i1265)
{
- std::string _key1265;
- xfer += iprot->readString(_key1265);
- Materialization& _val1266 = (*(this->success))[_key1265];
- xfer += _val1266.read(iprot);
+ std::string _key1266;
+ xfer += iprot->readString(_key1266);
+ Materialization& _val1267 = (*(this->success))[_key1266];
+ xfer += _val1267.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -9433,14 +9433,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1267;
- ::apache::thrift::protocol::TType _etype1270;
- xfer += iprot->readListBegin(_etype1270, _size1267);
- this->success.resize(_size1267);
- uint32_t _i1271;
- for (_i1271 = 0; _i1271 < _size1267; ++_i1271)
+ uint32_t _size1268;
+ ::apache::thrift::protocol::TType _etype1271;
+ xfer += iprot->readListBegin(_etype1271, _size1268);
+ this->success.resize(_size1268);
+ uint32_t _i1272;
+ for (_i1272 = 0; _i1272 < _size1268; ++_i1272)
{
- xfer += iprot->readString(this->success[_i1271]);
+ xfer += iprot->readString(this->success[_i1272]);
}
xfer += iprot->readListEnd();
}
@@ -9495,10 +9495,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1272;
- for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272)
+ std::vector<std::string> ::const_iterator _iter1273;
+ for (_iter1273 = this->success.begin(); _iter1273 != this->success.end(); ++_iter1273)
{
- xfer += oprot->writeString((*_iter1272));
+ xfer += oprot->writeString((*_iter1273));
}
xfer += oprot->writeListEnd();
}
@@ -9551,14 +9551,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1273;
- ::apache::thrift::protocol::TType _etype1276;
- xfer += iprot->readListBegin(_etype1276, _size1273);
- (*(this->success)).resize(_size1273);
- uint32_t _i1277;
- for (_i1277 = 0; _i1277 < _size1273; ++_i1277)
+ uint32_t _size1274;
+ ::apache::thrift::protocol::TType _etype1277;
+ xfer += iprot->readListBegin(_etype1277, _size1274);
+ (*(this->success)).resize(_size1274);
+ uint32_t _i1278;
+ for (_i1278 = 0; _i1278 < _size1274; ++_i1278)
{
- xfer += iprot->readString((*(this->success))[_i1277]);
+ xfer += iprot->readString((*(this->success))[_i1278]);
}
xfer += iprot->readListEnd();
}
@@ -10892,14 +10892,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1278;
- ::apache::thrift::protocol::TType _etype1281;
- xfer += iprot->readListBegin(_etype1281, _size1278);
- this->new_parts.resize(_size1278);
- uint32_t _i1282;
- for (_i1282 = 0; _i1282 < _size1278; ++_i1282)
+ uint32_t _size1279;
+ ::apache::thrift::protocol::TType _etype1282;
+ xfer += iprot->readListBegin(_etype1282, _size1279);
+ this->new_parts.resize(_size1279);
+ uint32_t _i1283;
+ for (_i1283 = 0; _i1283 < _size1279; ++_i1283)
{
- xfer += this->new_parts[_i1282].read(iprot);
+ xfer += this->new_parts[_i1283].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10928,10 +10928,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<Partition> ::const_iterator _iter1283;
- for (_iter1283 = this->new_parts.begin(); _iter1283 != this->new_parts.end(); ++_iter1283)
+ std::vector<Partition> ::const_iterator _iter1284;
+ for (_iter1284 = this->new_parts.begin(); _iter1284 != this->new_parts.end(); ++_iter1284)
{
- xfer += (*_iter1283).write(oprot);
+ xfer += (*_iter1284).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10955,10 +10955,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<Partition> ::const_iterator _iter1284;
- for (_iter1284 = (*(this->new_parts)).begin(); _iter1284 != (*(this->new_parts)).end(); ++_iter1284)
+ std::vector<Partition> ::const_iterator _iter1285;
+ for (_iter1285 = (*(this->new_parts)).begin(); _iter1285 != (*(this->new_parts)).end(); ++_iter1285)
{
- xfer += (*_iter1284).write(oprot);
+ xfer += (*_iter1285).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11167,14 +11167,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1285;
- ::apache::thrift::protocol::TType _etype1288;
- xfer += iprot->readListBegin(_etype1288, _size1285);
- this->new_parts.resize(_size1285);
- uint32_t _i1289;
- for (_i1289 = 0; _i1289 < _size1285; ++_i1289)
+ uint32_t _size1286;
+ ::apache::thrift::protocol::TType _etype1289;
+ xfer += iprot->readListBegin(_etype1289, _size1286);
+ this->new_parts.resize(_size1286);
+ uint32_t _i1290;
+ for (_i1290 = 0; _i1290 < _size1286; ++_i1290)
{
- xfer += this->new_parts[_i1289].read(iprot);
+ xfer += this->new_parts[_i1290].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11203,10 +11203,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<PartitionSpec> ::const_iterator _iter1290;
- for (_iter1290 = this->new_parts.begin(); _iter1290 != this->new_parts.end(); ++_iter1290)
+ std::vector<PartitionSpec> ::const_iterator _iter1291;
+ for (_iter1291 = this->new_parts.begin(); _iter1291 != this->new_parts.end(); ++_iter1291)
{
- xfer += (*_iter1290).write(oprot);
+ xfer += (*_iter1291).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11230,10 +11230,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<PartitionSpec> ::const_iterator _iter1291;
- for (_iter1291 = (*(this->new_parts)).begin(); _iter1291 != (*(this->new_parts)).end(); ++_iter1291)
+ std::vector<PartitionSpec> ::const_iterator _iter1292;
+ for (_iter1292 = (*(this->new_parts)).begin(); _iter1292 != (*(this->new_parts)).end(); ++_iter1292)
{
- xfer += (*_iter1291).write(oprot);
+ xfer += (*_iter1292).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11458,14 +11458,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1292;
- ::apache::thrift::protocol::TType _etype1295;
- xfer += iprot->readListBegin(_etype1295, _size1292);
- this->part_vals.resize(_size1292);
- uint32_t _i1296;
- for (_i1296 = 0; _i1296 < _size1292; ++_i1296)
+ uint32_t _size1293;
+ ::apache::thrift::protocol::TType _etype1296;
+ xfer += iprot->readListBegin(_etype1296, _size1293);
+ this->part_vals.resize(_size1293);
+ uint32_t _i1297;
+ for (_i1297 = 0; _i1297 < _size1293; ++_i1297)
{
- xfer += iprot->readString(this->part_vals[_i1296]);
+ xfer += iprot->readString(this->part_vals[_i1297]);
}
xfer += iprot->readListEnd();
}
@@ -11502,10 +11502,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1297;
- for (_iter1297 = this->part_vals.begin(); _iter1297 != this->part_vals.end(); ++_iter1297)
+ std::vector<std::string> ::const_iterator _iter1298;
+ for (_iter1298 = this->part_vals.begin(); _iter1298 != this->part_vals.end(); ++_iter1298)
{
- xfer += oprot->writeString((*_iter1297));
+ xfer += oprot->writeString((*_iter1298));
}
xfer += oprot->writeListEnd();
}
@@ -11537,10 +11537,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1298;
- for (_iter1298 = (*(this->part_vals)).begin(); _iter1298 != (*(this->part_vals)).end(); ++_iter1298)
+ std::vector<std::string> ::const_iterator _iter1299;
+ for (_iter1299 = (*(this->part_vals)).begin(); _iter1299 != (*(this->part_vals)).end(); ++_iter1299)
{
- xfer += oprot->writeString((*_iter1298));
+ xfer += oprot->writeString((*_iter1299));
}
xfer += oprot->writeListEnd();
}
@@ -12012,14 +12012,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1299;
- ::apache::thrift::protocol::TType _etype1302;
- xfer += iprot->readListBegin(_etype1302, _size1299);
- this->part_vals.resize(_size1299);
- uint32_t _i1303;
- for (_i1303 = 0; _i1303 < _size1299; ++_i1303)
+ uint32_t _size1300;
+ ::apache::thrift::protocol::TType _etype1303;
+ xfer += iprot->readListBegin(_etype1303, _size1300);
+ this->part_vals.resize(_size1300);
+ uint32_t _i1304;
+ for (_i1304 = 0; _i1304 < _size1300; ++_i1304)
{
- xfer += iprot->readString(this->part_vals[_i1303]);
+ xfer += iprot->readString(this->part_vals[_i1304]);
}
xfer += iprot->readListEnd();
}
@@ -12064,10 +12064,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1304;
- for (_iter1304 = this->part_vals.begin(); _iter1304 != this->part_vals.end(); ++_iter1304)
+ std::vector<std::string> ::const_iterator _iter1305;
+ for (_iter1305 = this->part_vals.begin(); _iter1305 != this->part_vals.end(); ++_iter1305)
{
- xfer += oprot->writeString((*_iter1304));
+ xfer += oprot->writeString((*_iter1305));
}
xfer += oprot->writeListEnd();
}
@@ -12103,10 +12103,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1305;
- for (_iter1305 = (*(this->part_vals)).begin(); _iter1305 != (*(this->part_vals)).end(); ++_iter1305)
+ std::vector<std::string> ::const_iterator _iter1306;
+ for (_iter1306 = (*(this->part_vals)).begin(); _iter1306 != (*(this->part_vals)).end(); ++_iter1306)
{
- xfer += oprot->writeString((*_iter1305));
+ xfer += oprot->writeString((*_iter1306));
}
xfer += oprot->writeListEnd();
}
@@ -12909,14 +12909,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1306;
- ::apache::thrift::protocol::TType _etype1309;
- xfer += iprot->readListBegin(_etype1309, _size1306);
- this->part_vals.resize(_size1306);
- uint32_t _i1310;
- for (_i1310 = 0; _i1310 < _size1306; ++_i1310)
+ uint32_t _size1307;
+ ::apache::thrift::protocol::TType _etype1310;
+ xfer += iprot->readListBegin(_etype1310, _size1307);
+ this->part_vals.resize(_size1307);
+ uint32_t _i1311;
+ for (_i1311 = 0; _i1311 < _size1307; ++_i1311)
{
- xfer += iprot->readString(this->part_vals[_i1310]);
+ xfer += iprot->readString(this->part_vals[_i1311]);
}
xfer += iprot->readListEnd();
}
@@ -12961,10 +12961,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1311;
- for (_iter1311 = this->part_vals.begin(); _iter1311 != this->part_vals.end(); ++_iter1311)
+ std::vector<std::string> ::const_iterator _iter1312;
+ for (_iter1312 = this->part_vals.begin(); _iter1312 != this->part_vals.end(); ++_iter1312)
{
- xfer += oprot->writeString((*_iter1311));
+ xfer += oprot->writeString((*_iter1312));
}
xfer += oprot->writeListEnd();
}
@@ -13000,10 +13000,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1312;
- for (_iter1312 = (*(this->part_vals)).begin(); _iter1312 != (*(this->part_vals)).end(); ++_iter1312)
+ std::vector<std::string> ::const_iterator _iter1313;
+ for (_iter1313 = (*(this->part_vals)).begin(); _iter1313 != (*(this->part_vals)).end(); ++_iter1313)
{
- xfer += oprot->writeString((*_iter1312));
+ xfer += oprot->writeString((*_iter1313));
}
xfer += oprot->writeListEnd();
}
@@ -13212,14 +13212,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1313;
- ::apache::thrift::protocol::TType _etype1316;
- xfer += iprot->readListBegin(_etype1316, _size1313);
- this->part_vals.resize(_size1313);
- uint32_t _i1317;
- for (_i1317 = 0; _i1317 < _size1313; ++_i1317)
+ uint32_t _size1314;
+ ::apache::thrift::protocol::TType _etype1317;
+ xfer += iprot->readListBegin(_etype1317, _size1314);
+ this->part_vals.resize(_size1314);
+ uint32_t _i1318;
+ for (_i1318 = 0; _i1318 < _size1314; ++_i1318)
{
- xfer += iprot->readString(this->part_vals[_i1317]);
+ xfer += iprot->readString(this->part_vals[_i1318]);
}
xfer += iprot->readListEnd();
}
@@ -13272,10 +13272,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1318;
- for (_iter1318 = this->part_vals.begin(); _iter1318 != this->part_vals.end(); ++_iter1318)
+ std::vector<std::string> ::const_iterator _iter1319;
+ for (_iter1319 = this->part_vals.begin(); _iter1319 != this->part_vals.end(); ++_iter1319)
{
- xfer += oprot->writeString((*_iter1318));
+ xfer += oprot->writeString((*_iter1319));
}
xfer += oprot->writeListEnd();
}
@@ -13315,10 +13315,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1319;
- for (_iter1319 = (*(this->part_vals)).begin(); _iter1319 != (*(this->part_vals)).end(); ++_iter1319)
+ std::vector<std::string> ::const_iterator _iter1320;
+ for (_iter1320 = (*(this->part_vals)).begin(); _iter1320 != (*(this->part_vals)).end(); ++_iter1320)
{
- xfer += oprot->writeString((*_iter1319));
+ xfer += oprot->writeString((*_iter1320));
}
xfer += oprot->writeListEnd();
}
@@ -14324,14 +14324,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1320;
- ::apache::thrift::protocol::TType _etype1323;
- xfer += iprot->readListBegin(_etype1323, _size1320);
- this->part_vals.resize(_size1320);
- uint32_t _i1324;
- for (_i1324 = 0; _i1324 < _size1320; ++_i1324)
+ uint32_t _size1321;
+ ::apache::thrift::protocol::TType _etype1324;
+ xfer += iprot->readListBegin(_etype1324, _size1321);
+ this->part_vals.resize(_size1321);
+ uint32_t _i1325;
+ for (_i1325 = 0; _i1325 < _size1321; ++_i1325)
{
- xfer += iprot->readString(this->part_vals[_i1324]);
+ xfer += iprot->readString(this->part_vals[_i1325]);
}
xfer += iprot->readListEnd();
}
@@ -14368,10 +14368,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1325;
- for (_iter1325 = this->part_vals.begin(); _iter1325 != this->part_vals.end(); ++_iter1325)
+ std::vector<std::string> ::const_iterator _iter1326;
+ for (_iter1326 = this->part_vals.begin(); _iter1326 != this->part_vals.end(); ++_iter1326)
{
- xfer += oprot->writeString((*_iter1325));
+ xfer += oprot->writeString((*_iter1326));
}
xfer += oprot->writeListEnd();
}
@@ -14403,10 +14403,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1326;
- for (_iter1326 = (*(this->part_vals)).begin(); _iter1326 != (*(this->part_vals)).end(); ++_iter1326)
+ std::vector<std::string> ::const_iterator _iter1327;
+ for (_iter1327 = (*(this->part_vals)).begin(); _iter1327 != (*(this->part_vals)).end(); ++_iter1327)
{
- xfer += oprot->writeString((*_iter1326));
+ xfer += oprot->writeString((*_iter1327));
}
xfer += oprot->writeListEnd();
}
@@ -14595,17 +14595,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partitionSpecs.clear();
- uint32_t _size1327;
- ::apache::thrift::protocol::TType _ktype1328;
- ::apache::thrift::protocol::TType _vtype1329;
- xfer += iprot->readMapBegin(_ktype1328, _vtype1329, _size1327);
- uint32_t _i1331;
- for (_i1331 = 0; _i1331 < _size1327; ++_i1331)
+ uint32_t _size1328;
+ ::apache::thrift::protocol::TType _ktype1329;
+ ::apache::thrift::protocol::TType _vtype1330;
+ xfer += iprot->readMapBegin(_ktype1329, _vtype1330, _size1328);
+ uint32_t _i1332;
+ for (_i1332 = 0; _i1332 < _size1328; ++_i1332)
{
- std::string _key1332;
- xfer += iprot->readString(_key1332);
- std::string& _val1333 = this->partitionSpecs[_key1332];
- xfer += iprot->readString(_val1333);
+ std::string _key1333;
+ xfer += iprot->readString(_key1333);
+ std::string& _val1334 = this->partitionSpecs[_key1333];
+ xfer += iprot->readString(_val1334);
}
xfer += iprot->readMapEnd();
}
@@ -14666,11 +14666,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
- std::map<std::string, std::string> ::const_iterator _iter1334;
- for (_iter1334 = this->partitionSpecs.begin(); _iter1334 != this->partitionSpecs.end(); ++_iter1334)
+ std::map<std::string, std::string> ::const_iterator _iter1335;
+ for (_iter1335 = this->partitionSpecs.begin(); _iter1335 != this->partitionSpecs.end(); ++_iter1335)
{
- xfer += oprot->writeString(_iter1334->first);
- xfer += oprot->writeString(_iter1334->second);
+ xfer += oprot->writeString(_iter1335->first);
+ xfer += oprot->writeString(_iter1335->second);
}
xfer += oprot->writeMapEnd();
}
@@ -14710,11 +14710,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
- std::map<std::string, std::string> ::const_iterator _iter1335;
- for (_iter1335 = (*(this->partitionSpecs)).begin(); _iter1335 != (*(this->partitionSpecs)).end(); ++_iter1335)
+ std::map<std::string, std::string> ::const_iterator _iter1336;
+ for (_iter1336 = (*(this->partitionSpecs)).begin(); _iter1336 != (*(this->partitionSpecs)).end(); ++_iter1336)
{
- xfer += oprot->writeString(_iter1335->first);
- xfer += oprot->writeString(_iter1335->second);
+ xfer += oprot->writeString(_iter1336->first);
+ xfer += oprot->writeString(_iter1336->second);
}
xfer += oprot->writeMapEnd();
}
@@ -14959,17 +14959,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partitionSpecs.clear();
- uint32_t _size1336;
- ::apache::thrift::protocol::TType _ktype1337;
- ::apache::thrift::protocol::TType _vtype1338;
- xfer += iprot->readMapBegin(_ktype1337, _vtype1338, _size1336);
- uint32_t _i1340;
- for (_i1340 = 0; _i1340 < _size1336; ++_i1340)
+ uint32_t _size1337;
+ ::apache::thrift::protocol::TType _ktype1338;
+ ::apache::thrift::protocol::TType _vtype1339;
+ xfer += iprot->readMapBegin(_ktype1338, _vtype1339, _size1337);
+ uint32_t _i1341;
+ for (_i1341 = 0; _i1341 < _size1337; ++_i1341)
{
- std::string _key1341;
- xfer += iprot->readString(_key1341);
- std::string& _val1342 = this->partitionSpecs[_key1341];
- xfer += iprot->readString(_val1342);
+ std::string _key1342;
+ xfer += iprot->readString(_key1342);
+ std::string& _val1343 = this->partitionSpecs[_key1342];
+ xfer += iprot->readString(_val1343);
}
xfer += iprot->readMapEnd();
}
@@ -15030,11 +15030,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
- std::map<std::string, std::string> ::const_iterator _iter1343;
- for (_iter1343 = this->partitionSpecs.begin(); _iter1343 != this->partitionSpecs.end(); ++_iter1343)
+ std::map<std::string, std::string> ::const_iterator _iter1344;
+ for (_iter1344 = this->partitionSpecs.begin(); _iter1344 != this->partitionSpecs.end(); ++_iter1344)
{
- xfer += oprot->writeString(_iter1343->fir
<TRUNCATED>
[5/7] hive git commit: HIVE-18350 : load data should rename files
consistent with insert statements. (Deepak Jaiswal,
reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Posted by dj...@apache.org.
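(Illustrative note, not part of the patch: the q.out diffs below show LOAD DATA files landing under insert-style bucket names such as 000000_0 .. 000003_0. The following is a minimal, hypothetical Java sketch of that naming idea only -- it is not the code from this commit and the class, method, and file names are invented for illustration.)

// Hypothetical sketch of "rename load-data files consistent with insert statements":
// staged files are moved into the table/partition directory under 6-digit
// bucket-style names (000000_0, 000001_0, ...), matching what INSERT produces.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class LoadDataRenameSketch {

    // Build an insert-style file name for the given bucket number and copy suffix.
    static String bucketFileName(int bucketId, int copyIndex) {
        String base = String.format("%06d_0", bucketId);
        return copyIndex == 0 ? base : base + "_copy_" + copyIndex;
    }

    // Rename each staged file to the next insert-style name in the target directory.
    static void renameLikeInsert(List<Path> stagedFiles, Path targetDir) throws IOException {
        int bucketId = 0;
        for (Path src : stagedFiles) {
            Path dst = targetDir.resolve(bucketFileName(bucketId++, 0));
            Files.move(src, dst);
        }
    }

    public static void main(String[] args) throws IOException {
        Path target = Files.createTempDirectory("bucket_big_ds");
        Path staged = Files.createTempFile("raw_input_", ".txt");
        renameLikeInsert(List.of(staged), target);
        // Prints something like .../bucket_big_ds.../000000_0
        System.out.println(Files.list(target).findFirst().orElseThrow());
    }
}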
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
index 054b0d0..d4472cf 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -23,27 +23,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -64,6 +48,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -81,6 +81,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -101,16 +117,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
@@ -121,7 +137,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -129,7 +145,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
@@ -137,7 +153,7 @@ STAGE PLANS:
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -145,7 +161,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -167,16 +183,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 74872 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 158376 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -184,7 +200,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 117 Data size: 78681 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 165502 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -208,7 +224,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -216,7 +232,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -224,7 +240,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -232,7 +248,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -256,7 +272,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -264,7 +280,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -272,7 +288,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -280,7 +296,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -375,16 +391,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 4 Data size: 2996 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: NONE
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
@@ -395,7 +411,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -403,7 +419,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
@@ -411,7 +427,7 @@ STAGE PLANS:
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -419,7 +435,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -441,16 +457,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 74872 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 158376 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -458,7 +474,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 117 Data size: 78681 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 165502 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -482,7 +498,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -490,7 +506,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -498,7 +514,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -506,7 +522,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -530,7 +546,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -538,7 +554,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -546,7 +562,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -554,7 +570,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
index 95d3298..5cd5d79 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -232,16 +248,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 56 Data size: 37620 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 79280 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -249,7 +265,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 1
- Statistics: Num rows: 59 Data size: 39903 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 125 Data size: 82847 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -273,7 +289,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -281,7 +297,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -289,7 +305,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -297,7 +313,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -370,7 +386,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -506,16 +522,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 37620 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 79280 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -523,7 +539,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 59 Data size: 39903 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 125 Data size: 82847 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -547,7 +563,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -555,7 +571,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -563,7 +579,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -571,7 +587,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -644,7 +660,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -780,16 +796,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 37620 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 79280 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 54 Data size: 36276 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 114 Data size: 75316 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -797,7 +813,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 59 Data size: 39903 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 125 Data size: 82847 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -821,7 +837,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -829,7 +845,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -837,7 +853,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -845,7 +861,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -918,4 +934,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
index e711715..a18f4b2 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -22,27 +22,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -62,6 +46,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -101,7 +101,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -109,13 +109,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -123,7 +123,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -131,13 +131,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
@@ -187,7 +187,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -195,13 +195,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -209,7 +209,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -217,13 +217,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -318,7 +318,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -326,13 +326,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -340,7 +340,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -348,13 +348,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
@@ -404,7 +404,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -412,13 +412,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -426,7 +426,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -434,13 +434,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -562,7 +562,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -570,13 +570,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -584,7 +584,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -592,13 +592,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -639,7 +639,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -647,13 +647,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -661,7 +661,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -669,13 +669,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
index 53c685c..fdea211 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -114,6 +130,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -249,16 +281,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 112 Data size: 74872 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 158376 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -266,7 +298,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 1
- Statistics: Num rows: 117 Data size: 78681 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 165502 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -290,7 +322,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -298,7 +330,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -306,7 +338,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -314,7 +346,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -338,7 +370,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -346,7 +378,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -354,7 +386,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -362,7 +394,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -438,7 +470,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -574,16 +606,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 74872 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 158376 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -591,7 +623,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 117 Data size: 78681 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 165502 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -615,7 +647,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -623,7 +655,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -631,7 +663,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -639,7 +671,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -663,7 +695,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -671,7 +703,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -679,7 +711,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -687,7 +719,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -763,7 +795,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -899,16 +931,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 74872 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 158376 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 107 Data size: 71529 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 228 Data size: 150457 Basic stats: COMPLETE Column stats: NONE
Merge Join Operator
condition map:
Inner Join 0 to 1
@@ -916,7 +948,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 117 Data size: 78681 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 165502 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -940,7 +972,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -948,7 +980,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -956,7 +988,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -964,7 +996,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -988,7 +1020,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -996,7 +1028,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1004,7 +1036,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1012,7 +1044,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1088,4 +1120,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
[7/7] hive git commit: HIVE-18350 : load data should rename files
consistent with insert statements. (Deepak Jaiswal,
reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Posted by dj...@apache.org.
HIVE-18350 : load data should rename files consistent with insert statements. (Deepak Jaiswal, reviewed by Sergey Shelukhin and Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e9b63e4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e9b63e4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e9b63e4
Branch: refs/heads/master
Commit: 6e9b63e48b4f34ba26a6eefb354b0c94ee82256c
Parents: 1faadb0
Author: Deepak Jaiswal <dj...@apache.org>
Authored: Thu Feb 8 00:46:28 2018 -0800
Committer: Deepak Jaiswal <dj...@apache.org>
Committed: Thu Feb 8 00:48:20 2018 -0800
----------------------------------------------------------------------
.../hive/ql/exec/tez/CustomPartitionVertex.java | 71 +-
.../ql/exec/tez/CustomVertexConfiguration.java | 31 +-
.../hadoop/hive/ql/exec/tez/DagUtils.java | 16 +-
.../apache/hadoop/hive/ql/metadata/Table.java | 16 +-
.../hive/ql/optimizer/ConvertJoinMapJoin.java | 54 +-
.../annotation/OpTraitsRulesProcFactory.java | 35 +-
.../optimizer/spark/SparkMapJoinOptimizer.java | 3 +-
.../hive/ql/parse/LoadSemanticAnalyzer.java | 43 +
.../apache/hadoop/hive/ql/plan/OpTraits.java | 22 +-
.../hadoop/hive/ql/metadata/TestHive.java | 9 +-
.../clientpositive/auto_sortmerge_join_2.q | 12 +-
.../clientpositive/auto_sortmerge_join_4.q | 6 +-
.../clientpositive/auto_sortmerge_join_5.q | 10 +-
.../clientpositive/auto_sortmerge_join_7.q | 8 +-
.../bucket_mapjoin_mismatch1.q.out | 170 -
.../clientpositive/auto_sortmerge_join_2.q.out | 196 +-
.../clientpositive/auto_sortmerge_join_4.q.out | 104 +-
.../clientpositive/auto_sortmerge_join_5.q.out | 158 +-
.../clientpositive/auto_sortmerge_join_7.q.out | 168 +-
.../llap/auto_sortmerge_join_2.q.out | 132 +-
.../llap/auto_sortmerge_join_4.q.out | 74 +-
.../llap/auto_sortmerge_join_5.q.out | 112 +-
.../llap/auto_sortmerge_join_7.q.out | 114 +-
.../spark/auto_sortmerge_join_2.q.out | 104 +-
.../spark/auto_sortmerge_join_4.q.out | 74 +-
.../spark/auto_sortmerge_join_5.q.out | 118 +-
.../spark/auto_sortmerge_join_7.q.out | 114 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2240 +++++-----
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 3888 +++++++++---------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 32 +-
.../hive/metastore/api/BucketingVersion.java | 48 +
.../apache/hadoop/hive/metastore/api/Table.java | 230 +-
.../src/gen/thrift/gen-php/metastore/Types.php | 57 +
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 45 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 17 +-
.../hadoop/hive/metastore/ObjectStore.java | 5 +-
.../hadoop/hive/metastore/model/MTable.java | 37 +-
.../src/main/thrift/hive_metastore.thrift | 11 +-
.../hive/metastore/cache/TestCachedStore.java | 18 +-
.../TestTablesCreateDropAlterTruncate.java | 19 +-
40 files changed, 4676 insertions(+), 3945 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
index 26afe90..ef148d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
@@ -108,14 +108,15 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
private final Multimap<Integer, Integer> bucketToTaskMap = HashMultimap.<Integer, Integer> create();
private final Map<String, Multimap<Integer, InputSplit>> inputToGroupedSplitMap =
- new HashMap<String, Multimap<Integer, InputSplit>>();
+ new HashMap<>();
private int numInputsAffectingRootInputSpecUpdate = 1;
private int numInputsSeenSoFar = 0;
private final Map<String, EdgeManagerPluginDescriptor> emMap = Maps.newHashMap();
private final List<InputSplit> finalSplits = Lists.newLinkedList();
private final Map<String, InputSpecUpdate> inputNameInputSpecMap =
- new HashMap<String, InputSpecUpdate>();
+ new HashMap<>();
+ private Map<String, Integer> inputToBucketMap;
public CustomPartitionVertex(VertexManagerPluginContext context) {
super(context);
@@ -137,6 +138,7 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
this.mainWorkName = vertexConf.getInputName();
this.vertexType = vertexConf.getVertexType();
this.numInputsAffectingRootInputSpecUpdate = vertexConf.getNumInputs();
+ this.inputToBucketMap = vertexConf.getInputToBucketMap();
}
@Override
@@ -242,7 +244,7 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
}
Multimap<Integer, InputSplit> bucketToInitialSplitMap =
- getBucketSplitMapForPath(pathFileSplitsMap);
+ getBucketSplitMapForPath(inputName, pathFileSplitsMap);
try {
int totalResource = context.getTotalAvailableResource().getMemory();
@@ -532,20 +534,47 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
/*
* This method generates the map of bucket to file splits.
*/
- private Multimap<Integer, InputSplit> getBucketSplitMapForPath(
+ private Multimap<Integer, InputSplit> getBucketSplitMapForPath(String inputName,
Map<String, Set<FileSplit>> pathFileSplitsMap) {
- int bucketNum = 0;
Multimap<Integer, InputSplit> bucketToInitialSplitMap =
- ArrayListMultimap.<Integer, InputSplit> create();
+ ArrayListMultimap.create();
+ boolean fallback = false;
+ List<Integer> bucketIds = new ArrayList<>();
for (Map.Entry<String, Set<FileSplit>> entry : pathFileSplitsMap.entrySet()) {
- int bucketId = bucketNum % numBuckets;
+ // Extract the bucketId from pathFileSplitsMap. This is the more accurate
+ // method; however, it may not work in certain cases where bucket files are
+ // named after the files used while loading the data. In such cases, fall
+ // back to the old, potentially inaccurate method.
+ // Accepted file names look like 000000_0 or 000001_0_copy_1.
+ String bucketIdStr =
+ Utilities.getBucketFileNameFromPathSubString(entry.getKey());
+ int bucketId = Utilities.getBucketIdFromFile(bucketIdStr);
+ if (bucketId == -1) {
+ fallback = true;
+ LOG.info("Fallback to using older sort based logic to assign " +
+ "buckets to splits.");
+ bucketIds.clear();
+ break;
+ }
+ bucketIds.add(bucketId);
for (FileSplit fsplit : entry.getValue()) {
bucketToInitialSplitMap.put(bucketId, fsplit);
}
- bucketNum++;
+ }
+
+ int bucketNum = 0;
+ if (fallback) {
+ // This is the old logic which assumes that the filenames are sorted in
+ // alphanumeric order and mapped to appropriate bucket number.
+ for (Map.Entry<String, Set<FileSplit>> entry : pathFileSplitsMap.entrySet()) {
+ for (FileSplit fsplit : entry.getValue()) {
+ bucketToInitialSplitMap.put(bucketNum, fsplit);
+ }
+ bucketNum++;
+ }
}
// this is just for SMB join use-case. The numBuckets would be equal to that of the big table
@@ -553,16 +582,28 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
// data from the right buckets to the big table side. For e.g. Big table has 8 buckets and small
// table has 4 buckets, bucket 0 of small table needs to be sent to bucket 4 of the big table as
// well.
- if (bucketNum < numBuckets) {
- int loopedBucketId = 0;
- for (; bucketNum < numBuckets; bucketNum++) {
- for (InputSplit fsplit : bucketToInitialSplitMap.get(loopedBucketId)) {
- bucketToInitialSplitMap.put(bucketNum, fsplit);
+ if (numInputsAffectingRootInputSpecUpdate != 1 &&
+ inputName.compareTo(mainWorkName) != 0) {
+ // small table
+ int inputNumBuckets = inputToBucketMap.get(inputName);
+ if (fallback && bucketNum != inputNumBuckets) {
+ // The fallback mechanism kicked in; it only works correctly if there
+ // exists a file for each bucket, otherwise it may produce wrong results.
+ // Throw an error.
+
+ }
+ if (inputNumBuckets < numBuckets) {
+ // Need to send the splits to multiple buckets
+ for (int i = 1; i < numBuckets/inputNumBuckets; i++) {
+ int bucketIdBase = i * inputNumBuckets;
+ for (Integer bucketId : bucketIds) {
+ for (InputSplit fsplit : bucketToInitialSplitMap.get(bucketId)) {
+ bucketToInitialSplitMap.put(bucketIdBase + bucketId, fsplit);
+ }
+ }
}
- loopedBucketId++;
}
}
-
return bucketToInitialSplitMap;
}
}
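For readers skimming the diff, here is a minimal, self-contained sketch of the split-to-bucket mapping that getBucketSplitMapForPath() now performs: derive the bucket id from the file name, then replicate a small table's buckets across the big table's larger bucket space. The class, bucketIdFromFileName and its regex are illustrative stand-ins (not Hive APIs such as Utilities.getBucketIdFromFile), and plain file-name strings stand in for FileSplit objects.

import java.util.*;
import java.util.regex.*;

public class BucketSplitSketch {

  // Simplified stand-in for Utilities.getBucketIdFromFile: accepts names like
  // 000000_0 or 000001_0_copy_1 and returns the leading bucket id, or -1 when
  // the name does not follow the convention.
  static int bucketIdFromFileName(String name) {
    Matcher m = Pattern.compile("^(\\d{6})_\\d+(_copy_\\d+)?$").matcher(name);
    return m.matches() ? Integer.parseInt(m.group(1)) : -1;
  }

  public static void main(String[] args) {
    int bigTableBuckets = 4;          // numBuckets of the big table
    int smallTableBuckets = 2;        // inputNumBuckets of the small table
    List<String> smallTableFiles = Arrays.asList("000000_0", "000001_0");

    // bucket id -> "splits" (file names stand in for FileSplit here)
    Map<Integer, List<String>> bucketToSplits = new HashMap<>();
    for (String file : smallTableFiles) {
      int bucketId = bucketIdFromFileName(file);
      if (bucketId == -1) {
        throw new IllegalStateException("File does not follow bucket naming: " + file);
      }
      bucketToSplits.computeIfAbsent(bucketId, k -> new ArrayList<>()).add(file);
    }

    // Small-table bucket b must also feed big-table buckets b + i*smallTableBuckets,
    // mirroring the bucketIdBase loop added to getBucketSplitMapForPath().
    for (int i = 1; i < bigTableBuckets / smallTableBuckets; i++) {
      int bucketIdBase = i * smallTableBuckets;
      for (int b = 0; b < smallTableBuckets; b++) {
        bucketToSplits.computeIfAbsent(bucketIdBase + b, k -> new ArrayList<>())
            .addAll(bucketToSplits.getOrDefault(b, Collections.emptyList()));
      }
    }
    // Typically prints {0=[000000_0], 1=[000001_0], 2=[000000_0], 3=[000001_0]}
    System.out.println(bucketToSplits);
  }
}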
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java
index ef5e7ed..4301829 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.hive.ql.exec.tez;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
import org.apache.hadoop.io.Writable;
@@ -39,22 +42,24 @@ public class CustomVertexConfiguration implements Writable {
private VertexType vertexType = VertexType.AUTO_INITIALIZED_EDGES;
private int numInputs;
private String inputName;
+ private Map<String, Integer> inputToBucketMap;
public CustomVertexConfiguration() {
}
// this is the constructor to use for the Bucket map join case.
public CustomVertexConfiguration(int numBuckets, VertexType vertexType) {
- this(numBuckets, vertexType, "", 1);
+ this(numBuckets, vertexType, "", 1, null);
}
// this is the constructor to use for SMB.
public CustomVertexConfiguration(int numBuckets, VertexType vertexType, String inputName,
- int numInputs) {
+ int numInputs, Map<String, Integer> inputToBucketMap) {
this.numBuckets = numBuckets;
this.vertexType = vertexType;
this.numInputs = numInputs;
this.inputName = inputName;
+ this.inputToBucketMap = inputToBucketMap;
}
@Override
@@ -63,6 +68,14 @@ public class CustomVertexConfiguration implements Writable {
out.writeInt(this.numBuckets);
out.writeInt(numInputs);
out.writeUTF(inputName);
+ int sz = inputToBucketMap != null ? inputToBucketMap.size() : 0;
+ out.writeInt(sz);
+ if (sz > 0) {
+ for (Map.Entry<String, Integer> entry : inputToBucketMap.entrySet()) {
+ out.writeUTF(entry.getKey());
+ out.writeInt(entry.getValue());
+ }
+ }
}
@Override
@@ -71,6 +84,16 @@ public class CustomVertexConfiguration implements Writable {
this.numBuckets = in.readInt();
this.numInputs = in.readInt();
this.inputName = in.readUTF();
+ int sz = in.readInt();
+ Preconditions.checkState(sz >= 0);
+ if (sz == 0) {
+ this.inputToBucketMap = null;
+ } else {
+ this.inputToBucketMap = new HashMap<>();
+ for (int i = 0; i < sz; i++) {
+ this.inputToBucketMap.put(in.readUTF(), in.readInt());
+ }
+ }
}
public int getNumBuckets() {
@@ -88,4 +111,8 @@ public class CustomVertexConfiguration implements Writable {
public int getNumInputs() {
return numInputs;
}
+
+ public Map<String, Integer> getInputToBucketMap() {
+ return inputToBucketMap;
+ }
}
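The new inputToBucketMap field is serialized with a simple size-prefixed encoding: an int count followed by (UTF key, int value) pairs, with 0 meaning "no map". A minimal round-trip of just that field, using Hadoop's DataOutputBuffer/DataInputBuffer the same way DagUtils builds the vertex user payload; the "Map 1"/"Map 3" names are made-up vertex names, not values taken from this patch.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class InputToBucketMapRoundTrip {
  public static void main(String[] args) throws IOException {
    Map<String, Integer> inputToBucketMap = new HashMap<>();
    inputToBucketMap.put("Map 1", 2);   // small-table work name -> bucket count
    inputToBucketMap.put("Map 3", 4);

    // Encode: count, then (key, value) pairs, as in write()
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(inputToBucketMap.size());
    for (Map.Entry<String, Integer> e : inputToBucketMap.entrySet()) {
      out.writeUTF(e.getKey());
      out.writeInt(e.getValue());
    }

    // Decode: read count, then that many pairs, as in readFields()
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Map<String, Integer> decoded = new HashMap<>();
    int sz = in.readInt();
    for (int i = 0; i < sz; i++) {
      decoded.put(in.readUTF(), in.readInt());
    }
    System.out.println(decoded);        // {Map 1=2, Map 3=4} (order may vary)
  }
}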
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 9885038..0e75f6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -21,6 +21,7 @@ import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import javax.security.auth.login.LoginException;
@@ -568,13 +569,26 @@ public class DagUtils {
MultiMRInput.createConfigBuilder(conf, HiveInputFormat.class).build());
}
+ // Populated only for SMB joins, with an entry for each small table.
+ Map<String, Integer> inputToBucketMap = new HashMap<>();
+ if (mergeJoinWork.getMergeJoinOperator().getParentOperators().size() == 1
+ && mergeJoinWork.getMergeJoinOperator().getOpTraits() != null) {
+ // This is an SMB join.
+ for (BaseWork work : mapWorkList) {
+ MapWork mw = (MapWork) work;
+ Map<String, Operator<?>> aliasToWork = mw.getAliasToWork();
+ Preconditions.checkState(aliasToWork.size() == 1,
+ "More than 1 alias in SMB mapwork");
+ inputToBucketMap.put(mw.getName(), mw.getWorks().get(0).getOpTraits().getNumBuckets());
+ }
+ }
VertexManagerPluginDescriptor desc =
VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName());
// the +1 to the size is because of the main work.
CustomVertexConfiguration vertexConf =
new CustomVertexConfiguration(mergeJoinWork.getMergeJoinOperator().getConf()
.getNumBuckets(), vertexType, mergeJoinWork.getBigTableAlias(),
- mapWorkList.size() + 1);
+ mapWorkList.size() + 1, inputToBucketMap);
DataOutputBuffer dob = new DataOutputBuffer();
vertexConf.write(dob);
byte[] userPayload = dob.getData();
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 632a213..4ee0579 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -42,15 +42,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -188,6 +180,8 @@ public class Table implements Serializable {
t.setOwner(SessionState.getUserFromAuthenticator());
// set create time
t.setCreateTime((int) (System.currentTimeMillis() / 1000));
+ t.setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
+ t.setLoadInBucketedTable(false);
}
return t;
}
@@ -676,6 +670,10 @@ public class Table implements Serializable {
return tTable.getSd().getNumBuckets();
}
+ public BucketingVersion getBucketingVersion() {
+ return tTable.getBucketingVersion();
+ }
+
public void setInputFormatClass(String name) throws HiveException {
if (name == null) {
inputFormatClass = null;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index dc698c8..f30cf4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -26,9 +26,11 @@ import java.util.Map;
import java.util.Set;
import java.util.Stack;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.BucketingVersion;
import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
import org.apache.hadoop.hive.ql.exec.CommonJoinOperator;
import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
@@ -180,7 +182,8 @@ public class ConvertJoinMapJoin implements NodeProcessor {
MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversionPos, true);
// map join operator by default has no bucket cols and num of reduce sinks
// reduced by 1
- mapJoinOp.setOpTraits(new OpTraits(null, -1, null, joinOp.getOpTraits().getNumReduceSinks()));
+ mapJoinOp.setOpTraits(new OpTraits(null, -1, null,
+ joinOp.getOpTraits().getNumReduceSinks(), joinOp.getOpTraits().getBucketingVersion()));
mapJoinOp.setStatistics(joinOp.getStatistics());
// propagate this change till the next RS
for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
@@ -378,7 +381,7 @@ public class ConvertJoinMapJoin implements NodeProcessor {
joinOp.getSchema());
int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets,
- joinOp.getOpTraits().getSortCols(), numReduceSinks);
+ joinOp.getOpTraits().getSortCols(), numReduceSinks, joinOp.getOpTraits().getBucketingVersion());
mergeJoinOp.setOpTraits(opTraits);
mergeJoinOp.setStatistics(joinOp.getStatistics());
@@ -445,7 +448,8 @@ public class ConvertJoinMapJoin implements NodeProcessor {
return;
}
currentOp.setOpTraits(new OpTraits(opTraits.getBucketColNames(),
- opTraits.getNumBuckets(), opTraits.getSortCols(), opTraits.getNumReduceSinks()));
+ opTraits.getNumBuckets(), opTraits.getSortCols(), opTraits.getNumReduceSinks(),
+ opTraits.getBucketingVersion()));
for (Operator<? extends OperatorDesc> childOp : currentOp.getChildOperators()) {
if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof GroupByOperator)) {
break;
@@ -498,7 +502,8 @@ public class ConvertJoinMapJoin implements NodeProcessor {
// we can set the traits for this join operator
opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(),
- tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks());
+ tezBucketJoinProcCtx.getNumBuckets(), null,
+ joinOp.getOpTraits().getNumReduceSinks(), joinOp.getOpTraits().getBucketingVersion());
mapJoinOp.setOpTraits(opTraits);
mapJoinOp.setStatistics(joinOp.getStatistics());
setNumberOfBucketsOnChildren(mapJoinOp);
@@ -576,6 +581,13 @@ public class ConvertJoinMapJoin implements NodeProcessor {
return false;
}
ReduceSinkOperator rsOp = (ReduceSinkOperator) parentOp;
+ // If the chosen big table has fewer buckets than any of the small tables,
+ // then some small-table buckets will have no mapping to any of the big
+ // table buckets, resulting in wrong results.
+ if (numBuckets > 0 && numBuckets < rsOp.getOpTraits().getNumBuckets()) {
+ LOG.info("Small table has more buckets than big table.");
+ return false;
+ }
if (!checkColEquality(rsOp.getParentOperators().get(0).getOpTraits().getSortCols(), rsOp
.getOpTraits().getSortCols(), rsOp.getColumnExprMap(), false)) {
LOG.info("We cannot convert to SMB because the sort column names do not match.");
@@ -593,6 +605,37 @@ public class ConvertJoinMapJoin implements NodeProcessor {
numBuckets = bigTableRS.getConf().getNumReducers();
}
tezBucketJoinProcCtx.setNumBuckets(numBuckets);
+
+ // Bucketing now uses two different versions: version 1 for existing
+ // tables and version 2 for new tables. All the inputs to the SMB join must
+ // use the same version. This only applies to tables read directly, not to
+ // intermediate outputs of joins/group-bys.
+ BucketingVersion version = BucketingVersion.INVALID_BUCKETING;
+ for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
+ // Check if the parent is coming from a table scan, if so, what is the version of it.
+ assert parentOp.getParentOperators() != null && parentOp.getParentOperators().size() == 1;
+ Operator<?> op = parentOp.getParentOperators().get(0);
+ while(op != null && !(op instanceof TableScanOperator
+ || op instanceof ReduceSinkOperator
+ || op instanceof CommonJoinOperator)) {
+ // If op has parents it is guaranteed to be 1.
+ List<Operator<?>> parents = op.getParentOperators();
+ Preconditions.checkState(parents.size() == 0 || parents.size() == 1);
+ op = parents.size() == 1 ? parents.get(0) : null;
+ }
+
+ if (op instanceof TableScanOperator) {
+ BucketingVersion localVersion = ((TableScanOperator)op).getConf().
+ getTableMetadata().getBucketingVersion();
+ if (version == BucketingVersion.INVALID_BUCKETING) {
+ version = localVersion;
+ } else if (version != localVersion) {
+ // versions don't match, return false.
+ LOG.debug("SMB Join can't be performed due to bucketing version mismatch");
+ return false;
+ }
+ }
+ }
LOG.info("We can convert the join to an SMB join.");
return true;
}
@@ -1168,7 +1211,8 @@ public class ConvertJoinMapJoin implements NodeProcessor {
joinOp.getOpTraits().getBucketColNames(),
numReducers,
null,
- joinOp.getOpTraits().getNumReduceSinks());
+ joinOp.getOpTraits().getNumReduceSinks(),
+ joinOp.getOpTraits().getBucketingVersion());
mapJoinOp.setOpTraits(opTraits);
mapJoinOp.setStatistics(joinOp.getStatistics());
// propagate this change till the next RS
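A toy model of the new SMB guard added above: walk each join input up its single-parent chain to the originating scan and require all scans to share one bucketing version; any mismatch disables the SMB conversion. Node and Version below are illustrative stand-ins for Hive operators and the metastore BucketingVersion enum, not the real classes.

import java.util.Arrays;
import java.util.List;

public class BucketingVersionCheckSketch {
  enum Version { INVALID, JAVA, MURMUR }

  static class Node {
    final Node parent;       // null for a table scan
    final Version version;   // only meaningful for scans
    Node(Node parent, Version version) { this.parent = parent; this.version = version; }
  }

  static boolean sameBucketingVersion(List<Node> joinInputs) {
    Version seen = Version.INVALID;
    for (Node input : joinInputs) {
      Node op = input;
      while (op.parent != null) {       // stop at the scan (chain root)
        op = op.parent;
      }
      if (seen == Version.INVALID) {
        seen = op.version;              // remember the first scan's version
      } else if (seen != op.version) {
        return false;                   // versions differ: no SMB join
      }
    }
    return true;
  }

  public static void main(String[] args) {
    Node oldTableScan = new Node(null, Version.JAVA);
    Node newTableScan = new Node(null, Version.MURMUR);
    Node sideA = new Node(new Node(oldTableScan, Version.INVALID), Version.INVALID);
    Node sideB = new Node(newTableScan, Version.INVALID);
    System.out.println(sameBucketingVersion(Arrays.asList(sideA, sideB))); // false
  }
}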
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
index 69d9f31..7696402 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
@@ -22,6 +22,7 @@ import java.util.*;
import java.util.Map.Entry;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.BucketingVersion;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.exec.JoinOperator;
@@ -92,10 +93,12 @@ public class OpTraitsRulesProcFactory {
List<List<String>> listBucketCols = new ArrayList<List<String>>();
int numBuckets = -1;
int numReduceSinks = 1;
+ BucketingVersion bucketingVersion = BucketingVersion.INVALID_BUCKETING;
OpTraits parentOpTraits = rs.getParentOperators().get(0).getOpTraits();
if (parentOpTraits != null) {
numBuckets = parentOpTraits.getNumBuckets();
numReduceSinks += parentOpTraits.getNumReduceSinks();
+ bucketingVersion = parentOpTraits.getBucketingVersion();
}
List<String> bucketCols = new ArrayList<>();
@@ -134,7 +137,8 @@ public class OpTraitsRulesProcFactory {
}
listBucketCols.add(bucketCols);
- OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listBucketCols, numReduceSinks);
+ OpTraits opTraits = new OpTraits(listBucketCols, numBuckets,
+ listBucketCols, numReduceSinks, bucketingVersion);
rs.setOpTraits(opTraits);
return null;
}
@@ -213,7 +217,8 @@ public class OpTraitsRulesProcFactory {
sortedColsList.add(sortCols);
}
// num reduce sinks hardcoded to 0 because TS has no parents
- OpTraits opTraits = new OpTraits(bucketColsList, numBuckets, sortedColsList, 0);
+ OpTraits opTraits = new OpTraits(bucketColsList, numBuckets,
+ sortedColsList, 0, table.getBucketingVersion());
ts.setOpTraits(opTraits);
return null;
}
@@ -239,12 +244,15 @@ public class OpTraitsRulesProcFactory {
List<List<String>> listBucketCols = new ArrayList<List<String>>();
int numReduceSinks = 0;
+ BucketingVersion bucketingVersion = BucketingVersion.INVALID_BUCKETING;
OpTraits parentOpTraits = gbyOp.getParentOperators().get(0).getOpTraits();
if (parentOpTraits != null) {
numReduceSinks = parentOpTraits.getNumReduceSinks();
+ bucketingVersion = parentOpTraits.getBucketingVersion();
}
listBucketCols.add(gbyKeys);
- OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols, numReduceSinks);
+ OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols,
+ numReduceSinks, bucketingVersion);
gbyOp.setOpTraits(opTraits);
return null;
}
@@ -298,12 +306,15 @@ public class OpTraitsRulesProcFactory {
int numBuckets = -1;
int numReduceSinks = 0;
+ BucketingVersion bucketingVersion = BucketingVersion.INVALID_BUCKETING;
OpTraits parentOpTraits = selOp.getParentOperators().get(0).getOpTraits();
if (parentOpTraits != null) {
numBuckets = parentOpTraits.getNumBuckets();
numReduceSinks = parentOpTraits.getNumReduceSinks();
+ bucketingVersion = parentOpTraits.getBucketingVersion();
}
- OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols, numReduceSinks);
+ OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols,
+ numReduceSinks, bucketingVersion);
selOp.setOpTraits(opTraits);
return null;
}
@@ -319,6 +330,7 @@ public class OpTraitsRulesProcFactory {
List<List<String>> sortColsList = new ArrayList<List<String>>();
byte pos = 0;
int numReduceSinks = 0; // will be set to the larger of the parents
+ boolean bucketingVersionSeen = false;
for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
if (!(parentOp instanceof ReduceSinkOperator)) {
// can be mux operator
@@ -338,7 +350,7 @@ public class OpTraitsRulesProcFactory {
pos++;
}
- joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList, numReduceSinks));
+ joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList, numReduceSinks, BucketingVersion.INVALID_BUCKETING));
return null;
}
@@ -392,6 +404,8 @@ public class OpTraitsRulesProcFactory {
Operator<? extends OperatorDesc> operator = (Operator<? extends OperatorDesc>) nd;
int numReduceSinks = 0;
+ BucketingVersion bucketingVersion = BucketingVersion.INVALID_BUCKETING;
+ boolean bucketingVersionSeen = false;
for (Operator<?> parentOp : operator.getParentOperators()) {
if (parentOp.getOpTraits() == null) {
continue;
@@ -399,8 +413,17 @@ public class OpTraitsRulesProcFactory {
if (parentOp.getOpTraits().getNumReduceSinks() > numReduceSinks) {
numReduceSinks = parentOp.getOpTraits().getNumReduceSinks();
}
+ // If there is a mismatch in bucketingVersion, set it to INVALID_BUCKETING
+ // so that the SMB join conversion will be disabled.
+ if (bucketingVersion == BucketingVersion.INVALID_BUCKETING && !bucketingVersionSeen) {
+ bucketingVersion = parentOp.getOpTraits().getBucketingVersion();
+ bucketingVersionSeen = true;
+ } else if (bucketingVersion != parentOp.getOpTraits().getBucketingVersion()) {
+ bucketingVersion = BucketingVersion.INVALID_BUCKETING;
+ }
}
- OpTraits opTraits = new OpTraits(null, -1, null, numReduceSinks);
+ OpTraits opTraits = new OpTraits(null, -1,
+ null, numReduceSinks, bucketingVersion);
operator.setOpTraits(opTraits);
return null;
}
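The default-rule change above folds the parents' bucketing versions into one trait: the first parent's version is adopted, and any later disagreement collapses the result to INVALID so SMB conversion is rejected downstream. A compact sketch of that fold, with Version again standing in for BucketingVersion.

import java.util.Arrays;
import java.util.List;

public class TraitFoldSketch {
  enum Version { INVALID, JAVA, MURMUR }

  static Version foldParents(List<Version> parentVersions) {
    Version result = Version.INVALID;
    boolean seen = false;
    for (Version v : parentVersions) {
      if (!seen) {                      // adopt the first parent's version
        result = v;
        seen = true;
      } else if (result != v) {
        result = Version.INVALID;       // mismatch: poison the trait
      }
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(foldParents(Arrays.asList(Version.MURMUR, Version.MURMUR))); // MURMUR
    System.out.println(foldParents(Arrays.asList(Version.JAVA, Version.MURMUR)));   // INVALID
  }
}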
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
index bacc444..39d2370 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
@@ -121,7 +121,8 @@ public class SparkMapJoinOptimizer implements NodeProcessor {
}
// we can set the traits for this join operator
- OpTraits opTraits = new OpTraits(bucketColNames, numBuckets, null, joinOp.getOpTraits().getNumReduceSinks());
+ OpTraits opTraits = new OpTraits(bucketColNames, numBuckets, null,
+ joinOp.getOpTraits().getNumReduceSinks(), joinOp.getOpTraits().getBucketingVersion());
mapJoinOp.setOpTraits(opTraits);
mapJoinOp.setStatistics(joinOp.getStatistics());
setNumberOfBucketsOnChildren(mapJoinOp);
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 54f5bab..3619763 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -23,6 +23,7 @@ import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -160,6 +161,48 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
"source contains directory: " + oneSrc.getPath().toString()));
}
}
+ // Do another loop if table is bucketed
+ List<String> bucketCols = table.getBucketCols();
+ if (bucketCols != null && !bucketCols.isEmpty()) {
+ // Hive assumes that the user names the files after the corresponding
+ // bucket, e.g. 000000_0, 000001_0 and so on; the first file belongs to
+ // bucket 0, the second to bucket 1, and so forth.
+ boolean[] bucketArray = new boolean[table.getNumBuckets()];
+ // initialize the array
+ Arrays.fill(bucketArray, false);
+ int numBuckets = table.getNumBuckets();
+
+ for (FileStatus oneSrc : srcs) {
+ String bucketName = oneSrc.getPath().getName();
+
+ //get the bucket id
+ String bucketIdStr =
+ Utilities.getBucketFileNameFromPathSubString(bucketName);
+ int bucketId = Utilities.getBucketIdFromFile(bucketIdStr);
+ LOG.debug("bucket ID for file " + oneSrc.getPath() + " = " + bucketId
+ + " for table " + table.getFullyQualifiedName());
+ if (bucketId == -1) {
+ throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(
+ "The file name is invalid : "
+ + oneSrc.getPath().toString() + " for table "
+ + table.getFullyQualifiedName()));
+ }
+ if (bucketId >= numBuckets) {
+ throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(
+ "The file name corresponds to invalid bucketId : "
+ + oneSrc.getPath().toString())
+ + ". Maximum number of buckets can be " + numBuckets
+ + " for table " + table.getFullyQualifiedName());
+ }
+ if (bucketArray[bucketId]) {
+ throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(
+ "Multiple files for same bucket : " + bucketId
+ + ". Only 1 file per bucket allowed in single load command. To load multiple files for same bucket, use multiple statements for table "
+ + table.getFullyQualifiedName()));
+ }
+ bucketArray[bucketId] = true;
+ }
+ }
} catch (IOException e) {
// Has to use full name to make sure it does not conflict with
// org.apache.commons.lang.StringUtils
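The LoadSemanticAnalyzer change above enforces three rules at LOAD time for bucketed tables: every source file must carry a valid bucket id in its name, the id must be smaller than the table's bucket count, and no two files may target the same bucket within one LOAD statement. A standalone sketch of that validation; the regex-based bucketIdFromFileName is an assumption standing in for Utilities.getBucketIdFromFile, and plain exceptions stand in for SemanticException.

import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LoadBucketValidationSketch {

  static int bucketIdFromFileName(String name) {
    Matcher m = Pattern.compile("^(\\d{6})_\\d+(_copy_\\d+)?$").matcher(name);
    return m.matches() ? Integer.parseInt(m.group(1)) : -1;
  }

  static void validate(List<String> srcFiles, int numBuckets) {
    boolean[] bucketSeen = new boolean[numBuckets];
    for (String file : srcFiles) {
      int bucketId = bucketIdFromFileName(file);
      if (bucketId == -1) {
        throw new IllegalArgumentException("Invalid file name for bucketed table: " + file);
      }
      if (bucketId >= numBuckets) {
        throw new IllegalArgumentException(
            "File " + file + " maps to bucket " + bucketId + " but table has only " + numBuckets);
      }
      if (bucketSeen[bucketId]) {
        throw new IllegalArgumentException(
            "Multiple files for bucket " + bucketId + "; use one LOAD statement per file");
      }
      bucketSeen[bucketId] = true;
    }
  }

  public static void main(String[] args) {
    validate(Arrays.asList("000000_0", "000001_0"), 2);          // passes
    validate(Arrays.asList("000000_0", "000000_0_copy_1"), 2);   // throws: same bucket twice
  }
}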
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java
index 9621c3b..0dcc229 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java
@@ -18,21 +18,26 @@
package org.apache.hadoop.hive.ql.plan;
+import org.apache.hadoop.hive.metastore.api.BucketingVersion;
+
import java.util.List;
public class OpTraits {
- List<List<String>> bucketColNames;
- List<List<String>> sortColNames;
- int numBuckets;
- int numReduceSinks;
+ private List<List<String>> bucketColNames;
+ private List<List<String>> sortColNames;
+ private int numBuckets;
+ private int numReduceSinks;
+ private BucketingVersion bucketingVersion;
public OpTraits(List<List<String>> bucketColNames, int numBuckets,
- List<List<String>> sortColNames, int numReduceSinks) {
+ List<List<String>> sortColNames, int numReduceSinks,
+ BucketingVersion bucketingVersion) {
this.bucketColNames = bucketColNames;
this.numBuckets = numBuckets;
this.sortColNames = sortColNames;
this.numReduceSinks = numReduceSinks;
+ this.bucketingVersion = bucketingVersion;
}
public List<List<String>> getBucketColNames() {
@@ -68,6 +73,13 @@ public class OpTraits {
return this.numReduceSinks;
}
+ public void setBucketingVersion(BucketingVersion bucketingVersion) {
+ this.bucketingVersion = bucketingVersion;
+ }
+
+ public BucketingVersion getBucketingVersion() {
+ return bucketingVersion;
+ }
@Override
public String toString() {
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index b5b478f..5355e06 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -35,12 +35,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.ql.index.HiveIndex;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -171,6 +166,8 @@ public class TestHive extends TestCase {
tbl.setStoredAsSubDirectories(false);
tbl.setRewriteEnabled(false);
+ tbl.getTTable().setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
+ tbl.getTTable().setLoadInBucketedTable(false);
// create table
setNullCreateTableGrants();
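Editor's note: mirroring the TestHive hunk above, a minimal sketch of populating the two new fields on a metastore Table object. setBucketingVersion and setLoadInBucketedTable are the setters used in the hunk; the other setters are assumed to follow the usual Thrift-generated bean convention, and the table/database names are illustrative.

    import org.apache.hadoop.hive.metastore.api.BucketingVersion;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class TableBucketingFields {
      public static void main(String[] args) {
        Table t = new Table();
        t.setTableName("bucket_big");
        t.setDbName("default");
        // New fields added by this patch: which hash function the bucket files follow,
        // and whether data was loaded (rather than inserted) into a bucketed table.
        t.setBucketingVersion(BucketingVersion.MURMUR_BUCKETING);
        t.setLoadInBucketedTable(false);
        System.out.println(t);
      }
    }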
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
index e5fdcb5..b7bd10e 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
@@ -1,19 +1,21 @@
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
--- small 1 part, 4 bucket & big 2 part, 2 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+-- small 1 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
index abf09e5..9f719ae 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
@@ -1,7 +1,7 @@
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
--- small 2 part, 4 bucket & big 1 part, 2 bucket
+-- small 2 part, 4 bucket & big 1 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
@@ -13,9 +13,11 @@ load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INT
load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09');
load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
index b85c4a7..c107501 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
@@ -1,19 +1,19 @@
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
--- small no part, 4 bucket & big no part, 2 bucket
+-- small no part, 2 bucket & big no part, 4 bucket
-- SORT_QUERY_RESULTS
-CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small;
load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small;
-CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big;
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big;
set hive.auto.convert.sortmerge.join=true;
set hive.optimize.bucketmapjoin = true;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
index bd78086..a5cc04a 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
@@ -1,7 +1,7 @@
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
--- small 2 part, 4 bucket & big 2 part, 2 bucket
+-- small 2 part, 4 bucket & big 2 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
@@ -13,12 +13,16 @@ load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INT
load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09');
load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
index b9c2e6f..37dbbf9 100644
--- a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
+++ b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
@@ -53,174 +53,4 @@ POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0'
- INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0'
- INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0'
- INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0'
- INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: explain
-select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
-on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
-on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 108 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: key is not null (type: boolean)
- Statistics: Num rows: 108 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: int), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 108 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 108 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- TableScan
- alias: b
- Statistics: Num rows: 78 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: key is not null (type: boolean)
- Statistics: Num rows: 78 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: int), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 78 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 78 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col1, _col4
- Statistics: Num rows: 118 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 118 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 118 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: explain
-select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
-on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
-on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-1 depends on stages: Stage-3
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce Local Work
- Alias -> Map Local Tables:
- b
- Fetch Operator
- limit: -1
- Alias -> Map Local Operator Tree:
- b
- TableScan
- alias: b
- Statistics: Num rows: 102 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: key is not null (type: boolean)
- Statistics: Num rows: 102 Data size: 30620 Basic stats: COMPLETE Column stats: NONE
- HashTable Sink Operator
- keys:
- 0 key (type: int)
- 1 key (type: int)
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 140 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: key is not null (type: boolean)
- Statistics: Num rows: 140 Data size: 42000 Basic stats: COMPLETE Column stats: NONE
- Map Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 key (type: int)
- 1 key (type: int)
- outputColumnNames: _col0, _col1, _col7
- Statistics: Num rows: 154 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 154 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 154 Data size: 46200 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Local Work:
- Map Reduce Local Work
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-FAILED: SemanticException [Error 10136]: Bucketed mapjoin cannot be performed. This can be due to multiple reasons: . Join columns dont match bucketed columns. . Number of buckets are not a multiple of each other. If you really want to perform the operation, either remove the mapjoin hint from your query or set hive.enforce.bucketmapjoin to false.
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
index 5cfc35a..dda7211 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -23,27 +23,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -64,6 +48,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -81,6 +81,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -95,16 +111,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -134,7 +150,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -142,7 +158,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -150,7 +166,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -158,7 +174,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -183,7 +199,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -191,7 +207,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -199,7 +215,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -207,7 +223,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -308,7 +324,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -316,7 +332,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
@@ -324,7 +340,7 @@ STAGE PLANS:
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -332,7 +348,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -353,16 +369,16 @@ STAGE PLANS:
$hdt$_1:b
TableScan
alias: b
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -374,16 +390,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -414,7 +430,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -422,7 +438,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -430,7 +446,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -438,7 +454,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -463,7 +479,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -471,7 +487,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -479,7 +495,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -487,7 +503,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -511,7 +527,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -519,7 +535,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
@@ -527,7 +543,7 @@ STAGE PLANS:
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -535,7 +551,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -597,7 +613,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -605,7 +621,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -613,7 +629,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -621,7 +637,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -645,7 +661,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -653,7 +669,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -661,7 +677,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -669,7 +685,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -690,16 +706,16 @@ STAGE PLANS:
$hdt$_0:a
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -711,16 +727,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 4 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -751,7 +767,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -759,7 +775,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -767,7 +783,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -775,7 +791,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -800,7 +816,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -808,7 +824,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -816,7 +832,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -824,7 +840,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -848,7 +864,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -856,7 +872,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
@@ -864,7 +880,7 @@ STAGE PLANS:
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -872,7 +888,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -924,16 +940,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -963,7 +979,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -971,7 +987,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -979,7 +995,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -987,7 +1003,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1012,7 +1028,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1020,7 +1036,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1028,7 +1044,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1036,7 +1052,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
index 0d586fd..b54c574 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -111,16 +127,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -150,7 +166,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -158,7 +174,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -166,7 +182,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -174,7 +190,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -243,7 +259,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -258,16 +274,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -297,7 +313,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -305,7 +321,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -313,7 +329,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -321,7 +337,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -390,7 +406,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -534,16 +550,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -574,7 +590,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -582,7 +598,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -590,7 +606,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -598,7 +614,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -755,7 +771,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -763,7 +779,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -771,7 +787,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -779,7 +795,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -800,16 +816,16 @@ STAGE PLANS:
$hdt$_0:a
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -861,7 +877,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -869,7 +885,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -877,7 +893,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -885,7 +901,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1034,16 +1050,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -1073,7 +1089,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1081,7 +1097,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1089,7 +1105,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1097,7 +1113,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1166,4 +1182,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
index 45704d1..451c3b3 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -22,27 +22,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -62,6 +46,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -76,16 +76,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -114,7 +114,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -122,13 +122,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -136,7 +136,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -144,13 +144,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -216,16 +216,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -254,7 +254,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -262,13 +262,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -276,7 +276,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -284,13 +284,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -369,16 +369,16 @@ STAGE PLANS:
$hdt$_1:b
TableScan
alias: b
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -390,16 +390,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -429,7 +429,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -437,13 +437,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -451,7 +451,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -459,13 +459,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -475,7 +475,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -492,7 +492,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -500,13 +500,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
@@ -551,16 +551,16 @@ STAGE PLANS:
$hdt$_0:a
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -572,16 +572,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -611,7 +611,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -619,13 +619,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -633,7 +633,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -641,13 +641,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -657,7 +657,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -674,7 +674,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -682,13 +682,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
@@ -728,16 +728,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -766,7 +766,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -774,13 +774,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -788,7 +788,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -796,13 +796,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
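For reference, a condensed sketch of the table setup these golden files now describe, assembled only from statements visible in the diff above (table names, bucket counts, and file paths are taken verbatim from the .q script; the pre-existing 000000_0/000001_0 loads are unchanged and omitted here):

    CREATE TABLE bucket_small (key string, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    CREATE TABLE bucket_big (key string, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
    -- the two additional big-side bucket files are now loaded as well:
    load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big;
    load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big;
    -- the join under test is unchanged:
    explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;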
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
index 1959075..f335142 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -114,6 +130,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -128,16 +160,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -167,7 +199,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -175,7 +207,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -183,7 +215,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -191,7 +223,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -216,7 +248,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -224,7 +256,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -232,7 +264,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -240,7 +272,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -312,7 +344,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -327,16 +359,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -366,7 +398,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -374,7 +406,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -382,7 +414,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -390,7 +422,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -415,7 +447,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -423,7 +455,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -431,7 +463,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -439,7 +471,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -511,7 +543,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -655,16 +687,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -695,7 +727,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -703,7 +735,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -711,7 +743,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -719,7 +751,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -744,7 +776,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -752,7 +784,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -760,7 +792,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -768,7 +800,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -926,7 +958,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -934,7 +966,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -942,7 +974,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -950,7 +982,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -974,7 +1006,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -982,7 +1014,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -990,7 +1022,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -998,7 +1030,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1019,16 +1051,16 @@ STAGE PLANS:
$hdt$_0:a
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -1080,7 +1112,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1088,7 +1120,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1096,7 +1128,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1104,7 +1136,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1129,7 +1161,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1137,7 +1169,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1145,7 +1177,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1153,7 +1185,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1302,16 +1334,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -1341,7 +1373,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1349,7 +1381,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1357,7 +1389,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1365,7 +1397,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1390,7 +1422,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1398,7 +1430,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -1406,7 +1438,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -1414,7 +1446,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -1486,4 +1518,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out
index 8cfa113..117ff4a 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -23,27 +23,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -64,6 +48,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -81,6 +81,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -100,16 +116,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -117,7 +133,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 123 Data size: 60500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 264 Data size: 127864 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -142,7 +158,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -150,7 +166,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -158,7 +174,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -166,7 +182,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -191,7 +207,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -199,7 +215,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -207,7 +223,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -215,7 +231,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -308,16 +324,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -325,7 +341,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 123 Data size: 60500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 264 Data size: 127864 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -350,7 +366,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -358,7 +374,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -366,7 +382,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -374,7 +390,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -399,7 +415,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -407,7 +423,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -415,7 +431,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -423,7 +439,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
index fce5e0c..aff5a0d 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -116,16 +132,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -133,7 +149,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 1
- Statistics: Num rows: 61 Data size: 30250 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 132 Data size: 63932 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -158,7 +174,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -166,7 +182,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -174,7 +190,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -182,7 +198,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -254,7 +270,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -274,16 +290,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -291,7 +307,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 61 Data size: 30250 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 132 Data size: 63932 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -316,7 +332,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -324,7 +340,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -332,7 +348,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -340,7 +356,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -412,7 +428,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -432,16 +448,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 56 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 120 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -449,7 +465,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 61 Data size: 30250 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 132 Data size: 63932 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -474,7 +490,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -482,7 +498,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -490,7 +506,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -498,7 +514,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -570,4 +586,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-38
+78
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
index 8250eca..6255dd2 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_small
@@ -22,27 +22,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -62,6 +46,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -81,16 +81,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -98,7 +98,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 1
- Statistics: Num rows: 1 Data size: 2486 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1254 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -122,7 +122,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -130,13 +130,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -144,7 +144,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -152,13 +152,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -232,16 +232,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -249,7 +249,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 1 Data size: 30250 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 63932 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -273,7 +273,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -281,13 +281,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -295,7 +295,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -303,13 +303,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
@@ -382,16 +382,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 2260 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1140 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: string)
@@ -414,7 +414,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -422,13 +422,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -436,7 +436,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 4
+ bucket_count 2
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -444,13 +444,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_small
- numFiles 4
+ numFiles 2
numRows 0
rawDataSize 0
serialization.ddl struct bucket_small { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 226
+ totalSize 114
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_small
@@ -468,16 +468,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -487,7 +487,7 @@ STAGE PLANS:
input vertices:
1 Map 3
Position of Big Table: 0
- Statistics: Num rows: 1 Data size: 30250 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 63932 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -518,7 +518,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -526,13 +526,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -540,7 +540,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -548,13 +548,13 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
rawDataSize 0
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.bucket_big
http://git-wip-us.apache.org/repos/asf/hive/blob/6e9b63e4/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
index eb813c1..ac5cd47 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
@@ -72,11 +72,11 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/sm
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@bucket_big
@@ -97,6 +97,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
@@ -114,6 +130,22 @@ POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/bi
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
@@ -133,16 +165,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: b
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -150,7 +182,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 1
- Statistics: Num rows: 123 Data size: 60500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 264 Data size: 127864 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -175,7 +207,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -183,7 +215,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -191,7 +223,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -199,7 +231,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -224,7 +256,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -232,7 +264,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -240,7 +272,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -248,7 +280,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -323,7 +355,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -343,16 +375,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -360,7 +392,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 123 Data size: 60500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 264 Data size: 127864 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -385,7 +417,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -393,7 +425,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -401,7 +433,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -409,7 +441,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -434,7 +466,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -442,7 +474,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -450,7 +482,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -458,7 +490,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -533,7 +565,7 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156
PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
@@ -553,16 +585,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: a
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: key is not null (type: boolean)
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 112 Data size: 55000 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 240 Data size: 116240 Basic stats: COMPLETE Column stats: NONE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
@@ -570,7 +602,7 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
Position of Big Table: 0
- Statistics: Num rows: 123 Data size: 60500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 264 Data size: 127864 Basic stats: COMPLETE Column stats: NONE
BucketMapJoin: true
Group By Operator
aggregations: count()
@@ -595,7 +627,7 @@ STAGE PLANS:
partition values:
ds 2008-04-08
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -603,7 +635,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -611,7 +643,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -619,7 +651,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -644,7 +676,7 @@ STAGE PLANS:
partition values:
ds 2008-04-09
properties:
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -652,7 +684,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.bucket_big
- numFiles 2
+ numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
@@ -660,7 +692,7 @@ STAGE PLANS:
serialization.ddl struct bucket_big { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 2750
+ totalSize 5812
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -668,7 +700,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
SORTBUCKETCOLSPREFIX TRUE
- bucket_count 2
+ bucket_count 4
bucket_field_name key
column.name.delimiter ,
columns key,value
@@ -743,4 +775,4 @@ POSTHOOK: Input: default@bucket_small
POSTHOOK: Input: default@bucket_small@ds=2008-04-08
POSTHOOK: Input: default@bucket_small@ds=2008-04-09
#### A masked pattern was here ####
-76
+156