Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/02 21:57:07 UTC
svn commit: r1622108 [12/27] - in /hive/branches/tez: ./
accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/
beeline/src/java/org/apache/hive/beeline/
beeline/src/test/org/apache/hive/beeline/ bin/ bin/ext/ checkstyle/
common/src/java/...
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java Tue Sep 2 19:56:56 2014
@@ -49,121 +49,135 @@ public class TxnDbUtil {
// intended for creating derby databases, and thus will inexorably get
// out of date with it. I'm open to any suggestions on how to make this
// read the file in a build friendly way.
- Connection conn = getConnection();
- Statement s = conn.createStatement();
- s.execute("CREATE TABLE TXNS (" +
- " TXN_ID bigint PRIMARY KEY," +
- " TXN_STATE char(1) NOT NULL," +
- " TXN_STARTED bigint NOT NULL," +
- " TXN_LAST_HEARTBEAT bigint NOT NULL," +
- " TXN_USER varchar(128) NOT NULL," +
- " TXN_HOST varchar(128) NOT NULL)");
-
- s.execute("CREATE TABLE TXN_COMPONENTS (" +
- " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
- " TC_DATABASE varchar(128) NOT NULL," +
- " TC_TABLE varchar(128)," +
- " TC_PARTITION varchar(767))");
- s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
- " CTC_TXNID bigint," +
- " CTC_DATABASE varchar(128) NOT NULL," +
- " CTC_TABLE varchar(128)," +
- " CTC_PARTITION varchar(767))");
- s.execute("CREATE TABLE NEXT_TXN_ID (" +
- " NTXN_NEXT bigint NOT NULL)");
- s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
- s.execute("CREATE TABLE HIVE_LOCKS (" +
- " HL_LOCK_EXT_ID bigint NOT NULL," +
- " HL_LOCK_INT_ID bigint NOT NULL," +
- " HL_TXNID bigint," +
- " HL_DB varchar(128) NOT NULL," +
- " HL_TABLE varchar(128)," +
- " HL_PARTITION varchar(767)," +
- " HL_LOCK_STATE char(1) NOT NULL," +
- " HL_LOCK_TYPE char(1) NOT NULL," +
- " HL_LAST_HEARTBEAT bigint NOT NULL," +
- " HL_ACQUIRED_AT bigint," +
- " HL_USER varchar(128) NOT NULL," +
- " HL_HOST varchar(128) NOT NULL," +
- " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
- s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
-
- s.execute("CREATE TABLE NEXT_LOCK_ID (" +
- " NL_NEXT bigint NOT NULL)");
- s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
-
- s.execute("CREATE TABLE COMPACTION_QUEUE (" +
- " CQ_ID bigint PRIMARY KEY," +
- " CQ_DATABASE varchar(128) NOT NULL," +
- " CQ_TABLE varchar(128) NOT NULL," +
- " CQ_PARTITION varchar(767)," +
- " CQ_STATE char(1) NOT NULL," +
- " CQ_TYPE char(1) NOT NULL," +
- " CQ_WORKER_ID varchar(128)," +
- " CQ_START bigint," +
- " CQ_RUN_AS varchar(128))");
-
- s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
- s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
-
- conn.commit();
- conn.close();
+ Connection conn = null;
+ boolean committed = false;
+ try {
+ conn = getConnection();
+ Statement s = conn.createStatement();
+ s.execute("CREATE TABLE TXNS (" +
+ " TXN_ID bigint PRIMARY KEY," +
+ " TXN_STATE char(1) NOT NULL," +
+ " TXN_STARTED bigint NOT NULL," +
+ " TXN_LAST_HEARTBEAT bigint NOT NULL," +
+ " TXN_USER varchar(128) NOT NULL," +
+ " TXN_HOST varchar(128) NOT NULL)");
+
+ s.execute("CREATE TABLE TXN_COMPONENTS (" +
+ " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," +
+ " TC_DATABASE varchar(128) NOT NULL," +
+ " TC_TABLE varchar(128)," +
+ " TC_PARTITION varchar(767))");
+ s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+ " CTC_TXNID bigint," +
+ " CTC_DATABASE varchar(128) NOT NULL," +
+ " CTC_TABLE varchar(128)," +
+ " CTC_PARTITION varchar(767))");
+ s.execute("CREATE TABLE NEXT_TXN_ID (" +
+ " NTXN_NEXT bigint NOT NULL)");
+ s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+ s.execute("CREATE TABLE HIVE_LOCKS (" +
+ " HL_LOCK_EXT_ID bigint NOT NULL," +
+ " HL_LOCK_INT_ID bigint NOT NULL," +
+ " HL_TXNID bigint," +
+ " HL_DB varchar(128) NOT NULL," +
+ " HL_TABLE varchar(128)," +
+ " HL_PARTITION varchar(767)," +
+ " HL_LOCK_STATE char(1) NOT NULL," +
+ " HL_LOCK_TYPE char(1) NOT NULL," +
+ " HL_LAST_HEARTBEAT bigint NOT NULL," +
+ " HL_ACQUIRED_AT bigint," +
+ " HL_USER varchar(128) NOT NULL," +
+ " HL_HOST varchar(128) NOT NULL," +
+ " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+ s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+
+ s.execute("CREATE TABLE NEXT_LOCK_ID (" +
+ " NL_NEXT bigint NOT NULL)");
+ s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+
+ s.execute("CREATE TABLE COMPACTION_QUEUE (" +
+ " CQ_ID bigint PRIMARY KEY," +
+ " CQ_DATABASE varchar(128) NOT NULL," +
+ " CQ_TABLE varchar(128) NOT NULL," +
+ " CQ_PARTITION varchar(767)," +
+ " CQ_STATE char(1) NOT NULL," +
+ " CQ_TYPE char(1) NOT NULL," +
+ " CQ_WORKER_ID varchar(128)," +
+ " CQ_START bigint," +
+ " CQ_RUN_AS varchar(128))");
+
+ s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+ s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+
+ conn.commit();
+ committed = true;
+ } finally {
+ if (conn != null && !committed) conn.rollback();
+ if (conn != null) conn.close();
+ }
}
public static void cleanDb() throws Exception {
- Connection conn = getConnection();
- Statement s = conn.createStatement();
- // We want to try these, whether they succeed or fail.
- try {
- s.execute("DROP INDEX HL_TXNID_INDEX");
- } catch (Exception e) {
- System.err.println("Unable to drop index HL_TXNID_INDEX " +
- e.getMessage());
- }
- try {
- s.execute("DROP TABLE TXN_COMPONENTS");
- } catch (Exception e) {
- System.err.println("Unable to drop table TXN_COMPONENTS " +
- e.getMessage());
- }
- try {
- s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
- } catch (Exception e) {
- System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
- e.getMessage());
- }
- try {
- s.execute("DROP TABLE TXNS");
- } catch (Exception e) {
- System.err.println("Unable to drop table TXNS " +
- e.getMessage());
- }
+ Connection conn = null;
+ boolean committed = false;
try {
- s.execute("DROP TABLE NEXT_TXN_ID");
- } catch (Exception e) {
- System.err.println("Unable to drop table NEXT_TXN_ID " +
- e.getMessage());
+ conn = getConnection();
+ Statement s = conn.createStatement();
+ // We want to try these, whether they succeed or fail.
+ try {
+ s.execute("DROP INDEX HL_TXNID_INDEX");
+ } catch (Exception e) {
+ System.err.println("Unable to drop index HL_TXNID_INDEX " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE TXN_COMPONENTS");
+ } catch (Exception e) {
+ System.err.println("Unable to drop table TXN_COMPONENTS " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS");
+ } catch (Exception e) {
+ System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE TXNS");
+ } catch (Exception e) {
+ System.err.println("Unable to drop table TXNS " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE NEXT_TXN_ID");
+ } catch (Exception e) {
+ System.err.println("Unable to drop table NEXT_TXN_ID " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE HIVE_LOCKS");
+ } catch (Exception e) {
+ System.err.println("Unable to drop table HIVE_LOCKS " +
+ e.getMessage());
+ }
+ try {
+ s.execute("DROP TABLE NEXT_LOCK_ID");
+ } catch (Exception e) {
+ }
+ try {
+ s.execute("DROP TABLE COMPACTION_QUEUE");
+ } catch (Exception e) {
+ }
+ try {
+ s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
+ } catch (Exception e) {
+ }
+ conn.commit();
+ committed = true;
+ } finally {
+ if (conn != null && !committed) conn.rollback();
+ if (conn != null) conn.close();
}
- try {
- s.execute("DROP TABLE HIVE_LOCKS");
- } catch (Exception e) {
- System.err.println("Unable to drop table HIVE_LOCKS " +
- e.getMessage());
- }
- try {
- s.execute("DROP TABLE NEXT_LOCK_ID");
- } catch (Exception e) {
- }
- try {
- s.execute("DROP TABLE COMPACTION_QUEUE");
- } catch (Exception e) {
- }
- try {
- s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID");
- } catch (Exception e) {
- }
- conn.commit();
- conn.close();
}
/**
@@ -174,25 +188,34 @@ public class TxnDbUtil {
*/
public static int countLockComponents(long lockId) throws Exception {
Connection conn = getConnection();
- Statement s = conn.createStatement();
- ResultSet rs = s.executeQuery("select count(*) from hive_locks where " +
- "hl_lock_ext_id = " + lockId);
- if (!rs.next()) return 0;
- int rc = rs.getInt(1);
- conn.rollback();
- conn.close();
- return rc;
+ try {
+ Statement s = conn.createStatement();
+ ResultSet rs = s.executeQuery("select count(*) from hive_locks where hl_lock_ext_id = " +
+ lockId);
+ if (!rs.next()) return 0;
+ int rc = rs.getInt(1);
+ return rc;
+ } finally {
+ conn.rollback();
+ conn.close();
+ }
}
public static int findNumCurrentLocks() throws Exception {
- Connection conn = getConnection();
- Statement s = conn.createStatement();
- ResultSet rs = s.executeQuery("select count(*) from hive_locks");
- if (!rs.next()) return 0;
- int rc = rs.getInt(1);
- conn.rollback();
- conn.close();
- return rc;
+ Connection conn = null;
+ try {
+ conn = getConnection();
+ Statement s = conn.createStatement();
+ ResultSet rs = s.executeQuery("select count(*) from hive_locks");
+ if (!rs.next()) return 0;
+ int rc = rs.getInt(1);
+ return rc;
+ } finally {
+ if (conn != null) {
+ conn.rollback();
+ conn.close();
+ }
+ }
}
private static Connection getConnection() throws Exception {
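[Editor's note] The rewrite above settles these methods on a single resource idiom: open the connection inside the try, do the work, commit, and in the finally block roll back unless the commit flag was set, with a null guard in case getConnection() itself threw before conn was assigned (countLockComponents() can skip the guard because it acquires the connection before entering the try). A minimal sketch of that idiom, with a hypothetical ConnectionSupplier standing in for TxnDbUtil.getConnection():

import java.sql.Connection;
import java.sql.Statement;

public class TxnSqlRunner {
  /** Hypothetical stand-in for TxnDbUtil.getConnection(). */
  interface ConnectionSupplier {
    Connection get() throws Exception;
  }

  /** The SQL body; mirrors the statement sequences in createDb()/cleanDb(). */
  interface SqlWork {
    void run(Statement s) throws Exception;
  }

  /** Runs the work in one transaction, rolling back unless the commit succeeded. */
  static void runInTxn(ConnectionSupplier supplier, SqlWork work) throws Exception {
    Connection conn = null;
    boolean committed = false;
    try {
      conn = supplier.get();
      Statement s = conn.createStatement();
      work.run(s);
      conn.commit();
      committed = true;
    } finally {
      if (conn != null) {          // supplier.get() may have thrown before assignment
        if (!committed) conn.rollback();
        conn.close();
      }
    }
  }
}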
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Tue Sep 2 19:56:56 2014
@@ -40,6 +40,7 @@ import javax.sql.DataSource;
import java.io.IOException;
import java.sql.*;
import java.util.*;
+import java.util.concurrent.TimeUnit;
/**
* A handler to answer transaction related calls that come into the metastore
@@ -119,7 +120,7 @@ public class TxnHandler {
throw new RuntimeException(e);
}
- timeout = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT) * 1000;
+ timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
deadlockCnt = 0;
buildJumpTable();
}
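[Editor's note] Switching from getIntVar() * 1000 to getTimeVar() lets HIVE_TXN_TIMEOUT carry an explicit unit and be read back converted to whatever unit the caller needs; the matching setTimeVar() appears in the TestTxnHandler change further below. A small sketch of the pair, assuming a HiveConf instance is at hand:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class TxnTimeoutExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Store the timeout with its unit attached...
    conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 5, TimeUnit.MINUTES);
    // ...and read it back converted, as TxnHandler now does.
    long timeoutMs = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
        TimeUnit.MILLISECONDS);
    System.out.println("txn timeout = " + timeoutMs + " ms");  // 300000 ms
  }
}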
Modified: hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Tue Sep 2 19:56:56 2014
@@ -24,6 +24,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Database;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -712,11 +714,16 @@ public class DummyRawStoreControlledComm
}
@Override
- public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName,
+ public AggrStats get_aggr_stats_for(String dbName,
String tblName, List<String> partNames, List<String> colNames)
throws MetaException {
return null;
}
+ @Override
+ public boolean updatePartitionColumnStatistics(SetPartitionsStatsRequest request)
+ throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+ return objectStore.updatePartitionColumnStatistics(request);
+ }
}
Modified: hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Tue Sep 2 19:56:56 2014
@@ -25,6 +25,7 @@ import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Database;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
@@ -729,11 +731,17 @@ public class DummyRawStoreForJdoConnecti
}
@Override
- public List<ColumnStatisticsObj> get_aggr_stats_for(String dbName,
+ public AggrStats get_aggr_stats_for(String dbName,
String tblName, List<String> partNames, List<String> colNames)
throws MetaException {
return null;
}
+
+ @Override
+ public boolean updatePartitionColumnStatistics(SetPartitionsStatsRequest request)
+ throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+ return false;
+ }
}
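[Editor's note] The two test stores take opposite approaches to the new RawStore method: DummyRawStoreControlledCommit forwards to its wrapped ObjectStore, while DummyRawStoreForJdoConnection stubs it out and reports failure. A simplified sketch of the two patterns, with a hypothetical StatsStore interface standing in for the RawStore method added above:

/** Hypothetical, trimmed-down stand-in for the new RawStore method. */
interface StatsStore {
  boolean updatePartitionColumnStatistics(Object request) throws Exception;
}

/** Like DummyRawStoreControlledCommit: delegate to the real store. */
class DelegatingStore implements StatsStore {
  private final StatsStore objectStore;
  DelegatingStore(StatsStore objectStore) { this.objectStore = objectStore; }
  public boolean updatePartitionColumnStatistics(Object request) throws Exception {
    return objectStore.updatePartitionColumnStatistics(request);
  }
}

/** Like DummyRawStoreForJdoConnection: a no-op that reports failure. */
class NoOpStore implements StatsStore {
  public boolean updatePartitionColumnStatistics(Object request) {
    return false;
  }
}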
Modified: hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java (original)
+++ hive/branches/tez/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java Tue Sep 2 19:56:56 2014
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hive.metastore.txn;
-import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -26,11 +25,11 @@ import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import static junit.framework.Assert.*;
@@ -868,7 +867,7 @@ public class TestTxnHandler {
@Test
public void testHeartbeatLock() throws Exception {
- conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1);
+ conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
HeartbeatRequest h = new HeartbeatRequest();
LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
comp.setTablename("mytable");
Modified: hive/branches/tez/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/pom.xml?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/pom.xml (original)
+++ hive/branches/tez/pom.xml Tue Sep 2 19:56:56 2014
@@ -60,6 +60,7 @@
<maven.repo.local>${settings.localRepository}</maven.repo.local>
<hive.path.to.root>.</hive.path.to.root>
<hive.jdbc.driver.classifier>standalone</hive.jdbc.driver.classifier>
+ <checkstyle.conf.dir>${hive.path.to.root}/checkstyle</checkstyle.conf.dir>
<!-- Test Properties -->
<test.extra.path></test.extra.path>
@@ -75,6 +76,7 @@
<datanucleus.maven.plugin.version>3.3.0-release</datanucleus.maven.plugin.version>
<maven.antrun.plugin.version>1.7</maven.antrun.plugin.version>
<maven.assembly.plugin.version>2.3</maven.assembly.plugin.version>
+ <maven.checkstyle.plugin.version>2.12.1</maven.checkstyle.plugin.version>
<maven.compiler.plugin.version>3.1</maven.compiler.plugin.version>
<maven.enforcer.plugin.version>1.3.1</maven.enforcer.plugin.version>
<maven.install.plugin.version>2.4</maven.install.plugin.version>
@@ -616,6 +618,11 @@
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <version>${maven.checkstyle.plugin.version}</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>${maven.enforcer.plugin.version}</version>
</plugin>
@@ -745,6 +752,13 @@
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <configuration>
+ <configLocation>${checkstyle.conf.dir}/checkstyle.xml</configLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes>
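[Editor's note] With the plugin version pinned in pluginManagement and configLocation pointing at the checkstyle/ directory added by this commit, modules can run the stock maven-checkstyle-plugin goals (mvn checkstyle:check to fail the build on violations, mvn checkstyle:checkstyle for the report); those goal names come from the plugin itself, not from anything added here.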
Modified: hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java (original)
+++ hive/branches/tez/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java Tue Sep 2 19:56:56 2014
@@ -3735,6 +3735,515 @@ public final class OrcProto {
// @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics)
}
+ public interface TimestampStatisticsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional sint64 minimum = 1;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ boolean hasMinimum();
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ long getMinimum();
+
+ // optional sint64 maximum = 2;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ boolean hasMaximum();
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ long getMaximum();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class TimestampStatistics extends
+ com.google.protobuf.GeneratedMessage
+ implements TimestampStatisticsOrBuilder {
+ // Use TimestampStatistics.newBuilder() to construct.
+ private TimestampStatistics(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TimestampStatistics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TimestampStatistics defaultInstance;
+ public static TimestampStatistics getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TimestampStatistics getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TimestampStatistics(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ minimum_ = input.readSInt64();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ maximum_ = input.readSInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TimestampStatistics> PARSER =
+ new com.google.protobuf.AbstractParser<TimestampStatistics>() {
+ public TimestampStatistics parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TimestampStatistics(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TimestampStatistics> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional sint64 minimum = 1;
+ public static final int MINIMUM_FIELD_NUMBER = 1;
+ private long minimum_;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+
+ // optional sint64 maximum = 2;
+ public static final int MAXIMUM_FIELD_NUMBER = 2;
+ private long maximum_;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+
+ private void initFields() {
+ minimum_ = 0L;
+ maximum_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeSInt64(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeSInt64(2, maximum_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(1, minimum_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeSInt64Size(2, maximum_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ minimum_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ maximum_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics build() {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics buildPartial() {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.minimum_ = minimum_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.maximum_ = maximum_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) {
+ return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics other) {
+ if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) return this;
+ if (other.hasMinimum()) {
+ setMinimum(other.getMinimum());
+ }
+ if (other.hasMaximum()) {
+ setMaximum(other.getMaximum());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional sint64 minimum = 1;
+ private long minimum_ ;
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public boolean hasMinimum() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public long getMinimum() {
+ return minimum_;
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public Builder setMinimum(long value) {
+ bitField0_ |= 0x00000001;
+ minimum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional sint64 minimum = 1;</code>
+ *
+ * <pre>
+ * min,max values saved as milliseconds since epoch
+ * </pre>
+ */
+ public Builder clearMinimum() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ minimum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional sint64 maximum = 2;
+ private long maximum_ ;
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public boolean hasMaximum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public long getMaximum() {
+ return maximum_;
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public Builder setMaximum(long value) {
+ bitField0_ |= 0x00000002;
+ maximum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional sint64 maximum = 2;</code>
+ */
+ public Builder clearMaximum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ maximum_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
+ static {
+ defaultInstance = new TimestampStatistics(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics)
+ }
+
public interface BinaryStatisticsOrBuilder
extends com.google.protobuf.MessageOrBuilder {
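[Editor's note] The new TimestampStatistics message follows the stock protobuf-java builder idiom; newBuilder(), setMinimum()/setMaximum(), the has*/get* accessors, and parseFrom() all appear in the generated code above, and toByteArray() comes from the GeneratedMessage base class. A quick usage sketch:

import org.apache.hadoop.hive.ql.io.orc.OrcProto;

public class TimestampStatsExample {
  public static void main(String[] args) throws Exception {
    // min/max are signed millis since epoch (sint64 on the wire).
    OrcProto.TimestampStatistics stats = OrcProto.TimestampStatistics.newBuilder()
        .setMinimum(0L)                // 1970-01-01T00:00:00Z
        .setMaximum(1409687816000L)    // 2014-09-02 19:56:56 UTC, this commit's timestamp
        .build();
    if (stats.hasMinimum() && stats.hasMaximum()) {
      System.out.println("range: " + stats.getMinimum() + " .. " + stats.getMaximum());
    }
    // Round-trip through bytes, as an ORC writer/reader pair would.
    byte[] bytes = stats.toByteArray();
    OrcProto.TimestampStatistics parsed = OrcProto.TimestampStatistics.parseFrom(bytes);
    System.out.println("parsed max = " + parsed.getMaximum());
  }
}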
@@ -4273,6 +4782,20 @@ public final class OrcProto {
* <code>optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8;</code>
*/
org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder();
+
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ boolean hasTimestampStatistics();
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics();
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder();
}
/**
* Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.ColumnStatistics}
@@ -4421,6 +4944,19 @@ public final class OrcProto {
bitField0_ |= 0x00000080;
break;
}
+ case 74: {
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ subBuilder = timestampStatistics_.toBuilder();
+ }
+ timestampStatistics_ = input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(timestampStatistics_);
+ timestampStatistics_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000100;
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4631,6 +5167,28 @@ public final class OrcProto {
return binaryStatistics_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ public static final int TIMESTAMPSTATISTICS_FIELD_NUMBER = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ return timestampStatistics_;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ return timestampStatistics_;
+ }
+
private void initFields() {
numberOfValues_ = 0L;
intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance();
@@ -4640,6 +5198,7 @@ public final class OrcProto {
decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance();
dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance();
binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance();
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4677,6 +5236,9 @@ public final class OrcProto {
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeMessage(9, timestampStatistics_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4718,6 +5280,10 @@ public final class OrcProto {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, binaryStatistics_);
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, timestampStatistics_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4833,6 +5399,7 @@ public final class OrcProto {
getDecimalStatisticsFieldBuilder();
getDateStatisticsFieldBuilder();
getBinaryStatisticsFieldBuilder();
+ getTimestampStatisticsFieldBuilder();
}
}
private static Builder create() {
@@ -4885,6 +5452,12 @@ public final class OrcProto {
binaryStatisticsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000080);
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
@@ -4973,6 +5546,14 @@ public final class OrcProto {
} else {
result.binaryStatistics_ = binaryStatisticsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ if (timestampStatisticsBuilder_ == null) {
+ result.timestampStatistics_ = timestampStatistics_;
+ } else {
+ result.timestampStatistics_ = timestampStatisticsBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -5013,6 +5594,9 @@ public final class OrcProto {
if (other.hasBinaryStatistics()) {
mergeBinaryStatistics(other.getBinaryStatistics());
}
+ if (other.hasTimestampStatistics()) {
+ mergeTimestampStatistics(other.getTimestampStatistics());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -5892,6 +6476,123 @@ public final class OrcProto {
return binaryStatisticsBuilder_;
}
+ // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;
+ private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder> timestampStatisticsBuilder_;
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public boolean hasTimestampStatistics() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ return timestampStatistics_;
+ } else {
+ return timestampStatisticsBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder setTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timestampStatistics_ = value;
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder setTimestampStatistics(
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder builderForValue) {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = builderForValue.build();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder mergeTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) {
+ if (timestampStatisticsBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100) &&
+ timestampStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) {
+ timestampStatistics_ =
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder(timestampStatistics_).mergeFrom(value).buildPartial();
+ } else {
+ timestampStatistics_ = value;
+ }
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000100;
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public Builder clearTimestampStatistics() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance();
+ onChanged();
+ } else {
+ timestampStatisticsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000100);
+ return this;
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder getTimestampStatisticsBuilder() {
+ bitField0_ |= 0x00000100;
+ onChanged();
+ return getTimestampStatisticsFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() {
+ if (timestampStatisticsBuilder_ != null) {
+ return timestampStatisticsBuilder_.getMessageOrBuilder();
+ } else {
+ return timestampStatistics_;
+ }
+ }
+ /**
+ * <code>optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>
+ getTimestampStatisticsFieldBuilder() {
+ if (timestampStatisticsBuilder_ == null) {
+ timestampStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>(
+ timestampStatistics_,
+ getParentForChildren(),
+ isClean());
+ timestampStatistics_ = null;
+ }
+ return timestampStatisticsBuilder_;
+ }
+
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics)
}
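[Editor's note] On the read side, consumers test the presence bit before touching the new field 9; hasTimestampStatistics() and getTimestampStatistics() are generated above. A sketch, assuming a ColumnStatistics message is already in hand:

import org.apache.hadoop.hive.ql.io.orc.OrcProto;

class ColumnStatsReader {
  /** Returns {min, max} in millis, or null when the column carries no timestamp stats. */
  static long[] timestampRange(OrcProto.ColumnStatistics colStats) {
    if (!colStats.hasTimestampStatistics()) {
      return null;  // non-timestamp column, or a file written before this change
    }
    OrcProto.TimestampStatistics ts = colStats.getTimestampStatistics();
    return new long[] { ts.getMinimum(), ts.getMaximum() };
  }
}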
@@ -16654,6 +17355,11 @@ public final class OrcProto {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -16742,74 +17448,78 @@ public final class OrcProto {
"nt\030\001 \003(\004B\002\020\001\"B\n\021DecimalStatistics\022\017\n\007min" +
"imum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t" +
"\"2\n\016DateStatistics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007m",
- "aximum\030\002 \001(\021\"\037\n\020BinaryStatistics\022\013\n\003sum\030" +
- "\001 \001(\022\"\310\004\n\020ColumnStatistics\022\026\n\016numberOfVa" +
- "lues\030\001 \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org." +
- "apache.hadoop.hive.ql.io.orc.IntegerStat" +
- "istics\022L\n\020doubleStatistics\030\003 \001(\01322.org.a" +
- "pache.hadoop.hive.ql.io.orc.DoubleStatis" +
- "tics\022L\n\020stringStatistics\030\004 \001(\01322.org.apa" +
- "che.hadoop.hive.ql.io.orc.StringStatisti" +
- "cs\022L\n\020bucketStatistics\030\005 \001(\01322.org.apach" +
- "e.hadoop.hive.ql.io.orc.BucketStatistics",
- "\022N\n\021decimalStatistics\030\006 \001(\01323.org.apache" +
- ".hadoop.hive.ql.io.orc.DecimalStatistics" +
- "\022H\n\016dateStatistics\030\007 \001(\01320.org.apache.ha" +
- "doop.hive.ql.io.orc.DateStatistics\022L\n\020bi" +
- "naryStatistics\030\010 \001(\01322.org.apache.hadoop" +
- ".hive.ql.io.orc.BinaryStatistics\"n\n\rRowI" +
- "ndexEntry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstat" +
- "istics\030\002 \001(\01322.org.apache.hadoop.hive.ql" +
- ".io.orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005" +
- "entry\030\001 \003(\0132/.org.apache.hadoop.hive.ql.",
- "io.orc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030" +
- "\001 \002(\0162-.org.apache.hadoop.hive.ql.io.orc" +
- ".Stream.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003" +
- " \001(\004\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006L" +
- "ENGTH\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONA" +
- "RY_COUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006" +
- "\"\263\001\n\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnEncod" +
- "ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" +
- "\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V",
- "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\022" +
- "9\n\007streams\030\001 \003(\0132(.org.apache.hadoop.hiv" +
- "e.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320.or" +
- "g.apache.hadoop.hive.ql.io.orc.ColumnEnc" +
- "oding\"\370\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apach" +
- "e.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010sub" +
- "types\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rm" +
- "aximumLength\030\004 \001(\r\022\021\n\tprecision\030\005 \001(\r\022\r\n" +
- "\005scale\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BY" +
- "TE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FL",
- "OAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020" +
- "\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006S" +
- "TRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020" +
- "\017\022\013\n\007VARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021StripeInform" +
- "ation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001" +
- "(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength\030\004" +
- " \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetada" +
- "taItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"X\n\020S" +
- "tripeStatistics\022D\n\010colStats\030\001 \003(\01322.org." +
- "apache.hadoop.hive.ql.io.orc.ColumnStati",
- "stics\"S\n\010Metadata\022G\n\013stripeStats\030\001 \003(\01322" +
- ".org.apache.hadoop.hive.ql.io.orc.Stripe" +
- "Statistics\"\356\002\n\006Footer\022\024\n\014headerLength\030\001 " +
- "\001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n\007stripes\030\003 " +
- "\003(\01323.org.apache.hadoop.hive.ql.io.orc.S" +
- "tripeInformation\0225\n\005types\030\004 \003(\0132&.org.ap" +
- "ache.hadoop.hive.ql.io.orc.Type\022D\n\010metad" +
- "ata\030\005 \003(\01322.org.apache.hadoop.hive.ql.io" +
- ".orc.UserMetadataItem\022\024\n\014numberOfRows\030\006 " +
- "\001(\004\022F\n\nstatistics\030\007 \003(\01322.org.apache.had",
- "oop.hive.ql.io.orc.ColumnStatistics\022\026\n\016r" +
- "owIndexStride\030\010 \001(\r\"\305\001\n\nPostScript\022\024\n\014fo" +
- "oterLength\030\001 \001(\004\022F\n\013compression\030\002 \001(\01621." +
- "org.apache.hadoop.hive.ql.io.orc.Compres" +
- "sionKind\022\034\n\024compressionBlockSize\030\003 \001(\004\022\023" +
- "\n\007version\030\004 \003(\rB\002\020\001\022\026\n\016metadataLength\030\005 " +
- "\001(\004\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKind\022\010" +
- "\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
+ "aximum\030\002 \001(\021\"7\n\023TimestampStatistics\022\017\n\007m" +
+ "inimum\030\001 \001(\022\022\017\n\007maximum\030\002 \001(\022\"\037\n\020BinaryS" +
+ "tatistics\022\013\n\003sum\030\001 \001(\022\"\234\005\n\020ColumnStatist" +
+ "ics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintStatis" +
+ "tics\030\002 \001(\01323.org.apache.hadoop.hive.ql.i" +
+ "o.orc.IntegerStatistics\022L\n\020doubleStatist" +
+ "ics\030\003 \001(\01322.org.apache.hadoop.hive.ql.io" +
+ ".orc.DoubleStatistics\022L\n\020stringStatistic" +
+ "s\030\004 \001(\01322.org.apache.hadoop.hive.ql.io.o" +
+ "rc.StringStatistics\022L\n\020bucketStatistics\030",
+ "\005 \001(\01322.org.apache.hadoop.hive.ql.io.orc" +
+ ".BucketStatistics\022N\n\021decimalStatistics\030\006" +
+ " \001(\01323.org.apache.hadoop.hive.ql.io.orc." +
+ "DecimalStatistics\022H\n\016dateStatistics\030\007 \001(" +
+ "\01320.org.apache.hadoop.hive.ql.io.orc.Dat" +
+ "eStatistics\022L\n\020binaryStatistics\030\010 \001(\01322." +
+ "org.apache.hadoop.hive.ql.io.orc.BinaryS" +
+ "tatistics\022R\n\023timestampStatistics\030\t \001(\01325" +
+ ".org.apache.hadoop.hive.ql.io.orc.Timest" +
+ "ampStatistics\"n\n\rRowIndexEntry\022\025\n\tpositi",
+ "ons\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001(\01322.org." +
+ "apache.hadoop.hive.ql.io.orc.ColumnStati" +
+ "stics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(\0132/.org.a" +
+ "pache.hadoop.hive.ql.io.orc.RowIndexEntr" +
+ "y\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.org.apache." +
+ "hadoop.hive.ql.io.orc.Stream.Kind\022\016\n\006col" +
+ "umn\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Kind\022\013\n\007PRE" +
+ "SENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n\017DICTIONA" +
+ "RY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004\022\r\n\tSECON" +
+ "DARY\020\005\022\r\n\tROW_INDEX\020\006\"\263\001\n\016ColumnEncoding",
+ "\022C\n\004kind\030\001 \002(\01625.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnEncoding.Kind\022\026\n\016diction" +
+ "arySize\030\002 \001(\r\"D\n\004Kind\022\n\n\006DIRECT\020\000\022\016\n\nDIC" +
+ "TIONARY\020\001\022\r\n\tDIRECT_V2\020\002\022\021\n\rDICTIONARY_V" +
+ "2\020\003\"\214\001\n\014StripeFooter\0229\n\007streams\030\001 \003(\0132(." +
+ "org.apache.hadoop.hive.ql.io.orc.Stream\022" +
+ "A\n\007columns\030\002 \003(\01320.org.apache.hadoop.hiv" +
+ "e.ql.io.orc.ColumnEncoding\"\370\002\n\004Type\0229\n\004k" +
+ "ind\030\001 \002(\0162+.org.apache.hadoop.hive.ql.io" +
+ ".orc.Type.Kind\022\024\n\010subtypes\030\002 \003(\rB\002\020\001\022\022\n\n",
+ "fieldNames\030\003 \003(\t\022\025\n\rmaximumLength\030\004 \001(\r\022" +
+ "\021\n\tprecision\030\005 \001(\r\022\r\n\005scale\030\006 \001(\r\"\321\001\n\004Ki" +
+ "nd\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003" +
+ "INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n" +
+ "\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n" +
+ "\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022" +
+ "\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\022\013\n\007VARCHAR\020\020\022\010\n\004C" +
+ "HAR\020\021\"x\n\021StripeInformation\022\016\n\006offset\030\001 \001" +
+ "(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLength\030\003 " +
+ "\001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014numberOfRow",
+ "s\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name\030\001 \002(" +
+ "\t\022\r\n\005value\030\002 \002(\014\"X\n\020StripeStatistics\022D\n\010" +
+ "colStats\030\001 \003(\01322.org.apache.hadoop.hive." +
+ "ql.io.orc.ColumnStatistics\"S\n\010Metadata\022G" +
+ "\n\013stripeStats\030\001 \003(\01322.org.apache.hadoop." +
+ "hive.ql.io.orc.StripeStatistics\"\356\002\n\006Foot" +
+ "er\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLengt" +
+ "h\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.ha" +
+ "doop.hive.ql.io.orc.StripeInformation\0225\n" +
+ "\005types\030\004 \003(\0132&.org.apache.hadoop.hive.ql",
+ ".io.orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apa" +
+ "che.hadoop.hive.ql.io.orc.UserMetadataIt" +
+ "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" +
+ " \003(\01322.org.apache.hadoop.hive.ql.io.orc." +
+ "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" +
+ "\"\305\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" +
+ "\013compression\030\002 \001(\01621.org.apache.hadoop.h" +
+ "ive.ql.io.orc.CompressionKind\022\034\n\024compres" +
+ "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001" +
+ "\022\026\n\016metadataLength\030\005 \001(\004\022\016\n\005magic\030\300> \001(\t",
+ "*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022" +
+ "\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16852,86 +17562,92 @@ public final class OrcProto {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor,
new java.lang.String[] { "Minimum", "Maximum", });
- internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor =
getDescriptor().getMessageTypes().get(6);
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor,
+ new java.lang.String[] { "Minimum", "Maximum", });
+ internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor,
new java.lang.String[] { "Sum", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor,
- new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", });
+ new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", "TimestampStatistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor,
new java.lang.String[] { "Positions", "Statistics", });
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor,
new java.lang.String[] { "Entry", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor,
new java.lang.String[] { "Kind", "Column", "Length", });
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor,
new java.lang.String[] { "Kind", "DictionarySize", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor,
new java.lang.String[] { "Streams", "Columns", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor,
new java.lang.String[] { "Kind", "Subtypes", "FieldNames", "MaximumLength", "Precision", "Scale", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor,
new java.lang.String[] { "Offset", "IndexLength", "DataLength", "FooterLength", "NumberOfRows", });
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor,
new java.lang.String[] { "ColStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor,
new java.lang.String[] { "StripeStats", });
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor,
new java.lang.String[] { "HeaderLength", "ContentLength", "Stripes", "Types", "Metadata", "NumberOfRows", "Statistics", "RowIndexStride", });
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor,
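
The renumbering in this hunk follows mechanically from the new TimestampStatistics
message being inserted at message-type index 6: BinaryStatistics and every message
after it shift up by one, and ColumnStatistics gains a TimestampStatistics entry in
its field-accessor list. A minimal sketch of consuming the new statistics, assuming
the standard protoc-generated accessors for the field names shown above:

    import org.apache.hadoop.hive.ql.io.orc.OrcProto;

    public class TimestampStatsSketch {
      // Prints the recorded minimum and maximum for a timestamp column,
      // if the writer populated the new message.
      static void printRange(OrcProto.ColumnStatistics stats) {
        if (stats.hasTimestampStatistics()) {
          OrcProto.TimestampStatistics ts = stats.getTimestampStatistics();
          System.out.println("min=" + ts.getMinimum() + ", max=" + ts.getMaximum());
        }
      }
    }

Because protobuf identifies fields by tag number rather than by position, readers
built against the old descriptor simply ignore the new field.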
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/AutoProgressor.java Tue Sep 2 19:56:56 2014
@@ -44,7 +44,7 @@ public class AutoProgressor {
// Name of the class to report for
String logClassName = null;
int notificationInterval;
- int timeout;
+ long timeout;
Reporter reporter;
class ReporterTask extends TimerTask {
@@ -116,7 +116,7 @@ public class AutoProgressor {
* @param timeout - when the autoprogressor should stop reporting (in ms)
*/
AutoProgressor(String logClassName, Reporter reporter,
- int notificationInterval, int timeout) {
+ int notificationInterval, long timeout) {
this.logClassName = logClassName;
this.reporter = reporter;
this.notificationInterval = notificationInterval;
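
Widening timeout from int to long is more than cosmetic: it presumably exists so the
field can hold values produced by HiveConf.getTimeVar(..., TimeUnit.MILLISECONDS),
the long-returning API this same commit adopts in Heartbeater below. Narrowing such
a value to int silently overflows once the configured span passes about 24.8 days,
as this self-contained snippet shows:

    import java.util.concurrent.TimeUnit;

    public class TimeoutWidthDemo {
      public static void main(String[] args) {
        long timeoutMs = TimeUnit.DAYS.toMillis(30); // 2,592,000,000 ms
        int narrowed = (int) timeoutMs;              // wraps to a negative value
        System.out.println(timeoutMs + " -> " + narrowed);
      }
    }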
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Tue Sep 2 19:56:56 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
@@ -342,9 +343,7 @@ public class ColumnStatsTask extends Tas
// Construct a column statistics object from the result
List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows();
// Persist the column statistics object to the metastore
- for (ColumnStatistics colStat : colStats) {
- db.updatePartitionColumnStatistics(colStat);
- }
+ db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(colStats));
return 0;
}
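
The loop-to-batch change replaces one metastore round trip per partition with a
single RPC carrying all the ColumnStatistics objects, the difference between
O(partitions) and O(1) calls for a multi-partition ANALYZE. A compilable sketch of
the batched form (the HiveException here is an assumption about the handle's
declared checked exception):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class BatchedStatsSketch {
      // Persist statistics for every analyzed partition with one request
      // instead of one request per partition.
      static void persist(Hive db, List<ColumnStatistics> colStats) throws HiveException {
        db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(colStats));
      }
    }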
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Tue Sep 2 19:56:56 2014
@@ -507,21 +507,19 @@ public class DDLTask extends Task<DDLWor
throw new HiveException("invalid configuration name " + showConf.getConfName());
}
String description = conf.getDescription();
- String defaltValue = conf.getDefaultValue();
+ String defaultValue = conf.getDefaultValue();
DataOutputStream output = getOutputStream(showConf.getResFile());
try {
+ if (defaultValue != null) {
+ output.write(defaultValue.getBytes());
+ }
+ output.write(separator);
+ output.write(conf.typeString().getBytes());
+ output.write(separator);
if (description != null) {
- if (defaltValue != null) {
- output.write(defaltValue.getBytes());
- }
- output.write(separator);
- output.write(conf.typeString().getBytes());
- output.write(separator);
- if (description != null) {
- output.write(description.replaceAll(" *\n *", " ").getBytes());
- }
- output.write(terminator);
+ output.write(description.replaceAll(" *\n *", " ").getBytes());
}
+ output.write(terminator);
} finally {
output.close();
}
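
Besides renaming the misspelled defaltValue, the restructuring fixes an output bug:
the old code nested everything under "if (description != null)", so SHOW CONF on a
property without a description printed nothing, not even its default value and type.
The corrected record layout is default, separator, type, separator, optional
description, terminator. A standalone sketch of that layout (the separator and
terminator byte values below are assumptions; DDLTask defines its own constants):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ShowConfLayoutSketch {
      static final int SEPARATOR = '\t';  // assumed value
      static final int TERMINATOR = '\n'; // assumed value

      static byte[] format(String defaultValue, String type, String description)
          throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(bos);
        if (defaultValue != null) {
          output.write(defaultValue.getBytes());
        }
        output.write(SEPARATOR);
        output.write(type.getBytes());
        output.write(SEPARATOR);
        if (description != null) {
          // collapse hard-wrapped descriptions onto one line, as DDLTask does
          output.write(description.replaceAll(" *\n *", " ").getBytes());
        }
        output.write(TERMINATOR);
        output.flush();
        return bos.toByteArray();
      }
    }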
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java Tue Sep 2 19:56:56 2014
@@ -92,8 +92,8 @@ public class FileSinkOperator extends Te
protected transient ListBucketingCtx lbCtx;
protected transient boolean isSkewedStoredAsSubDirectories;
protected transient boolean statsCollectRawDataSize;
- private transient boolean[] statsFromRecordWriter;
- private transient boolean isCollectRWStats;
+ protected transient boolean[] statsFromRecordWriter;
+ protected transient boolean isCollectRWStats;
private transient FSPaths prevFsp;
private transient FSPaths fpaths;
private transient ObjectInspector keyOI;
@@ -626,7 +626,7 @@ public class FileSinkOperator extends Te
}
}
- private boolean areAllTrue(boolean[] statsFromRW) {
+ protected boolean areAllTrue(boolean[] statsFromRW) {
for(boolean b : statsFromRW) {
if (!b) {
return false;
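
Loosening statsFromRecordWriter, isCollectRWStats, and areAllTrue from private to
protected lets subclasses reuse the "trust the record writers' own counters"
shortcut instead of duplicating it. A sketch of the check a subclass can now
express (the subclass itself is hypothetical):

    import org.apache.hadoop.hive.ql.exec.FileSinkOperator;

    public class StatsAwareFileSink extends FileSinkOperator {
      protected boolean canUseWriterStats() {
        // Only valid when stats collection is on and every record writer
        // reported that it tracked its own statistics.
        return isCollectRWStats && areAllTrue(statsFromRecordWriter);
      }
    }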
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/Heartbeater.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/Heartbeater.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/Heartbeater.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/Heartbeater.java Tue Sep 2 19:56:56 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.lockmgr
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import java.io.IOException;
+import java.util.concurrent.TimeUnit;
/**
* Class to handle heartbeats for MR and Tez tasks.
@@ -64,7 +65,8 @@ public class Heartbeater {
if (heartbeatInterval == 0) {
      // Take the transaction timeout, already converted to milliseconds,
      // but divide by 2 to give us a safety factor.
- heartbeatInterval = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT) * 500;
+ heartbeatInterval = HiveConf.getTimeVar(
+ conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2;
if (heartbeatInterval == 0) {
LOG.warn(HiveConf.ConfVars.HIVE_TXN_MANAGER.toString() + " not set, heartbeats won't be sent");
dontHeartbeat = true;
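
The interval arithmetic is unchanged, half the transaction timeout expressed in
milliseconds, but HiveConf.getTimeVar returns a long and understands suffixed
settings such as "300s" or "5min", where the old getIntVar * 500 form assumed a
bare seconds value. The equivalence for a plain seconds setting:

    import java.util.concurrent.TimeUnit;

    public class HeartbeatIntervalDemo {
      public static void main(String[] args) {
        long txnTimeoutSec = 300; // e.g. hive.txn.timeout of 300 seconds
        long oldStyle = txnTimeoutSec * 500;                          // s * 1000 / 2
        long newStyle = TimeUnit.SECONDS.toMillis(txnTimeoutSec) / 2; // ms / 2
        System.out.println(oldStyle == newStyle);                     // true: 150000 ms
      }
    }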
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HiveTotalOrderPartitioner.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HiveTotalOrderPartitioner.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HiveTotalOrderPartitioner.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/HiveTotalOrderPartitioner.java Tue Sep 2 19:56:56 2014
@@ -20,24 +20,50 @@
package org.apache.hadoop.hive.ql.exec;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.HiveKey;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
-public class HiveTotalOrderPartitioner implements Partitioner<HiveKey, Object> {
+public class HiveTotalOrderPartitioner implements Partitioner<HiveKey, Object>, Configurable {
- private Partitioner<BytesWritable, Object> partitioner
- = new TotalOrderPartitioner<BytesWritable, Object>();
+ private static final Log LOG = LogFactory.getLog(HiveTotalOrderPartitioner.class);
+ private Partitioner<BytesWritable, Object> partitioner;
+
+ @Override
public void configure(JobConf job) {
- JobConf newconf = new JobConf(job);
- newconf.setMapOutputKeyClass(BytesWritable.class);
- partitioner.configure(newconf);
+ if (partitioner == null) {
+ configurePartitioner(new JobConf(job));
+ }
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+    // workaround for TEZ-1403
+ if (partitioner == null) {
+ configurePartitioner(new JobConf(conf));
+ }
}
public int getPartition(HiveKey key, Object value, int numPartitions) {
return partitioner.getPartition(key, value, numPartitions);
}
+
+ @Override
+ public Configuration getConf() {
+ return null;
+ }
+
+ private void configurePartitioner(JobConf conf) {
+ LOG.info(TotalOrderPartitioner.getPartitionFile(conf));
+ conf.setMapOutputKeyClass(BytesWritable.class);
+ partitioner = new TotalOrderPartitioner<BytesWritable, Object>();
+ partitioner.configure(conf);
+ }
}
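
The Configurable implementation matters because Hadoop's ReflectionUtils.newInstance
calls setConf(conf) on any Configurable object it constructs; on the Tez code path
referenced by the TEZ-1403 comment, that is the only configuration hook the
partitioner receives, so previously the TotalOrderPartitioner delegate was never
set up. A sketch of that instantiation path (it assumes the total-order partition
file has already been localized, otherwise the delegate's configure call will fail):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.exec.HiveTotalOrderPartitioner;
    import org.apache.hadoop.util.ReflectionUtils;

    public class PartitionerBootstrapSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // newInstance sees Configurable and invokes setConf(conf), which
        // builds and configures the TotalOrderPartitioner delegate exactly
        // once; a later configure(JobConf) call then becomes a no-op.
        HiveTotalOrderPartitioner partitioner =
            ReflectionUtils.newInstance(HiveTotalOrderPartitioner.class, conf);
      }
    }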
Modified: hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/branches/tez/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Tue Sep 2 19:56:56 2014
@@ -348,17 +348,17 @@ public class MoveTask extends Task<MoveW
// want to isolate any potential issue it may introduce.
ArrayList<LinkedHashMap<String, String>> dp =
db.loadDynamicPartitions(
- tbd.getSourcePath(),
- tbd.getTable().getTableName(),
- tbd.getPartitionSpec(),
- tbd.getReplace(),
- dpCtx.getNumDPCols(),
- tbd.getHoldDDLTime(),
- isSkewedStoredAsDirs(tbd));
+ tbd.getSourcePath(),
+ tbd.getTable().getTableName(),
+ tbd.getPartitionSpec(),
+ tbd.getReplace(),
+ dpCtx.getNumDPCols(),
+ tbd.getHoldDDLTime(),
+ isSkewedStoredAsDirs(tbd));
if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
throw new HiveException("This query creates no partitions." +
- " To turn off this error, set hive.error.on.empty.partition=false.");
+ " To turn off this error, set hive.error.on.empty.partition=false.");
}
// for each partition spec, get the partition
@@ -412,13 +412,13 @@ public class MoveTask extends Task<MoveW
numBuckets, sortCols);
}
- dc = new DataContainer(table.getTTable(), partn.getTPartition());
- // add this partition to post-execution hook
- if (work.getOutputs() != null) {
- work.getOutputs().add(new WriteEntity(partn,
+ dc = new DataContainer(table.getTTable(), partn.getTPartition());
+ // add this partition to post-execution hook
+ if (work.getOutputs() != null) {
+ work.getOutputs().add(new WriteEntity(partn,
(tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE
: WriteEntity.WriteType.INSERT)));
- }
+ }
}
}
if (SessionState.get() != null && dc != null) {