Posted to commits@hive.apache.org by gu...@apache.org on 2013/11/12 19:23:14 UTC

svn commit: r1541190 [2/15] - in /hive/branches/tez: ./ ant/src/org/apache/hadoop/hive/ant/ beeline/ beeline/src/java/org/apache/hive/beeline/ cli/ cli/src/java/org/apache/hadoop/hive/cli/ common/ common/src/java/org/apache/hadoop/hive/common/ common/s...

Modified: hive/branches/tez/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java (original)
+++ hive/branches/tez/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java Tue Nov 12 18:23:05 2013
@@ -54,7 +54,14 @@ public class TestHiveDecimal {
     dec = HiveDecimal.create("178613588865784752580332404014434337809799306448796128931113691624");
     Assert.assertNull(dec);
   }
-
+  
+  @Test
+  public void testTrailingZeroRemovalAfterEnforcement() {
+    String decStr = "8.0900000000000000000000000000000123456";
+    HiveDecimal dec = HiveDecimal.create(decStr);
+    Assert.assertEquals("8.09", dec.toString());
+  }
+  
   @Test
   public void testMultiply() {
     HiveDecimal dec1 = HiveDecimal.create("0.1786135888657847525803");

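The new testTrailingZeroRemovalAfterEnforcement above pins down an interaction between precision enforcement and trailing-zero removal: the 38-digit input is first rounded to the digits HiveDecimal keeps, and the zeros that rounding exposes are then stripped, leaving "8.09". A minimal BigDecimal sketch of that two-step behavior (the rounding precision of 32 is illustrative only; the real cutoff lives inside HiveDecimal.create):

    import java.math.BigDecimal;
    import java.math.MathContext;
    import java.math.RoundingMode;

    public class TrailingZeroSketch {
      public static void main(String[] args) {
        BigDecimal raw = new BigDecimal("8.0900000000000000000000000000000123456");
        // Step 1: enforce a bounded precision, rounding away the "123456" tail.
        BigDecimal bounded = raw.round(new MathContext(32, RoundingMode.HALF_UP));
        // Step 2: strip the trailing zeros the rounding left behind.
        System.out.println(bounded.stripTrailingZeros().toPlainString()); // 8.09
      }
    }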
Modified: hive/branches/tez/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/branches/tez/conf/hive-default.xml.template?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/conf/hive-default.xml.template (original)
+++ hive/branches/tez/conf/hive-default.xml.template Tue Nov 12 18:23:05 2013
@@ -1799,6 +1799,16 @@
 
 
 <property>
+  <name>hive.fetch.task.conversion.threshold</name>
+  <value>-1</value>
+  <description>
+    Input threshold for applying hive.fetch.task.conversion. If the target table is native,
+    the input length is computed by summing the file lengths. If it is not native, the storage
+    handler for the table can optionally implement the
+    org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+  </description>
+</property>
+
+<property>
   <name>hive.hmshandler.retry.attempts</name>
   <value>1</value>
  <description>The number of times to retry an HMSHandler call if there is a connection error</description>
@@ -1897,18 +1907,32 @@
 
 <property>
   <name>hive.server2.async.exec.threads</name>
-  <value>50</value>
+  <value>100</value>
   <description>Number of threads in the async thread pool for HiveServer2</description>
 </property>
 
 <property>
   <name>hive.server2.async.exec.shutdown.timeout</name>
   <value>10</value>
-  <description>Time (in seconds) for which HiveServer2 shutdown will wait for async 
+  <description>Time (in seconds) for which HiveServer2 shutdown will wait for async
   threads to terminate</description>
 </property>
 
 <property>
+  <name>hive.server2.async.exec.keepalive.time</name>
+  <value>10</value>
+  <description>Time (in seconds) that an idle HiveServer2 async thread (from the thread pool) will wait
+  for a new task to arrive before terminating</description>
+</property>
+
+<property>
+  <name>hive.server2.async.exec.wait.queue.size</name>
+  <value>100</value>
+  <description>Size of the wait queue for async thread pool in HiveServer2.
+  After hitting this limit, the async thread pool will reject new requests.</description>
+</property>
+
+<property>
   <name>hive.server2.thrift.port</name>
   <value>10000</value>
   <description>Port number of HiveServer2 Thrift interface.

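The three hive.server2.async.exec.* settings above describe one bounded thread pool: a fixed worker count, an idle keepalive, and a wait queue whose overflow is rejected. A rough java.util.concurrent sketch of how the values compose (the actual HiveServer2 wiring may differ):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class AsyncPoolSketch {
      public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            100, 100,                               // hive.server2.async.exec.threads
            10, TimeUnit.SECONDS,                   // hive.server2.async.exec.keepalive.time
            new LinkedBlockingQueue<Runnable>(100), // hive.server2.async.exec.wait.queue.size
            new ThreadPoolExecutor.AbortPolicy());  // reject requests once the queue is full
        pool.allowCoreThreadTimeOut(true); // idle workers terminate after the keepalive
        pool.shutdown();
      }
    }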
Modified: hive/branches/tez/contrib/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/contrib/pom.xml (original)
+++ hive/branches/tez/contrib/pom.xml Tue Nov 12 18:23:05 2013
@@ -47,7 +47,6 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
-      <classifier>uberjar</classifier>
     </dependency>
     <!-- inter-project -->
     <dependency>

Modified: hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java (original)
+++ hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java Tue Nov 12 18:23:05 2013
@@ -172,11 +172,6 @@ public class Base64TextInputFormat imple
     return format.getSplits(job, numSplits);
   }
 
-  // Cannot put @Override here because hadoop 0.18+ removed this method.
-  public void validateInput(JobConf job) throws IOException {
-    ShimLoader.getHadoopShims().inputFormatValidateInput(format, job);
-  }
-
   /**
    * Workaround an incompatible change from commons-codec 1.3 to 1.4.
    * Since Hadoop has this jar on its classpath, we have no way of knowing

Modified: hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMax.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMax.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMax.java (original)
+++ hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMax.java Tue Nov 12 18:23:05 2013
@@ -246,7 +246,7 @@ public class UDAFExampleMax extends UDAF
         if (mEmpty) {
           mMax = new Text(o);
           mEmpty = false;
-        } else if (ShimLoader.getHadoopShims().compareText(mMax, o) < 0) {
+        } else if (mMax.compareTo(o) < 0) {
           mMax.set(o);
         }
       }

Modified: hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMin.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMin.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMin.java (original)
+++ hive/branches/tez/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMin.java Tue Nov 12 18:23:05 2013
@@ -246,7 +246,7 @@ public class UDAFExampleMin extends UDAF
         if (mEmpty) {
           mMin = new Text(o);
           mEmpty = false;
-        } else if (ShimLoader.getHadoopShims().compareText(mMin, o) > 0) {
+        } else if (mMin.compareTo(o) > 0) {
           mMin.set(o);
         }
       }

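Both UDAF examples (and UDAFTestMax further below) drop the ShimLoader indirection in favor of calling Text.compareTo directly; Text inherits a byte-wise Comparable implementation from BinaryComparable, which is presumably why the shim method could be retired. A minimal sketch of the comparison the examples now use:

    import org.apache.hadoop.io.Text;

    public class TextCompareSketch {
      public static void main(String[] args) {
        Text current = new Text("apple");
        Text candidate = new Text("banana");
        // Byte-wise comparison, the same ordering the removed shim call provided.
        if (current.compareTo(candidate) < 0) {
          current.set(candidate); // keep the running max, as in UDAFExampleMax
        }
        System.out.println(current); // banana
      }
    }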
Modified: hive/branches/tez/data/conf/hive-log4j.properties
URL: http://svn.apache.org/viewvc/hive/branches/tez/data/conf/hive-log4j.properties?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/data/conf/hive-log4j.properties (original)
+++ hive/branches/tez/data/conf/hive-log4j.properties Tue Nov 12 18:23:05 2013
@@ -75,4 +75,6 @@ log4j.category.JPOX.Query=ERROR,DRFA
 log4j.category.JPOX.General=ERROR,DRFA
 log4j.category.JPOX.Enhancer=ERROR,DRFA
 log4j.logger.org.apache.hadoop.conf.Configuration=ERROR,DRFA
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
 

Modified: hive/branches/tez/data/files/datatypes.txt
URL: http://svn.apache.org/viewvc/hive/branches/tez/data/files/datatypes.txt?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/data/files/datatypes.txt (original)
+++ hive/branches/tez/data/files/datatypes.txt Tue Nov 12 18:23:05 2013
@@ -1,3 +1,3 @@
-\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N
--1false-1.1\N\N\N-1-1-1.0-1\N\N\N\N\N\N\N
-1true1.11121x2ykva92.2111.01abcd1111213142212212x1abcd22012-04-22 09:00:00.123456789123456789.0123456YWJjZA==2013-01-01abc123
+\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N\N
+-1false-1.1\N\N\N-1-1-1.0-1\N\N\N\N\N\N\N\N
+1true1.11121x2ykva92.2111.01abcd1111213142212212x1abcd22012-04-22 09:00:00.123456789123456789.0123456YWJjZA==2013-01-01abc123abc123

Modified: hive/branches/tez/hbase-handler/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/pom.xml (original)
+++ hive/branches/tez/hbase-handler/pom.xml Tue Nov 12 18:23:05 2013
@@ -62,7 +62,6 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
-      <classifier>uberjar</classifier>
     </dependency>
     <!-- inter-project -->
     <dependency>

Modified: hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats.q.out (original)
+++ hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats.q.out Tue Nov 12 18:23:05 2013
@@ -42,6 +42,7 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -217,6 +218,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -264,6 +266,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

Modified: hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats2.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats2.q.out (original)
+++ hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats2.q.out Tue Nov 12 18:23:05 2013
@@ -42,6 +42,7 @@ Retention:          	0                  
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -217,6 +218,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -264,6 +266,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

Modified: hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats3.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats3.q.out (original)
+++ hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats3.q.out Tue Nov 12 18:23:05 2013
@@ -40,6 +40,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -97,6 +98,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -158,6 +160,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -227,6 +230,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -296,6 +300,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -369,6 +374,7 @@ Table:              	stats_part         
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

Modified: hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out (original)
+++ hive/branches/tez/hbase-handler/src/test/results/positive/hbase_stats_empty_partition.q.out Tue Nov 12 18:23:05 2013
@@ -43,6 +43,7 @@ Table:              	tmptable           
 Protect Mode:       	None                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	true                
 	numFiles            	1                   
 	numRows             	0                   
 	rawDataSize         	0                   

Modified: hive/branches/tez/hcatalog/storage-handlers/hbase/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/storage-handlers/hbase/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/storage-handlers/hbase/pom.xml (original)
+++ hive/branches/tez/hcatalog/storage-handlers/hbase/pom.xml Tue Nov 12 18:23:05 2013
@@ -84,6 +84,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>${junit.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase</artifactId>
       <version>${hbase.version}</version>

Modified: hive/branches/tez/hcatalog/webhcat/svr/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/webhcat/svr/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/webhcat/svr/pom.xml (original)
+++ hive/branches/tez/hcatalog/webhcat/svr/pom.xml Tue Nov 12 18:23:05 2013
@@ -92,8 +92,16 @@
       <artifactId>jul-to-slf4j</artifactId>
       <version>${slf4j.version}</version>
     </dependency>
+    <!-- test inter-project -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>${junit.version}</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
+
   <profiles>
     <profile>
       <id>hadoop-1</id>
@@ -140,7 +148,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
-        <version>${maven-javadoc-plugin.version}</version>
+        <version>${maven.javadoc.plugin.version}</version>
         <executions>
           <execution>
             <id>resourcesdoc.xml</id>

Modified: hive/branches/tez/hwi/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/hwi/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/hwi/pom.xml (original)
+++ hive/branches/tez/hwi/pom.xml Tue Nov 12 18:23:05 2013
@@ -47,7 +47,6 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
-      <classifier>uberjar</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
@@ -65,6 +64,11 @@
       <artifactId>jetty</artifactId>
       <version>${jetty.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <version>${jetty.version}</version>
+    </dependency>
     <!-- test intra-project -->
     <dependency>
       <groupId>org.apache.hive</groupId>

Modified: hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
+++ hive/branches/tez/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java Tue Nov 12 18:23:05 2013
@@ -184,7 +184,8 @@ public class TestJdbcDriver2 {
         + " c18 decimal(16,7), "
         + " c19 binary, "
         + " c20 date,"
-        + " c21 varchar(20)"
+        + " c21 varchar(20),"
+        + " c22 char(15)"
         + ") comment'" + dataTypeTableComment
         +"' partitioned by (dt STRING)");
 
@@ -400,34 +401,34 @@ public class TestJdbcDriver2 {
     for (int i = 1; i < meta.getColumnCount(); i++) {
       assertNull(res.getObject(i));
     }
-
   }
 
   @Test
   public void testErrorDiag() throws SQLException {
     Statement stmt = con.createStatement();
-
     // verify syntax error
     try {
-      ResultSet res = stmt.executeQuery("select from " + dataTypeTableName);
+      stmt.executeQuery("select from " + dataTypeTableName);
+      fail("SQLException is expected");
     } catch (SQLException e) {
       assertEquals("42000", e.getSQLState());
     }
 
     // verify table not found error
     try {
-      ResultSet res = stmt.executeQuery("select * from nonTable");
+      stmt.executeQuery("select * from nonTable");
+      fail("SQLException is expected");
     } catch (SQLException e) {
       assertEquals("42S02", e.getSQLState());
     }
 
     // verify invalid column error
     try {
-      ResultSet res = stmt.executeQuery("select zzzz from " + dataTypeTableName);
+      stmt.executeQuery("select zzzz from " + dataTypeTableName);
+      fail("SQLException is expected");
     } catch (SQLException e) {
       assertEquals("42000", e.getSQLState());
     }
-
   }
 
   /**
@@ -790,6 +791,7 @@ public class TestJdbcDriver2 {
     assertEquals(null, res.getString(20));
     assertEquals(null, res.getDate(20));
     assertEquals(null, res.getString(21));
+    assertEquals(null, res.getString(22));
 
     // row 2
     assertTrue(res.next());
@@ -816,6 +818,7 @@ public class TestJdbcDriver2 {
     assertEquals(null, res.getString(20));
     assertEquals(null, res.getDate(20));
     assertEquals(null, res.getString(21));
+    assertEquals(null, res.getString(22));
 
     // row 3
     assertTrue(res.next());
@@ -842,6 +845,7 @@ public class TestJdbcDriver2 {
     assertEquals("2013-01-01", res.getString(20));
     assertEquals("2013-01-01", res.getDate(20).toString());
     assertEquals("abc123", res.getString(21));
+    assertEquals("abc123         ", res.getString(22));
 
     // test getBoolean rules on non-boolean columns
     assertEquals(true, res.getBoolean(1));
@@ -1319,14 +1323,14 @@ public class TestJdbcDriver2 {
 
     ResultSet res = stmt.executeQuery(
         "select c1, c2, c3, c4, c5 as a, c6, c7, c8, c9, c10, c11, c12, " +
-            "c1*2, sentences(null, null, null) as b, c17, c18, c20, c21 from " + dataTypeTableName +
+            "c1*2, sentences(null, null, null) as b, c17, c18, c20, c21, c22 from " + dataTypeTableName +
         " limit 1");
     ResultSetMetaData meta = res.getMetaData();
 
     ResultSet colRS = con.getMetaData().getColumns(null, null,
         dataTypeTableName.toLowerCase(), null);
 
-    assertEquals(18, meta.getColumnCount());
+    assertEquals(19, meta.getColumnCount());
 
     assertTrue(colRS.next());
 
@@ -1523,9 +1527,9 @@ public class TestJdbcDriver2 {
     assertEquals("c18", meta.getColumnName(16));
     assertEquals(Types.DECIMAL, meta.getColumnType(16));
     assertEquals("decimal", meta.getColumnTypeName(16));
-    assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(16));
-    assertEquals(Integer.MAX_VALUE, meta.getPrecision(16));
-    assertEquals(Integer.MAX_VALUE, meta.getScale(16));
+    assertEquals(18, meta.getColumnDisplaySize(16));
+    assertEquals(16, meta.getPrecision(16));
+    assertEquals(7, meta.getScale(16));
 
     assertEquals("c20", meta.getColumnName(17));
     assertEquals(Types.DATE, meta.getColumnType(17));
@@ -1542,6 +1546,14 @@ public class TestJdbcDriver2 {
     assertEquals(20, meta.getPrecision(18));
     assertEquals(0, meta.getScale(18));
 
+    assertEquals("c22", meta.getColumnName(19));
+    assertEquals(Types.CHAR, meta.getColumnType(19));
+    assertEquals("char", meta.getColumnTypeName(19));
+    // char columns should have correct display size/precision
+    assertEquals(15, meta.getColumnDisplaySize(19));
+    assertEquals(15, meta.getPrecision(19));
+    assertEquals(0, meta.getScale(19));
+
     for (int i = 1; i <= meta.getColumnCount(); i++) {
       assertFalse(meta.isAutoIncrement(i));
       assertFalse(meta.isCurrency(i));
@@ -1589,7 +1601,7 @@ public class TestJdbcDriver2 {
 
   @Test
   public void testParseUrlHttpMode() throws SQLException {
-    HiveDriver driver = new HiveDriver();
+    new HiveDriver();
     for (String[] testValues : HTTP_URL_PROPERTIES) {
       JdbcConnectionParams params = Utils.parseURL(testValues[0]);
       assertEquals(params.getHost(), testValues[1]);

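The updated test adds a char(15) column and pins exact decimal metadata: decimal(16,7) now reports precision 16, scale 7, and display size 18 instead of Integer.MAX_VALUE, while char(15) reports 15 for both, with values blank-padded to the declared length ("abc123" plus nine spaces). A hedged client-side sketch of reading the same metadata (URL and table name are illustrative, assuming a reachable HiveServer2):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;

    public class CharDecimalMetaSketch {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        Connection con = DriverManager.getConnection(
            "jdbc:hive2://localhost:10000/default", "user", "");
        ResultSet res = con.createStatement().executeQuery(
            "select c18, c22 from testdatatypetable limit 1"); // illustrative table
        ResultSetMetaData meta = res.getMetaData();
        System.out.println(meta.getPrecision(1));         // 16 for decimal(16,7)
        System.out.println(meta.getScale(1));             // 7
        System.out.println(meta.getColumnDisplaySize(1)); // 18 = 16 + '-' and '.'
        System.out.println(meta.getColumnDisplaySize(2)); // 15 for char(15)
        if (res.next()) {
          System.out.println("[" + res.getString(2) + "]"); // blank-padded to 15 chars
        }
        con.close();
      }
    }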
Modified: hive/branches/tez/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/qtest/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/itests/qtest/pom.xml (original)
+++ hive/branches/tez/itests/qtest/pom.xml Tue Nov 12 18:23:05 2013
@@ -438,7 +438,7 @@
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.8</version>
+        <version>${build.helper.maven.plugin.version}</version>
         <executions>
           <execution>
             <id>add-test-sources</id>

Modified: hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Tue Nov 12 18:23:05 2013
@@ -1564,8 +1564,8 @@ public class QTestUtil {
   }
 
   public static void outputTestFailureHelpMessage() {
-    System.err.println("See build/ql/tmp/hive.log, "
-        + "or try \"ant test ... -Dtest.silent=false\" to get more logs.");
+    System.err.println("See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
+        + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs.");
     System.err.flush();
   }
 

Modified: hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java (original)
+++ hive/branches/tez/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java Tue Nov 12 18:23:05 2013
@@ -272,7 +272,7 @@ public class UDAFTestMax extends UDAF {
         if (mEmpty) {
           mMax = new Text(o);
           mEmpty = false;
-        } else if (ShimLoader.getHadoopShims().compareText(mMax, o) < 0) {
+        } else if (mMax.compareTo(o) < 0) {
           mMax.set(o);
         }
       }

Modified: hive/branches/tez/jdbc/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/pom.xml (original)
+++ hive/branches/tez/jdbc/pom.xml Tue Nov 12 18:23:05 2013
@@ -65,6 +65,16 @@
       <version>${commons-logging.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+      <version>${httpcomponents.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpcore</artifactId>
+      <version>${httpcomponents.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
       <version>${libthrift.version}</version>

Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java Tue Nov 12 18:23:05 2013
@@ -511,6 +511,8 @@ public abstract class HiveBaseResultSet 
       return getDoubleValue(tColumnValue.getDoubleVal());
     case STRING_TYPE:
       return getStringValue(tColumnValue.getStringVal());
+    case CHAR_TYPE:
+      return getStringValue(tColumnValue.getStringVal());
     case VARCHAR_TYPE:
       return getStringValue(tColumnValue.getStringVal());
     case BINARY_TYPE:

Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java Tue Nov 12 18:23:05 2013
@@ -24,6 +24,7 @@ import java.sql.CallableStatement;
 import java.sql.Clob;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
 import java.sql.NClob;
 import java.sql.PreparedStatement;
 import java.sql.SQLClientInfoException;
@@ -46,6 +47,7 @@ import javax.security.sasl.SaslException
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.auth.KerberosSaslHelper;
 import org.apache.hive.service.auth.PlainSaslHelper;
 import org.apache.hive.service.auth.SaslQOP;
@@ -61,7 +63,6 @@ import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.transport.THttpClient;
-import org.apache.thrift.transport.TSocket;
 import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportException;
 
@@ -78,6 +79,10 @@ public class HiveConnection implements j
   private static final String HIVE_AUTH_PASSWD = "password";
   private static final String HIVE_ANONYMOUS_USER = "anonymous";
   private static final String HIVE_ANONYMOUS_PASSWD = "anonymous";
+  private static final String HIVE_USE_SSL = "ssl";
+  private static final String HIVE_SSL_TRUST_STORE = "sslTrustStore";
+  private static final String HIVE_SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
+
   private final String jdbcURI;
   private final String host;
   private final int port;
@@ -91,8 +96,10 @@ public class HiveConnection implements j
   private SQLWarning warningChain = null;
   private TSessionHandle sessHandle = null;
   private final List<TProtocolVersion> supportedProtocols = new LinkedList<TProtocolVersion>();
+  private int loginTimeout = 0;
 
   public HiveConnection(String uri, Properties info) throws SQLException {
+    loginTimeout = DriverManager.getLoginTimeout();
     jdbcURI = uri;
     // parse the connection uri
     Utils.JdbcConnectionParams connParams = Utils.parseURL(jdbcURI);
@@ -127,6 +134,7 @@ public class HiveConnection implements j
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1);
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2);
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V3);
+    supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V4);
 
     // open client session
     openSession();
@@ -177,26 +185,26 @@ public class HiveConnection implements j
   }
 
   private TTransport createBinaryTransport() throws SQLException {
-    transport = new TSocket(host, port);
-    // handle secure connection if specified
-    if (!sessConfMap.containsKey(HIVE_AUTH_TYPE)
-        || !sessConfMap.get(HIVE_AUTH_TYPE).equals(HIVE_AUTH_SIMPLE)) {
-      try {
+    try {
+      // handle secure connection if specified
+      if (!HIVE_AUTH_SIMPLE.equals(sessConfMap.get(HIVE_AUTH_TYPE))) {
         // If Kerberos
         if (sessConfMap.containsKey(HIVE_AUTH_PRINCIPAL)) {
           Map<String, String> saslProps = new HashMap<String, String>();
           SaslQOP saslQOP = SaslQOP.AUTH;
-          if(sessConfMap.containsKey(HIVE_AUTH_QOP)) {
+          if (sessConfMap.containsKey(HIVE_AUTH_QOP)) {
             try {
               saslQOP = SaslQOP.fromString(sessConfMap.get(HIVE_AUTH_QOP));
             } catch (IllegalArgumentException e) {
-              throw new SQLException("Invalid " + HIVE_AUTH_QOP + " parameter. " + e.getMessage(), "42000", e);
+              throw new SQLException("Invalid " + HIVE_AUTH_QOP + " parameter. " + e.getMessage(),
+                  "42000", e);
             }
           }
           saslProps.put(Sasl.QOP, saslQOP.toString());
           saslProps.put(Sasl.SERVER_AUTH, "true");
           transport = KerberosSaslHelper.getKerberosTransport(
-              sessConfMap.get(HIVE_AUTH_PRINCIPAL), host, transport, saslProps);
+              sessConfMap.get(HIVE_AUTH_PRINCIPAL), host,
+              HiveAuthFactory.getSocketTransport(host, port, loginTimeout), saslProps);
         } else {
           String userName = sessConfMap.get(HIVE_AUTH_USER);
           if ((userName == null) || userName.isEmpty()) {
@@ -206,12 +214,30 @@ public class HiveConnection implements j
           if ((passwd == null) || passwd.isEmpty()) {
             passwd = HIVE_ANONYMOUS_PASSWD;
           }
+          String useSslStr = sessConfMap.get(HIVE_USE_SSL);
+          if ("true".equalsIgnoreCase(useSslStr)) {
+            String sslTrustStore = sessConfMap.get(HIVE_SSL_TRUST_STORE);
+            String sslTrustStorePassword = sessConfMap.get(HIVE_SSL_TRUST_STORE_PASSWORD);
+            if (sslTrustStore == null || sslTrustStore.isEmpty()) {
+              transport = HiveAuthFactory.getSSLSocket(host, port, loginTimeout);
+            } else {
+              transport = HiveAuthFactory.getSSLSocket(host, port, loginTimeout,
+                  sslTrustStore, sslTrustStorePassword);
+            }
+          } else {
+            transport = HiveAuthFactory.getSocketTransport(host, port, loginTimeout);
+          }
           transport = PlainSaslHelper.getPlainTransport(userName, passwd, transport);
         }
-      } catch (SaslException e) {
-        throw new SQLException("Could not create secure connection to "
-            + jdbcURI + ": " + e.getMessage(), " 08S01", e);
+      } else {
+        transport = HiveAuthFactory.getSocketTransport(host, port, loginTimeout);
       }
+    } catch (SaslException e) {
+      throw new SQLException("Could not create secure connection to "
+          + jdbcURI + ": " + e.getMessage(), " 08S01", e);
+    } catch (TTransportException e) {
+      throw new SQLException("Could not create connection to "
+          + jdbcURI + ": " + e.getMessage(), " 08S01", e);
     }
     return transport;
   }

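Beyond SSL, the refactor threads DriverManager's login timeout into every socket it creates and converts transport failures into SQLExceptions with state 08S01. A hedged usage sketch of the new URL options (the parameter names ssl, sslTrustStore, and trustStorePassword come from this hunk; host and paths are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class SslConnectSketch {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        DriverManager.setLoginTimeout(30); // now honored when the socket is opened
        String url = "jdbc:hive2://hs2.example.com:10000/default"
            + ";ssl=true"
            + ";sslTrustStore=/path/to/truststore.jks"
            + ";trustStorePassword=changeit";
        Connection con = DriverManager.getConnection(url, "hive", "");
        con.close();
      }
    }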
Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java Tue Nov 12 18:23:05 2013
@@ -29,6 +29,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hive.service.cli.TableSchema;
 import org.apache.hive.service.cli.thrift.TCLIService;
 import org.apache.hive.service.cli.thrift.TCLIServiceConstants;
@@ -180,6 +181,7 @@ public class HiveQueryResultSet extends 
     if (primitiveTypeEntry.isSetTypeQualifiers()) {
       TTypeQualifiers tq = primitiveTypeEntry.getTypeQualifiers();
       switch (primitiveTypeEntry.getType()) {
+        case CHAR_TYPE:
         case VARCHAR_TYPE:
           TTypeQualifierValue val =
               tq.getQualifiers().get(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH);
@@ -188,6 +190,12 @@ public class HiveQueryResultSet extends 
             ret = new JdbcColumnAttributes(val.getI32Value(), 0);
           }
           break;
+        case DECIMAL_TYPE:
+          TTypeQualifierValue prec = tq.getQualifiers().get(TCLIServiceConstants.PRECISION);
+          TTypeQualifierValue scale = tq.getQualifiers().get(TCLIServiceConstants.SCALE);
+          ret = new JdbcColumnAttributes(prec == null ? HiveDecimal.DEFAULT_PRECISION : prec.getI32Value(),
+              scale == null ? HiveDecimal.DEFAULT_SCALE : scale.getI32Value());
+          break;
         default:
           break;
       }

Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/HiveResultSetMetaData.java Tue Nov 12 18:23:05 2013
@@ -95,6 +95,8 @@ public class HiveResultSetMetaData imple
       return serdeConstants.STRING_TYPE_NAME;
     } else if ("varchar".equalsIgnoreCase(type)) {
       return serdeConstants.VARCHAR_TYPE_NAME;
+    } else if ("char".equalsIgnoreCase(type)) {
+      return serdeConstants.CHAR_TYPE_NAME;
     } else if ("float".equalsIgnoreCase(type)) {
       return serdeConstants.FLOAT_TYPE_NAME;
     } else if ("double".equalsIgnoreCase(type)) {

Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumn.java Tue Nov 12 18:23:05 2013
@@ -69,11 +69,9 @@ public class JdbcColumn {
     switch(columnType) {
     case Types.BOOLEAN:
       return columnPrecision(columnType, columnAttributes);
+    case Types.CHAR:
     case Types.VARCHAR:
-      if (columnAttributes != null) {
-        return columnAttributes.precision;
-      }
-      return Integer.MAX_VALUE; // hive has no max limit for strings
+      return columnPrecision(columnType, columnAttributes);
     case Types.TINYINT:
     case Types.SMALLINT:
     case Types.INTEGER:
@@ -91,10 +89,7 @@ public class JdbcColumn {
     case Types.DOUBLE:
       return 25; // e.g. -(17#).e-####
     case Types.DECIMAL:
-      if (columnAttributes != null) {
-        return columnAttributes.precision + 2;  // '-' sign and '.'
-      }
-      return Integer.MAX_VALUE;
+      return columnPrecision(columnType, columnAttributes) + 2;  // '-' sign and '.'
     default:
       throw new SQLException("Invalid column type: " + columnType);
     }
@@ -106,6 +101,7 @@ public class JdbcColumn {
     switch(columnType) {
     case Types.BOOLEAN:
       return 1;
+    case Types.CHAR:
     case Types.VARCHAR:
       if (columnAttributes != null) {
         return columnAttributes.precision;
@@ -128,10 +124,7 @@ public class JdbcColumn {
     case Types.TIMESTAMP:
       return 29;
     case Types.DECIMAL:
-      if (columnAttributes != null) {
-        return columnAttributes.precision;
-      }
-      return Integer.MAX_VALUE;
+      return columnAttributes.precision;
     default:
       throw new SQLException("Invalid column type: " + columnType);
     }
@@ -142,6 +135,7 @@ public class JdbcColumn {
     // according to hiveTypeToSqlType possible options are:
     switch(columnType) {
     case Types.BOOLEAN:
+    case Types.CHAR:
     case Types.VARCHAR:
     case Types.TINYINT:
     case Types.SMALLINT:
@@ -156,10 +150,7 @@ public class JdbcColumn {
     case  Types.TIMESTAMP:
       return 9;
     case Types.DECIMAL:
-      if (columnAttributes != null) {
-        return columnAttributes.scale;
-      }
-      return Integer.MAX_VALUE;
+      return columnAttributes.scale;
     default:
       throw new SQLException("Invalid column type: " + columnType);
     }

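The net effect of these JdbcColumn changes is that char and decimal columns no longer fall back to Integer.MAX_VALUE: precision and scale arrive via JdbcColumnAttributes, and display width is derived from precision. A worked mirror of the decimal rule (the value matches the updated TestJdbcDriver2 assertion):

    public class DisplaySizeSketch {
      // Decimal display width = precision + 2, leaving room for '-' and '.'.
      static int decimalDisplaySize(int precision) {
        return precision + 2;
      }

      public static void main(String[] args) {
        System.out.println(decimalDisplaySize(16)); // 18 for a decimal(16,7) column
      }
    }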
Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumnAttributes.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumnAttributes.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumnAttributes.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/JdbcColumnAttributes.java Tue Nov 12 18:23:05 2013
@@ -29,4 +29,10 @@ class JdbcColumnAttributes {
     this.precision = precision;
     this.scale = scale;
   }
+
+  @Override
+  public String toString() {
+    return "(" + precision + "," + scale + ")";
+  }
+
 }
\ No newline at end of file

Modified: hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/Utils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/Utils.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/Utils.java (original)
+++ hive/branches/tez/jdbc/src/java/org/apache/hive/jdbc/Utils.java Tue Nov 12 18:23:05 2013
@@ -116,6 +116,8 @@ public class Utils {
       return Types.VARCHAR;
     } else if ("varchar".equalsIgnoreCase(type)) {
       return Types.VARCHAR;
+    } else if ("char".equalsIgnoreCase(type)) {
+      return Types.CHAR;
     } else if ("float".equalsIgnoreCase(type)) {
       return Types.FLOAT;
     } else if ("double".equalsIgnoreCase(type)) {

Modified: hive/branches/tez/metastore/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/pom.xml (original)
+++ hive/branches/tez/metastore/pom.xml Tue Nov 12 18:23:05 2013
@@ -42,10 +42,14 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
-      <classifier>uberjar</classifier>
     </dependency>
     <!-- inter-project -->
     <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${guava.version}</version>
+    </dependency>
+    <dependency>
       <groupId>com.jolbox</groupId>
       <artifactId>bonecp</artifactId>
       <version>${bonecp.version}</version>

Modified: hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql Tue Nov 12 18:23:05 2013
@@ -1,3 +1,11 @@
-SELECT 'Upgrading MetaStore schema from 0.11.0 to 0.12.0' AS ' ';
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS ' ';
+
+UPDATE PARTITION_KEY_VALS
+  INNER JOIN PARTITIONS ON PARTITION_KEY_VALS.PART_ID = PARTITIONS.PART_ID
+  INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
+    AND PARTITION_KEYS.INTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX
+    AND PARTITION_KEYS.PKEY_TYPE = 'date'
+SET PART_KEY_VAL = IFNULL(DATE_FORMAT(cast(PART_KEY_VAL as date),'%Y-%m-%d'), PART_KEY_VAL);
+
 UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 0.11.0 to 0.12.0' AS ' ';
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS ' ';

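This script, and the Oracle and PostgreSQL variants below, apply the same rule to date partition keys: rewrite the stored string to canonical YYYY-MM-DD when it parses as a date, and leave it untouched otherwise, which is what the IFNULL/NVL/COALESCE wrappers express. A hedged Java sketch of the rule (each database's CAST accepts its own set of date strings, so this is an approximation):

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public class DateKeyNormalizer {
      // Normalize to YYYY-MM-DD if the value parses as a date; otherwise keep it.
      static String normalize(String partKeyVal) {
        SimpleDateFormat iso = new SimpleDateFormat("yyyy-MM-dd");
        iso.setLenient(false);
        try {
          return iso.format(iso.parse(partKeyVal));
        } catch (ParseException e) {
          return partKeyVal;
        }
      }

      public static void main(String[] args) {
        System.out.println(normalize("2013-1-2"));   // 2013-01-02
        System.out.println(normalize("not_a_date")); // unchanged
      }
    }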
Modified: hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/oracle/upgrade-0.12.0-to-0.13.0.oracle.sql Tue Nov 12 18:23:05 2013
@@ -1,3 +1,28 @@
-SELECT 'Upgrading MetaStore schema from 0.11.0 to 0.12.0' AS Status from dual;
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
+
+CREATE FUNCTION hive13_to_date(date_str IN VARCHAR2)
+RETURN DATE
+IS dt DATE;
+BEGIN
+  dt := TO_DATE(date_str, 'YYYY-MM-DD');
+  RETURN dt;
+EXCEPTION
+  WHEN others THEN RETURN null;
+END;
+/
+
+MERGE INTO PARTITION_KEY_VALS
+USING (
+  SELECT SRC.PART_ID as IPART_ID, SRC.INTEGER_IDX as IINTEGER_IDX, 
+     NVL(TO_CHAR(hive13_to_date(PART_KEY_VAL),'YYYY-MM-DD'), PART_KEY_VAL) as NORM
+  FROM PARTITION_KEY_VALS SRC
+    INNER JOIN PARTITIONS ON SRC.PART_ID = PARTITIONS.PART_ID
+    INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
+      AND PARTITION_KEYS.INTEGER_IDX = SRC.INTEGER_IDX AND PARTITION_KEYS.PKEY_TYPE = 'date'
+) ON (IPART_ID = PARTITION_KEY_VALS.PART_ID AND IINTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX)
+WHEN MATCHED THEN UPDATE SET PART_KEY_VAL = NORM;
+
+DROP FUNCTION hive13_to_date;
+
 UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 0.11.0 to 0.12.0' AS Status from dual;
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;

Modified: hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql (original)
+++ hive/branches/tez/metastore/scripts/upgrade/postgres/upgrade-0.12.0-to-0.13.0.postgres.sql Tue Nov 12 18:23:05 2013
@@ -1,3 +1,26 @@
-SELECT 'Upgrading MetaStore schema from 0.11.0 to 0.12.0';
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0';
+
+CREATE FUNCTION hive13_to_date(date_str text) RETURNS DATE AS $$
+DECLARE dt DATE;
+BEGIN
+  dt := date_str::DATE;
+  RETURN dt;
+EXCEPTION
+  WHEN others THEN RETURN null;
+END;
+$$ LANGUAGE plpgsql;
+
+UPDATE "PARTITION_KEY_VALS"
+SET "PART_KEY_VAL" = COALESCE(TO_CHAR(hive13_to_date(src."PART_KEY_VAL"),'YYYY-MM-DD'), src."PART_KEY_VAL")
+FROM "PARTITION_KEY_VALS" src
+  INNER JOIN "PARTITIONS" ON src."PART_ID" = "PARTITIONS"."PART_ID"
+  INNER JOIN "PARTITION_KEYS" ON "PARTITION_KEYS"."TBL_ID" = "PARTITIONS"."TBL_ID"
+    AND "PARTITION_KEYS"."INTEGER_IDX" = src."INTEGER_IDX"
+    AND "PARTITION_KEYS"."PKEY_TYPE" = 'date';
+
+DROP FUNCTION hive13_to_date(date_str text);
+
 UPDATE "VERSION" SET "SCHEMA_VERSION"='0.13.0', "VERSION_COMMENT"='Hive release version 0.13.0' where "VER_ID"=1;
-SELECT 'Finished upgrading MetaStore schema from 0.11.0 to 0.12.0';
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0';
+
+

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Nov 12 18:23:05 2013
@@ -277,6 +277,10 @@ public class HiveMetaStore extends Thrif
       return threadLocalId.get();
     }
 
+    public static void resetDefaultDBFlag() {
+      createDefaultDB = false;
+    }
+
     public HMSHandler(String name) throws MetaException {
       super(name);
       hiveConf = new HiveConf(this.getClass());
@@ -4109,7 +4113,6 @@ public class HiveMetaStore extends Thrif
   }
 
 
-
   /**
    * Discard a current delegation token.
    *

Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Tue Nov 12 18:23:05 2013
@@ -148,8 +148,7 @@ public class MetaStoreUtils {
    * @return True if the passed Parameters Map contains values for all "Fast Stats".
    */
   public static boolean containsAllFastStats(Map<String, String> partParams) {
-    List<String> fastStats = StatsSetupConst.getStatsFastCollection();
-    for (String stat : fastStats) {
+    for (String stat : StatsSetupConst.fastStats) {
       if (!partParams.containsKey(stat)) {
         return false;
       }
@@ -157,11 +156,6 @@ public class MetaStoreUtils {
     return true;
   }
 
-  public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh)
-      throws MetaException {
-    return updateUnpartitionedTableStatsFast(db, tbl, wh, false, false);
-  }
-
   public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh,
       boolean madeDir) throws MetaException {
     return updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir, false);
@@ -200,9 +194,15 @@ public class MetaStoreUtils {
         }
         params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
         LOG.info("Updated size of table " + tbl.getTableName() +" to "+ Long.toString(tableSize));
-        if (params.containsKey(StatsSetupConst.ROW_COUNT) ||
-            params.containsKey(StatsSetupConst.RAW_DATA_SIZE)) {
-          // TODO: Add a MetaStore flag indicating accuracy of these stats and update it here.
+        if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
+          // invalidate stats requiring scan since this is a regular ddl alter case
+          for (String stat : StatsSetupConst.statsRequireCompute) {
+            params.put(stat, "-1");
+          }
+          params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, StatsSetupConst.FALSE);
+        } else {
+          params.remove(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK);	
+          params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, StatsSetupConst.TRUE);
         }
       }
       tbl.setParameters(params);
@@ -215,10 +215,6 @@ public class MetaStoreUtils {
   public static boolean requireCalStats(Configuration hiveConf, Partition oldPart,
     Partition newPart, Table tbl) {
 
-    if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-      return false;
-    }
-
     if (MetaStoreUtils.isView(tbl)) {
       return false;
     }
@@ -233,9 +229,13 @@ public class MetaStoreUtils {
       return true;
     }
 
+    if(newPart.getParameters().containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
+      return true;
+    }
+    
     // requires to calculate stats if new and old have different fast stats
     if ((oldPart != null) && (oldPart.getParameters() != null)) {
-      for (String stat : StatsSetupConst.getStatsFastCollection()) {
+      for (String stat : StatsSetupConst.fastStats) {
         if (oldPart.getParameters().containsKey(stat)) {
           Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
           Long newStat = Long.parseLong(newPart.getParameters().get(stat));
@@ -290,11 +290,15 @@ public class MetaStoreUtils {
         }
         params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(partSize));
         LOG.warn("Updated size to " + Long.toString(partSize));
-        if (params.containsKey(StatsSetupConst.ROW_COUNT) ||
-            params.containsKey(StatsSetupConst.RAW_DATA_SIZE)) {
-          // The accuracy of these "collectable" stats at this point is suspect unless we know that
-          // StatsTask was just run before this MetaStore call and populated them.
-          // TODO: Add a MetaStore flag indicating accuracy of these stats and update it here.
+        if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
+          // invalidate stats requiring scan since this is a regular ddl alter case
+          for (String stat : StatsSetupConst.statsRequireCompute) {
+            params.put(stat, "-1");
+          }
+          params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, StatsSetupConst.FALSE);
+        } else {
+          params.remove(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK);
+          params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, StatsSetupConst.TRUE);
         }
       }
       part.setParameters(params);

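The replacement logic above introduces an explicit accuracy handshake: a StatsTask-driven update carries a STATS_GENERATED_VIA_STATS_TASK marker, which the metastore consumes and promotes to COLUMN_STATS_ACCURATE=true, while a plain DDL alter resets scan-derived stats to -1 and flags them inaccurate. A condensed mirror of the flow (string literals stand in for the StatsSetupConst constants):

    import java.util.Map;

    public class StatsFlagSketch {
      static final String[] STATS_REQUIRE_COMPUTE = {"numRows", "rawDataSize"};

      static void updateStatsFlags(Map<String, String> params) {
        if (!params.containsKey("STATS_GENERATED_VIA_STATS_TASK")) {
          // Regular DDL alter: scan-derived stats can no longer be trusted.
          for (String stat : STATS_REQUIRE_COMPUTE) {
            params.put(stat, "-1");
          }
          params.put("COLUMN_STATS_ACCURATE", "false");
        } else {
          // StatsTask just ran: consume its marker, record the stats as accurate.
          params.remove("STATS_GENERATED_VIA_STATS_TASK");
          params.put("COLUMN_STATS_ACCURATE", "true");
        }
      }
    }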
Modified: hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/tez/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Tue Nov 12 18:23:05 2013
@@ -1420,41 +1420,30 @@ public class ObjectStore implements RawS
     return getPartitionsInternal(dbName, tableName, maxParts, true, true);
   }
 
-  protected List<Partition> getPartitionsInternal(String dbName, String tableName,
+  protected List<Partition> getPartitionsInternal(String dbName, String tblName,
       int maxParts, boolean allowSql, boolean allowJdo) throws MetaException {
-    assert allowSql || allowJdo;
-    boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
-
-    boolean success = false;
-    List<Partition> parts = null;
+    dbName = dbName.toLowerCase();
+    tblName = tblName.toLowerCase();
+    GetPartsHelper ctx = new GetPartsHelper(dbName, tblName, allowSql, allowJdo);
     try {
-      long start = doTrace ? System.nanoTime() : 0;
-      openTransaction();
-      if (doUseDirectSql) {
+      ctx.start(false);
+      if (ctx.canUseDirectSql()) {
         try {
           Integer max = (maxParts < 0) ? null : maxParts;
-          parts = directSql.getPartitions(dbName, tableName, max);
+          ctx.setResult(directSql.getPartitions(dbName, tblName, max));
         } catch (Exception ex) {
-          handleDirectSqlError(allowJdo, ex);
-          doUseDirectSql = false;
-          start = doTrace ? System.nanoTime() : 0;
+          ctx.handleDirectSqlError(ex);
         }
       }
 
-      if (!doUseDirectSql) {
-        parts = convertToParts(listMPartitions(dbName, tableName, maxParts));
-      }
-      success = commitTransaction();
-      if (doTrace) {
-        LOG.debug(parts.size() + " partition retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
-            + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
+      if (!ctx.canUseDirectSql()) {
+        ctx.setResult(convertToParts(listMPartitions(dbName, tblName, maxParts)));
       }
-      return parts;
+      return ctx.commit();
+    } catch (NoSuchObjectException ex) {
+      throw new MetaException(ex.getMessage());
     } finally {
-      if (!success) {
-        rollbackTransaction();
-      }
+      ctx.close();
     }
   }
 
@@ -1750,41 +1739,26 @@ public class ObjectStore implements RawS
   protected List<Partition> getPartitionsByNamesInternal(String dbName, String tblName,
       List<String> partNames, boolean allowSql, boolean allowJdo)
           throws MetaException, NoSuchObjectException {
-    assert allowSql || allowJdo;
     dbName = dbName.toLowerCase();
     tblName = tblName.toLowerCase();
-    boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
-
-    boolean success = false;
-    List<Partition> results = null;
+    GetPartsHelper ctx = new GetPartsHelper(dbName, tblName, allowSql, allowJdo);
     try {
-      long start = doTrace ? System.nanoTime() : 0;
-      openTransaction();
-      if (doUseDirectSql) {
+      ctx.start(false);
+      if (ctx.canUseDirectSql()) {
         try {
-          results = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null);
+          ctx.setResult(directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null));
         } catch (Exception ex) {
-          handleDirectSqlError(allowJdo, ex);
-          doUseDirectSql = false;
-          start = doTrace ? System.nanoTime() : 0;
+          ctx.handleDirectSqlError(ex);
         }
       }
 
-      if (!doUseDirectSql) {
-        results = getPartitionsViaOrmFilter(dbName, tblName, partNames);
-      }
-      success = commitTransaction();
-      if (doTrace) {
-        LOG.debug(results.size() + " partition retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
-            + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
+      if (!ctx.canUseDirectSql()) {
+        ctx.setResult(getPartitionsViaOrmFilter(dbName, tblName, partNames));
       }
+      return ctx.commit();
     } finally {
-      if (!success) {
-        rollbackTransaction();
-      }
+      ctx.close();
     }
-    return results;
   }
 
   @Override
@@ -1797,10 +1771,7 @@ public class ObjectStore implements RawS
   protected boolean getPartitionsByExprInternal(String dbName, String tblName,
       byte[] expr, String defaultPartitionName, short maxParts, Set<Partition> result,
       boolean allowSql, boolean allowJdo) throws TException {
-    assert allowSql || allowJdo;
     assert result != null;
-    dbName = dbName.toLowerCase();
-    tblName = tblName.toLowerCase();
 
     // We will try pushdown first, so make the filter. This will also validate the expression,
     // if serialization fails we will throw incompatible metastore error to the client.
@@ -1819,61 +1790,45 @@ public class ObjectStore implements RawS
     //       Filter.g stuff. That way this method and ...ByFilter would just be merged.
     ExpressionTree exprTree = makeExpressionTree(filter);
 
-    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
-    boolean doTrace = LOG.isDebugEnabled();
-    List<Partition> partitions = null;
+    dbName = dbName.toLowerCase();
+    tblName = tblName.toLowerCase();
+    GetPartsHelper ctx = new GetPartsHelper(dbName, tblName, allowSql, allowJdo);
     boolean hasUnknownPartitions = false;
-    boolean success = false;
     try {
-      long start = doTrace ? System.nanoTime() : 0;
-      openTransaction();
-      Table table = ensureGetTable(dbName, tblName);
-      if (doUseDirectSql) {
+      ctx.start(true);
+      if (ctx.canUseDirectSql()) {
         try {
-          if (exprTree != null) {
-            // We have some sort of expression tree, try SQL filter pushdown.
-            partitions = directSql.getPartitionsViaSqlFilter(table, exprTree, null);
-          }
-          if (partitions == null) {
+          // If we have some sort of expression tree, try SQL filter pushdown.
+          boolean haveResult = (exprTree != null) && ctx.setResult(
+              directSql.getPartitionsViaSqlFilter(ctx.getTable(), exprTree, null));
+          if (!haveResult) {
             // We couldn't do SQL filter pushdown. Get names via normal means.
             List<String> partNames = new LinkedList<String>();
             hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
-                table, expr, defaultPartitionName, maxParts, partNames);
-            partitions = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null);
+                ctx.getTable(), expr, defaultPartitionName, maxParts, partNames);
+            ctx.setResult(directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null));
           }
         } catch (Exception ex) {
-          handleDirectSqlError(allowJdo, ex);
-          doUseDirectSql = false;
-          table = ensureGetTable(dbName, tblName); // Get again, detached on rollback.
+          ctx.handleDirectSqlError(ex);
         }
       }
 
-      if (!doUseDirectSql) {
-        assert partitions == null;
-        if (exprTree != null) {
-          // We have some sort of expression tree, try JDOQL filter pushdown.
-          partitions = getPartitionsViaOrmFilter(table, exprTree, maxParts, false);
-        }
-        if (partitions == null) {
+      if (!ctx.canUseDirectSql()) {
+        // If we have some sort of expression tree, try JDOQL filter pushdown.
+        boolean haveResult = (exprTree != null) && ctx.setResult(
+            getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false));
+        if (!haveResult) {
           // We couldn't do JDOQL filter pushdown. Get names via normal means.
           List<String> partNames = new ArrayList<String>();
           hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
-              table, expr, defaultPartitionName, maxParts, partNames);
-          partitions = getPartitionsViaOrmFilter(dbName, tblName, partNames);
+              ctx.getTable(), expr, defaultPartitionName, maxParts, partNames);
+          ctx.setResult(getPartitionsViaOrmFilter(dbName, tblName, partNames));
         }
       }
-      success = commitTransaction();
-      if (doTrace) {
-        double time = ((System.nanoTime() - start) / 1000000.0);
-        LOG.debug(partitions.size() + " partition retrieved using "
-          + (doUseDirectSql ? "SQL" : "ORM") + " in " + time + "ms");
-      }
+      result.addAll(ctx.commit());
     } finally {
-      if (!success) {
-        rollbackTransaction();
-      }
+      ctx.close();
     }
-    result.addAll(partitions);
     return hasUnknownPartitions;
   }
 
@@ -1986,18 +1941,6 @@ public class ObjectStore implements RawS
     return results;
   }
 
-  private void handleDirectSqlError(boolean allowJdo, Exception ex) throws MetaException {
-    LOG.error("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
-    if (!allowJdo) {
-      if (ex instanceof MetaException) {
-        throw (MetaException)ex;
-      }
-      throw new MetaException(ex.getMessage());
-    }
-    rollbackTransaction();
-    openTransaction();
-  }
-
   /**
    * Gets partition names from the table via ORM (JDOQL) name filter.
    * @param dbName Database name.
@@ -2051,73 +1994,130 @@ public class ObjectStore implements RawS
     return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, true, true);
   }
 
-  protected List<Partition> getPartitionsByFilterInternal(String dbName, String tblName,
-      String filter, short maxParts, boolean allowSql, boolean allowJdo)
-      throws MetaException, NoSuchObjectException {
-    assert allowSql || allowJdo;
-    boolean doTrace = LOG.isDebugEnabled();
-    boolean doUseDirectSql = canUseDirectSql(allowSql, allowJdo);
+  /** Helper class for getting partitions w/transaction, direct SQL, perf logging, etc. */
+  private class GetPartsHelper {
+    private final boolean isInTxn, doTrace, allowSql, allowJdo;
+    private boolean doUseDirectSql;
+    private long start;
+    private Table table;
+    private String dbName = null, tblName = null;
+    boolean success = false;
+    private List<Partition> results = null;
 
-    dbName = dbName.toLowerCase();
-    tblName = tblName.toLowerCase();
-    ExpressionTree tree = (filter != null && !filter.isEmpty())
-        ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
+    public GetPartsHelper(String dbName, String tblName, boolean allowSql, boolean allowJdo)
+        throws MetaException {
+      assert allowSql || allowJdo;
+      this.allowSql = allowSql;
+      this.allowJdo = allowJdo;
+      this.dbName = dbName;
+      this.tblName = tblName;
+      this.doTrace = LOG.isDebugEnabled();
+      this.isInTxn = isActiveTransaction();
 
-    List<Partition> results = null;
-    boolean success = false;
-    try {
-      long start = doTrace ? System.nanoTime() : 0;
+      // SQL usage inside a larger transaction (e.g. droptable) may not be desirable because
+      // some databases (e.g. Postgres) abort the entire transaction when any query fails, so
+      // the fallback from failed SQL to JDO is not possible.
+      boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)
+          && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn);
+      if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) {
+        throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken.
+      }
+      this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore();
+    }
+
+    public void start(boolean initTable) throws MetaException, NoSuchObjectException {
+      start = doTrace ? System.nanoTime() : 0;
       openTransaction();
-      Table table = ensureGetTable(dbName, tblName);
-      if (doUseDirectSql) {
-        try {
-          Integer max = (maxParts < 0) ? null : (int)maxParts;
-          results = directSql.getPartitionsViaSqlFilter(table, tree, max);
-          if (results == null) {
-            // Cannot push down SQL filter. The message has been logged internally.
-            // This is not an error so don't roll back, just go to JDO.
-            doUseDirectSql = false;
-          }
-        } catch (Exception ex) {
-          handleDirectSqlError(allowJdo, ex);
-          doUseDirectSql = false;
-          start = doTrace ? System.nanoTime() : 0;
-          table = ensureGetTable(dbName, tblName); // detached on rollback, get again
-        }
+      if (initTable) {
+        table = ensureGetTable(dbName, tblName);
       }
+    }
 
-      if (!doUseDirectSql) {
-        results = getPartitionsViaOrmFilter(table, tree, maxParts, true);
+    public boolean setResult(List<Partition> results) {
+      this.results = results;
+      return this.results != null;
+    }
+
+    public void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException {
+      LOG.error("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
+      if (!allowJdo) {
+        if (ex instanceof MetaException) {
+          throw (MetaException)ex;
+        }
+        throw new MetaException(ex.getMessage());
+      }
+      if (!isInTxn) {
+        rollbackTransaction();
+        start = doTrace ? System.nanoTime() : 0;
+        openTransaction();
+        if (table != null) {
+          table = ensureGetTable(dbName, tblName);
+        }
+      } else {
+        start = doTrace ? System.nanoTime() : 0;
       }
+      doUseDirectSql = false;
+    }
+
+    public void disableDirectSql() {
+      this.doUseDirectSql = false;
+    }
+
+    public List<Partition> commit() {
       success = commitTransaction();
       if (doTrace) {
         LOG.debug(results.size() + " partition retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
             + " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
       }
       return results;
-    } finally {
+    }
+
+    public void close() {
       if (!success) {
         rollbackTransaction();
       }
     }
+
+    public boolean canUseDirectSql() {
+      return doUseDirectSql;
+    }
+
+    public Table getTable() {
+      return table;
+    }
   }
 
-  /**
-   * @param allowSql Whether SQL usage is allowed (always true outside test).
-   * @param allowJdo Whether JDO usage is allowed (always true outside test).
-   * @return Whether we can use direct SQL.
-   */
-  private boolean canUseDirectSql(boolean allowSql, boolean allowJdo) throws MetaException {
-    // We don't allow direct SQL usage if we are inside a larger transaction (e.g. droptable).
-    // That is because some databases (e.g. Postgres) abort the entire transaction when
-    // any query fails, so the fallback from failed SQL to JDO is not possible.
-    // TODO: Drop table can be very slow on large tables, we might want to address this.
-    boolean isEnabled = !isActiveTransaction()
-        && HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL);
-    if (!allowJdo && isEnabled && !directSql.isCompatibleDatastore()) {
-      throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken.
+  protected List<Partition> getPartitionsByFilterInternal(String dbName, String tblName,
+      String filter, short maxParts, boolean allowSql, boolean allowJdo)
+      throws MetaException, NoSuchObjectException {
+    ExpressionTree tree = (filter != null && !filter.isEmpty())
+        ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
+
+    dbName = dbName.toLowerCase();
+    tblName = tblName.toLowerCase();
+    GetPartsHelper ctx = new GetPartsHelper(dbName, tblName, allowSql, allowJdo);
+    try {
+      ctx.start(true);
+      if (ctx.canUseDirectSql()) {
+        try {
+          if (!ctx.setResult(directSql.getPartitionsViaSqlFilter(
+              ctx.getTable(), tree, (maxParts < 0) ? null : (int)maxParts))) {
+            // Cannot push down SQL filter. The message has been logged internally.
+            // This is not an error so don't roll back, just go to JDO.
+            ctx.disableDirectSql();
+          }
+        } catch (Exception ex) {
+          ctx.handleDirectSqlError(ex);
+        }
+      }
+
+      if (!ctx.canUseDirectSql()) {
+        ctx.setResult(getPartitionsViaOrmFilter(ctx.getTable(), tree, maxParts, true));
+      }
+      return ctx.commit();
+    } finally {
+      ctx.close();
     }
-    return allowSql && isEnabled && directSql.isCompatibleDatastore();
   }
 
   /**

Modified: hive/branches/tez/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/pom.xml (original)
+++ hive/branches/tez/pom.xml Tue Nov 12 18:23:05 2013
@@ -49,14 +49,33 @@
   <properties>
     <hive.version.shortname>0.13.0</hive.version.shortname>
 
+    <!-- Build Properties -->
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <maven.compiler.useIncrementalCompilation>false</maven.compiler.useIncrementalCompilation>
     <maven.repo.local>${user.home}/.m2/repository</maven.repo.local>
     <hive.path.to.root>.</hive.path.to.root>
+
+    <!-- Test Properties -->
     <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>pfile://</test.warehouse.scheme>
 
-    <!-- the versions of libraries that we use -->
+    <!-- Plugin and Plugin Dependency Versions -->
+    <ant.contrib.version>1.0b3</ant.contrib.version>
+    <build.helper.maven.plugin.version>1.8</build.helper.maven.plugin.version>
+    <datanucleus.maven.plugin.version>3.3.0-release</datanucleus.maven.plugin.version>
+    <maven.antrun.plugin.version>1.7</maven.antrun.plugin.version>
+    <maven.assembly.plugin.version>2.3</maven.assembly.plugin.version>
+    <maven.compiler.plugin.version>3.1</maven.compiler.plugin.version>
+    <maven.enforcer.plugin.version>1.3.1</maven.enforcer.plugin.version>
+    <maven.install.plugin.version>2.4</maven.install.plugin.version>
+    <maven.jar.plugin.version>2.2</maven.jar.plugin.version>
+    <maven.javadoc.plugin.version>2.4</maven.javadoc.plugin.version>
+    <maven.shade.plugin.version>2.1</maven.shade.plugin.version>
+    <maven.surefire.plugin.version>2.16</maven.surefire.plugin.version>
+    <maven.war.plugin.version>2.4</maven.war.plugin.version>
+
+    <!-- Library Dependency Versions -->
     <activemq.version>5.5.0</activemq.version>
     <ant.version>1.9.1</ant.version>
     <antlr.version>3.4</antlr.version>
@@ -77,15 +96,16 @@
     <commons-io.version>2.4</commons-io.version>
     <commons-lang.version>2.4</commons-lang.version>
     <commons-lang3.version>3.1</commons-lang3.version>
-    <commons-logging.version>1.0.4</commons-logging.version>
+    <commons-logging.version>1.1.3</commons-logging.version>
     <commons-pool.version>1.5.4</commons-pool.version>
     <derby.version>10.4.2.0</derby.version>
     <guava.version>11.0.2</guava.version>
     <groovy.version>2.1.6</groovy.version>
     <hadoop-20.version>0.20.2</hadoop-20.version>
-    <hadoop-20S.version>1.1.2</hadoop-20S.version>
+    <hadoop-20S.version>1.2.1</hadoop-20S.version>
     <hadoop-23.version>2.2.0</hadoop-23.version>
     <hbase.version>0.94.6.1</hbase.version>
+    <httpcomponents.version>4.1.3</httpcomponents.version>
     <jackson.version>1.9.2</jackson.version>
     <javaewah.version>0.3.2</javaewah.version>
     <javolution.version>5.5.1</javolution.version>
@@ -102,12 +122,12 @@
     <libfb303.version>0.9.0</libfb303.version>
     <libthrift.version>0.9.0</libthrift.version>
     <log4j.version>1.2.16</log4j.version>
-    <maven-javadoc-plugin.version>2.4</maven-javadoc-plugin.version>
     <mockito-all.version>1.8.2</mockito-all.version>
     <mina.version>2.0.0-M5</mina.version>
     <pig.version>0.10.1</pig.version>
     <protobuf.version>2.5.0</protobuf.version>
     <rat.version>0.8</rat.version>
+    <stax.version>1.0.1</stax.version>
     <slf4j.version>1.6.1</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
     <tez.version>0.2.0-SNAPSHOT</tez.version>
@@ -117,7 +137,6 @@
     <velocity.version>1.5</velocity.version>
     <xerces.version>2.9.1</xerces.version>
     <zookeeper.version>3.4.3</zookeeper.version>
-    <maven.compiler.useIncrementalCompilation>false</maven.compiler.useIncrementalCompilation>
   </properties>
 
   <repositories>
@@ -191,7 +210,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
-          <version>3.1</version>
+          <version>${maven.compiler.plugin.version}</version>
           <configuration>
             <source>1.6</source>
             <target>1.6</target>
@@ -200,12 +219,12 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-antrun-plugin</artifactId>
-          <version>1.7</version>
+          <version>${maven.antrun.plugin.version}</version>
           <dependencies>
             <dependency>
               <groupId>ant-contrib</groupId>
               <artifactId>ant-contrib</artifactId>
-              <version>1.0b3</version>
+              <version>${ant.contrib.version}</version>
               <exclusions>
                 <exclusion>
                   <groupId>ant</groupId>
@@ -218,47 +237,47 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-assembly-plugin</artifactId>
-          <version>2.4</version>
+          <version>${maven.assembly.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-enforcer-plugin</artifactId>
-          <version>1.3.1</version>
+          <version>${maven.enforcer.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-install-plugin</artifactId>
-          <version>2.4</version>
+          <version>${maven.install.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-shade-plugin</artifactId>
-          <version>2.1</version>
+          <version>${maven.shade.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
-          <version>2.16</version>
+          <version>${maven.surefire.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-jar-plugin</artifactId>
-          <version>2.2</version>
+          <version>${maven.jar.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-war-plugin</artifactId>
-          <version>2.4</version>
+          <version>${maven.war.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>build-helper-maven-plugin</artifactId>
-          <version>1.8</version>
+          <version>${build.helper.maven.plugin.version}</version>
         </plugin>
         <plugin>
           <groupId>org.datanucleus</groupId>
           <artifactId>datanucleus-maven-plugin</artifactId>
-          <version>3.3.0-release</version>
+          <version>${datanucleus.maven.plugin.version}</version>
           <dependencies>
             <dependency>
               <groupId>org.datanucleus</groupId>

Modified: hive/branches/tez/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/ql/pom.xml?rev=1541190&r1=1541189&r2=1541190&view=diff
==============================================================================
--- hive/branches/tez/ql/pom.xml (original)
+++ hive/branches/tez/ql/pom.xml Tue Nov 12 18:23:05 2013
@@ -33,6 +33,17 @@
 
   <dependencies>
     <!-- intra-project -->
+    <!-- used for vector code-gen -->
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-ant</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-common</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-metastore</artifactId>
@@ -40,7 +51,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-ant</artifactId>
+      <artifactId>hive-serde</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
     </dependency>
     <!-- inter-project -->
@@ -55,14 +71,9 @@
       <version>${commons-codec.version}</version>
     </dependency>
     <dependency>
-      <groupId>commons-collections</groupId>
-      <artifactId>commons-collections</artifactId>
-      <version>${commons-collections.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-configuration</groupId>
-      <artifactId>commons-configuration</artifactId>
-      <version>${commons-configuration.version}</version>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <version>${commons-httpclient.version}</version>
     </dependency>
     <dependency>
       <groupId>commons-io</groupId>
@@ -73,7 +84,12 @@
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>${commons-lang3.version}</version>
-  </dependency>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+      <version>${commons-lang.version}</version>
+    </dependency>
     <dependency>
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
@@ -101,6 +117,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+      <version>${avro.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.avro</groupId>
       <artifactId>avro-mapred</artifactId>
       <version>${avro.version}</version>
     </dependency>
@@ -110,6 +131,11 @@
       <version>${ant.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-compress</artifactId>
+      <version>${commons-compress.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libfb303</artifactId>
       <version>${libfb303.version}</version>
@@ -130,6 +156,16 @@
       <version>${groovy.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <version>${jackson.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+      <version>${jackson.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.datanucleus</groupId>
       <artifactId>datanucleus-core</artifactId>
       <version>${datanucleus-core.version}</version>
@@ -159,13 +195,12 @@
       <artifactId>json</artifactId>
       <version>${json.version}</version>
     </dependency>
-    <!-- test intra-project -->
     <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-testutils</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
+      <groupId>stax</groupId>
+      <artifactId>stax-api</artifactId>
+      <version>${stax.version}</version>
     </dependency>
+    <!-- test intra-project -->
     <!-- test inter-project -->
     <dependency>
       <groupId>junit</groupId>
@@ -174,12 +209,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase</artifactId>
-      <version>${hbase.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.apache.mina</groupId>
       <artifactId>mina-core</artifactId>
       <version>${mina.version}</version>
@@ -392,7 +421,7 @@
                   <include>commons-lang:commons-lang</include>
                   <include>org.json:json</include>
                   <include>org.apache.avro:arvro-mapred</include>
-                  <include>org.apache.hive:hive-shims:*:uberjar</include>
+                  <include>org.apache.hive:hive-shims</include>
                   <include>com.googlecode.javaewah:JavaEWAH</include>
                   <include>javolution:javolution</include>
                   <include>com.google.protobuf:protobuf-java</include>