Posted to commits@hive.apache.org by br...@apache.org on 2014/08/24 05:43:57 UTC

svn commit: r1620103 [3/27] - in /hive/branches/spark: ./ accumulo-handler/ common/src/java/org/apache/hadoop/hive/ant/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/test/org/apache/hadoop/hive/common/type/ data/files/ hcatalog/streami...

Modified: hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java (original)
+++ hive/branches/spark/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java Sun Aug 24 03:43:48 2014
@@ -21,6 +21,7 @@ package org.apache.hive.hcatalog.api;
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -38,7 +39,7 @@ import org.apache.hadoop.hive.ql.io.orc.
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
 import org.apache.hive.hcatalog.common.HCatConstants;
@@ -65,19 +66,37 @@ public class TestHCatClient {
   private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class);
   private static final String msPort = "20101";
   private static HiveConf hcatConf;
+  private static boolean isReplicationTargetHCatRunning = false;
+  private static final String replicationTargetHCatPort = "20102";
+  private static HiveConf replicationTargetHCatConf;
   private static SecurityManager securityManager;
 
   private static class RunMS implements Runnable {
 
+    private final String msPort;
+    private List<String> args = new ArrayList<String>();
+
+    public RunMS(String msPort) {
+      this.msPort = msPort;
+      this.args.add("-v");
+      this.args.add("-p");
+      this.args.add(this.msPort);
+    }
+
+    public RunMS arg(String arg) {
+      this.args.add(arg);
+      return this;
+    }
+
     @Override
     public void run() {
       try {
-        HiveMetaStore.main(new String[]{"-v", "-p", msPort});
+        HiveMetaStore.main(args.toArray(new String[args.size()]));
       } catch (Throwable t) {
         LOG.error("Exiting. Got exception from metastore: ", t);
       }
     }
-  }
+  } // class RunMS;
 
   @AfterClass
   public static void tearDown() throws Exception {
@@ -88,9 +107,9 @@ public class TestHCatClient {
   @BeforeClass
   public static void startMetaStoreServer() throws Exception {
 
-    Thread t = new Thread(new RunMS());
+    Thread t = new Thread(new RunMS(msPort));
     t.start();
-    Thread.sleep(40000);
+    Thread.sleep(10000);
 
     securityManager = System.getSecurityManager();
     System.setSecurityManager(new NoExitSecurityManager());
@@ -152,7 +171,7 @@ public class TestHCatClient {
     assertTrue(table1.getOutputFileFormat().equalsIgnoreCase(
       RCFileOutputFormat.class.getName()));
     assertTrue(table1.getSerdeLib().equalsIgnoreCase(
-      ColumnarSerDe.class.getName()));
+      LazyBinaryColumnarSerDe.class.getName()));
     assertTrue(table1.getCols().equals(cols));
     // Since "ifexists" was not set to true, trying to create the same table
     // again
@@ -171,8 +190,8 @@ public class TestHCatClient {
       mapKeysTerminatedBy('\004').collectionItemsTerminatedBy('\005').nullDefinedAs('\006').build();
     client.createTable(tableDesc2);
     HCatTable table2 = client.getTable(db, tableTwo);
-    assertTrue(table2.getInputFileFormat().equalsIgnoreCase(
-      TextInputFormat.class.getName()));
+    assertTrue("Expected TextInputFormat, but got: " + table2.getInputFileFormat(),
+               table2.getInputFileFormat().equalsIgnoreCase(TextInputFormat.class.getName()));
     assertTrue(table2.getOutputFileFormat().equalsIgnoreCase(
       HiveIgnoreKeyTextOutputFormat.class.getName()));
     assertTrue("SerdeParams not found", table2.getSerdeParams() != null);
@@ -222,9 +241,10 @@ public class TestHCatClient {
     cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
     cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
 
+    client.dropTable(dbName, tblName, true);
     // Create a minimalistic table
     client.createTable(HCatCreateTableDesc
-        .create(dbName, tblName, cols)
+        .create(new HCatTable(dbName, tblName).cols(cols), false)
         .build());
 
     HCatTable tCreated = client.getTable(dbName, tblName);
@@ -281,21 +301,26 @@ public class TestHCatClient {
     ptnCols.add(new HCatFieldSchema("dt", Type.STRING, "date column"));
     ptnCols.add(new HCatFieldSchema("country", Type.STRING,
       "country column"));
-    HCatCreateTableDesc tableDesc = HCatCreateTableDesc
-      .create(dbName, tableName, cols).fileFormat("sequencefile")
-      .partCols(ptnCols).build();
+    HCatTable table = new HCatTable(dbName, tableName).cols(cols)
+                                                      .partCols(ptnCols)
+                                                      .fileFormat("sequenceFile");
+    HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(table, false).build();
     client.createTable(tableDesc);
 
+    // Verify that the table is created successfully.
+    table = client.getTable(dbName, tableName);
+
     Map<String, String> firstPtn = new HashMap<String, String>();
     firstPtn.put("dt", "04/30/2012");
     firstPtn.put("country", "usa");
-    HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(dbName,
-      tableName, null, firstPtn).build();
+    // Test the new HCatAddPartitionDesc API.
+    HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(new HCatPartition(table, firstPtn, null)).build();
     client.addPartition(addPtn);
 
     Map<String, String> secondPtn = new HashMap<String, String>();
     secondPtn.put("dt", "04/12/2012");
     secondPtn.put("country", "brazil");
+    // Test the deprecated HCatAddPartitionDesc API.
     HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName,
       tableName, null, secondPtn).build();
     client.addPartition(addPtn2);
@@ -303,6 +328,7 @@ public class TestHCatClient {
     Map<String, String> thirdPtn = new HashMap<String, String>();
     thirdPtn.put("dt", "04/13/2012");
     thirdPtn.put("country", "argentina");
+    // Test the deprecated HCatAddPartitionDesc API.
     HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName,
       tableName, null, thirdPtn).build();
     client.addPartition(addPtn3);
@@ -540,9 +566,8 @@ public class TestHCatClient {
       List<HCatFieldSchema> columns = Arrays.asList(new HCatFieldSchema("col", Type.STRING, ""));
       ArrayList<HCatFieldSchema> partitionColumns = new ArrayList<HCatFieldSchema>(
           Arrays.asList(new HCatFieldSchema(partitionColumn, Type.STRING, "")));
-      client.createTable(HCatCreateTableDesc.create(dbName, tableName, columns)
-          .partCols(partitionColumns)
-          .build());
+      HCatTable table = new HCatTable(dbName, tableName).cols(columns).partCols(partitionColumns);
+      client.createTable(HCatCreateTableDesc.create(table, false).build());
 
       Map<String, String> partitionSpec = new HashMap<String, String>();
       partitionSpec.put(partitionColumn, "foobar");
@@ -555,7 +580,7 @@ public class TestHCatClient {
             exception instanceof ObjectNotFoundException);
       }
 
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
 
       // Test that listPartitionsByFilter() returns an empty-set, if the filter selects no partitions.
       assertEquals("Expected empty set of partitions.",
@@ -649,21 +674,26 @@ public class TestHCatClient {
       List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
           new HCatFieldSchema("grid", Type.STRING, ""));
 
-      client.createTable(HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(new ArrayList<HCatFieldSchema>(partitionSchema)).build());
+      HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
+      client.createTable(HCatCreateTableDesc.create(table, false).build());
+
+      // Verify that the table was created successfully.
+      table = client.getTable(dbName, tableName);
+      assertNotNull("The table created just now can't be null.", table);
 
       Map<String, String> partitionSpec = new HashMap<String, String>();
       partitionSpec.put("grid", "AB");
       partitionSpec.put("dt", "2011_12_31");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("grid", "AB");
       partitionSpec.put("dt", "2012_01_01");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("dt", "2012_01_01");
       partitionSpec.put("grid", "OB");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("dt", "2012_01_01");
       partitionSpec.put("grid", "XB");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
 
       Map<String, String> partialPartitionSpec = new HashMap<String, String>();
       partialPartitionSpec.put("dt", "2012_01_01");
@@ -698,21 +728,26 @@ public class TestHCatClient {
       List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
           new HCatFieldSchema("grid", Type.STRING, ""));
 
-      client.createTable(HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(new ArrayList<HCatFieldSchema>(partitionSchema)).build());
+      HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
+      client.createTable(HCatCreateTableDesc.create(table, false).build());
+
+      // Verify that the table was created successfully.
+      table = client.getTable(dbName, tableName);
+      assertNotNull("Table couldn't be queried for. ", table);
 
       Map<String, String> partitionSpec = new HashMap<String, String>();
       partitionSpec.put("grid", "AB");
       partitionSpec.put("dt", "2011_12_31");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("grid", "AB");
       partitionSpec.put("dt", "2012_01_01");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("dt", "2012_01_01");
       partitionSpec.put("grid", "OB");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
       partitionSpec.put("dt", "2012_01_01");
       partitionSpec.put("grid", "XB");
-      client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build());
+      client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build());
 
       Map<String, String> partialPartitionSpec = new HashMap<String, String>();
       partialPartitionSpec.put("dt", "2012_01_01");
@@ -731,4 +766,235 @@ public class TestHCatClient {
     }
   }
 
+  private void startReplicationTargetMetaStoreIfRequired() throws Exception {
+    if (!isReplicationTargetHCatRunning) {
+      Thread t = new Thread(new RunMS(replicationTargetHCatPort)
+                              .arg("--hiveconf")
+                              .arg("javax.jdo.option.ConnectionURL") // Reset, to use a different Derby instance.
+                              .arg(hcatConf.get("javax.jdo.option.ConnectionURL")
+                                                 .replace("metastore", "target_metastore")));
+      t.start();
+      Thread.sleep(10000);
+      replicationTargetHCatConf = new HiveConf(hcatConf);
+      replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTOREURIS,
+                                       "thrift://localhost:" + replicationTargetHCatPort);
+      isReplicationTargetHCatRunning = true;
+    }
+  }
+
+  /**
+   * Test for detecting schema-changes for an HCatalog table, across 2 different HCat instances.
+   * A table is created with the same schema on 2 HCat instances. The table-schema is modified on the source HCat
+   * instance (columns, I/O formats, SerDe definitions, etc.). The table metadata is compared between source
+   * and target, the changes are detected and propagated to target.
+   * @throws Exception
+   */
+  @Test
+  public void testTableSchemaPropagation() throws Exception {
+    try {
+      startReplicationTargetMetaStoreIfRequired();
+      HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf));
+      final String dbName = "myDb";
+      final String tableName = "myTable";
+
+      sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
+
+      sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
+      List<HCatFieldSchema> columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
+          new HCatFieldSchema("bar", Type.STRING, ""));
+
+      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
+          new HCatFieldSchema("grid", Type.STRING, ""));
+
+      HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema);
+      sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build());
+
+      // Verify that the sourceTable was created successfully.
+      sourceTable = sourceMetaStore.getTable(dbName, tableName);
+      assertNotNull("Table couldn't be queried for. ", sourceTable);
+
+      // Serialize Table definition. Deserialize using the target HCatClient instance.
+      String tableStringRep = sourceMetaStore.serializeTable(sourceTable);
+      HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf));
+      targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
+      targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
+
+      HCatTable targetTable = targetMetaStore.deserializeTable(tableStringRep);
+
+      assertEquals("Table after deserialization should have been identical to sourceTable.",
+          sourceTable.diff(targetTable), HCatTable.NO_DIFF);
+
+      // Create table on Target.
+      targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build());
+      // Verify that the created table is identical to sourceTable.
+      targetTable = targetMetaStore.getTable(dbName, tableName);
+      assertEquals("Table after deserialization should have been identical to sourceTable.",
+          sourceTable.diff(targetTable), HCatTable.NO_DIFF);
+
+      // Modify sourceTable.
+      List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema);
+      newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, ""));
+      Map<String, String> tableParams = new HashMap<String, String>(1);
+      tableParams.put("orc.compress", "ZLIB");
+      sourceTable.cols(newColumnSchema) // Add a column.
+                 .fileFormat("orcfile")     // Change SerDe, File I/O formats.
+                 .tblProps(tableParams)
+                 .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001'));
+      sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable);
+      sourceTable = sourceMetaStore.getTable(dbName, tableName);
+
+      // Diff against table on target.
+
+      EnumSet<HCatTable.TableAttribute> diff = targetTable.diff(sourceTable);
+      assertTrue("Couldn't find change in column-schema.",
+          diff.contains(HCatTable.TableAttribute.COLUMNS));
+      assertTrue("Couldn't find change in InputFormat.",
+          diff.contains(HCatTable.TableAttribute.INPUT_FORMAT));
+      assertTrue("Couldn't find change in OutputFormat.",
+          diff.contains(HCatTable.TableAttribute.OUTPUT_FORMAT));
+      assertTrue("Couldn't find change in SerDe.",
+          diff.contains(HCatTable.TableAttribute.SERDE));
+      assertTrue("Couldn't find change in SerDe parameters.",
+          diff.contains(HCatTable.TableAttribute.SERDE_PROPERTIES));
+      assertTrue("Couldn't find change in Table parameters.",
+          diff.contains(HCatTable.TableAttribute.TABLE_PROPERTIES));
+
+      // Replicate the changes to the replicated-table.
+      targetMetaStore.updateTableSchema(dbName, tableName, targetTable.resolve(sourceTable, diff));
+      targetTable = targetMetaStore.getTable(dbName, tableName);
+
+      assertEquals("After propagating schema changes, source and target tables should have been equivalent.",
+          targetTable.diff(sourceTable), HCatTable.NO_DIFF);
+
+    }
+    catch (Exception unexpected) {
+      LOG.error("Unexpected exception!", unexpected);
+      assertTrue("Unexpected exception! " + unexpected.getMessage(), false);
+    }
+  }
+
+  /**
+   * Test that partition-definitions can be replicated between HCat-instances,
+   * independently of table-metadata replication.
+   * 2 identical tables are created on 2 different HCat instances ("source" and "target").
+   * On the source instance,
+   * 1. One partition is added with the old format ("TEXTFILE").
+   * 2. The table is updated with an additional column and the data-format changed to ORC.
+   * 3. Another partition is added with the new format.
+   * 4. The partitions' metadata is copied to the target HCat instance, without updating the target table definition.
+   * 5. The partitions' metadata is tested to be an exact replica of that on the source.
+   * @throws Exception
+   */
+  @Test
+  public void testPartitionRegistrationWithCustomSchema() throws Exception {
+    try {
+      startReplicationTargetMetaStoreIfRequired();
+
+      HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf));
+      final String dbName = "myDb";
+      final String tableName = "myTable";
+
+      sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
+
+      sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
+      List<HCatFieldSchema> columnSchema = new ArrayList<HCatFieldSchema>(
+          Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""),
+                        new HCatFieldSchema("bar", Type.STRING, "")));
+
+      List<HCatFieldSchema> partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""),
+                                                            new HCatFieldSchema("grid", Type.STRING, ""));
+
+      HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema)
+                                                              .partCols(partitionSchema)
+                                                              .comment("Source table.");
+
+      sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build());
+
+      // Verify that the sourceTable was created successfully.
+      sourceTable = sourceMetaStore.getTable(dbName, tableName);
+      assertNotNull("Table couldn't be queried for. ", sourceTable);
+
+      // Partitions added now should inherit table-schema, properties, etc.
+      Map<String, String> partitionSpec_1 = new HashMap<String, String>();
+      partitionSpec_1.put("grid", "AB");
+      partitionSpec_1.put("dt", "2011_12_31");
+      HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, "");
+
+      sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build());
+      assertEquals("Unexpected number of partitions. ",
+                   sourceMetaStore.getPartitions(dbName, tableName).size(), 1);
+      // Verify that partition_1 was added correctly, and properties were inherited from the HCatTable.
+      HCatPartition addedPartition_1 = sourceMetaStore.getPartition(dbName, tableName, partitionSpec_1);
+      assertEquals("Column schema doesn't match.", addedPartition_1.getColumns(), sourceTable.getCols());
+      assertEquals("InputFormat doesn't match.", addedPartition_1.getInputFormat(), sourceTable.getInputFileFormat());
+      assertEquals("OutputFormat doesn't match.", addedPartition_1.getOutputFormat(), sourceTable.getOutputFileFormat());
+      assertEquals("SerDe doesn't match.", addedPartition_1.getSerDe(), sourceTable.getSerdeLib());
+      assertEquals("SerDe params don't match.", addedPartition_1.getSerdeParams(), sourceTable.getSerdeParams());
+
+      // Replicate table definition.
+
+      HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf));
+      targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
+
+      targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build());
+      // Make a copy of the source-table, as would be done across class-loaders.
+      HCatTable targetTable = targetMetaStore.deserializeTable(sourceMetaStore.serializeTable(sourceTable));
+      targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build());
+      targetTable = targetMetaStore.getTable(dbName, tableName);
+
+      assertEquals("Created table doesn't match the source.",
+                  targetTable.diff(sourceTable), HCatTable.NO_DIFF);
+
+      // Modify Table schema at the source.
+      List<HCatFieldSchema> newColumnSchema = new ArrayList<HCatFieldSchema>(columnSchema);
+      newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, ""));
+      Map<String, String> tableParams = new HashMap<String, String>(1);
+      tableParams.put("orc.compress", "ZLIB");
+      sourceTable.cols(newColumnSchema) // Add a column.
+          .fileFormat("orcfile")     // Change SerDe, File I/O formats.
+          .tblProps(tableParams)
+          .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001'));
+      sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable);
+      sourceTable = sourceMetaStore.getTable(dbName, tableName);
+
+      // Add another partition to the source.
+      Map<String, String> partitionSpec_2 = new HashMap<String, String>();
+      partitionSpec_2.put("grid", "AB");
+      partitionSpec_2.put("dt", "2012_01_01");
+      HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2, "");
+      sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build());
+
+      // The source table now has 2 partitions, one in TEXTFILE, the other in ORC.
+      // Test adding these partitions to the target-table *without* replicating the table-change.
+
+      List<HCatPartition> sourcePartitions = sourceMetaStore.getPartitions(dbName, tableName);
+      assertEquals("Unexpected number of source partitions.", 2, sourcePartitions.size());
+
+      List<HCatAddPartitionDesc> addPartitionDescs = new ArrayList<HCatAddPartitionDesc>(sourcePartitions.size());
+      for (HCatPartition partition : sourcePartitions) {
+        addPartitionDescs.add(HCatAddPartitionDesc.create(partition).build());
+      }
+
+      targetMetaStore.addPartitions(addPartitionDescs);
+
+      List<HCatPartition> targetPartitions = targetMetaStore.getPartitions(dbName, tableName);
+
+      assertEquals("Expected the same number of partitions. ", targetPartitions.size(), sourcePartitions.size());
+
+      for (int i=0; i<targetPartitions.size(); ++i) {
+        HCatPartition sourcePartition = sourcePartitions.get(i),
+                      targetPartition = targetPartitions.get(i);
+        assertEquals("Column schema doesn't match.", sourcePartition.getColumns(), targetPartition.getColumns());
+        assertEquals("InputFormat doesn't match.", sourcePartition.getInputFormat(), targetPartition.getInputFormat());
+        assertEquals("OutputFormat doesn't match.", sourcePartition.getOutputFormat(), targetPartition.getOutputFormat());
+        assertEquals("SerDe doesn't match.", sourcePartition.getSerDe(), targetPartition.getSerDe());
+        assertEquals("SerDe params don't match.", sourcePartition.getSerdeParams(), targetPartition.getSerdeParams());
+      }
+
+    }
+    catch (Exception unexpected) {
+      LOG.error( "Unexpected exception! ",  unexpected);
+      assertTrue("Unexpected exception! " + unexpected.getMessage(), false);
+    }
+  }
 }
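
For readers skimming this patch: below is a minimal, standalone sketch (not part of the commit) of the table-replication flow that testTableSchemaPropagation() above exercises. It assumes the table already exists on both metastores; TableSchemaReplicationSketch, propagate(), sourceConf and targetConf are hypothetical names standing in for whatever configuration points at the source and target metastore URIs.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatTable;

public class TableSchemaReplicationSketch {
  /**
   * Propagates schema changes for dbName.tableName from the source metastore to the target.
   * Assumes the table already exists on both sides.
   */
  public static void propagate(Configuration sourceConf, Configuration targetConf,
                               String dbName, String tableName) throws Exception {
    HCatClient source = HCatClient.create(sourceConf);
    HCatClient target = HCatClient.create(targetConf);
    try {
      // Ship the source table definition as a string (as would happen across processes)
      // and materialize it with the target client.
      HCatTable sourceTable = target.deserializeTable(
          source.serializeTable(source.getTable(dbName, tableName)));
      HCatTable targetTable = target.getTable(dbName, tableName);

      // Detect attribute-level differences: columns, I/O formats, SerDe, table properties, etc.
      EnumSet<HCatTable.TableAttribute> diff = targetTable.diff(sourceTable);
      if (!diff.equals(HCatTable.NO_DIFF)) {
        // Fold the changed attributes into the target copy, then push the update.
        target.updateTableSchema(dbName, tableName, targetTable.resolve(sourceTable, diff));
      }
    } finally {
      source.close();
      target.close();
    }
  }
}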

Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java Sun Aug 24 03:43:48 2014
@@ -30,6 +30,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
@@ -43,7 +45,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
-import org.mockito.Matchers;
 import org.mockito.Mockito;
 
 /**
@@ -52,7 +53,10 @@ import org.mockito.Mockito;
 public class TestHiveAuthorizerCheckInvocation {
   protected static HiveConf conf;
   protected static Driver driver;
-  private static final String tableName = TestHiveAuthorizerCheckInvocation.class.getSimpleName();
+  private static final String tableName = TestHiveAuthorizerCheckInvocation.class.getSimpleName()
+      + "Table";
+  private static final String dbName = TestHiveAuthorizerCheckInvocation.class.getSimpleName()
+      + "Db";
   static HiveAuthorizer mockedAuthorizer;
 
   /**
@@ -82,8 +86,13 @@ public class TestHiveAuthorizerCheckInvo
 
     SessionState.start(conf);
     driver = new Driver(conf);
-    CommandProcessorResponse resp = driver.run("create table " + tableName
+    runCmd("create table " + tableName
         + " (i int, j int, k string) partitioned by (city string, date string) ");
+    runCmd("create database " + dbName);
+  }
+
+  private static void runCmd(String cmd) throws CommandNeedRetryException {
+    CommandProcessorResponse resp = driver.run(cmd);
     assertEquals(0, resp.getResponseCode());
   }
 
@@ -101,7 +110,7 @@ public class TestHiveAuthorizerCheckInvo
         + " where k = 'X' and city = 'Scottsdale-AZ' ");
     assertEquals(0, status);
 
-    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs();
+    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs().getLeft();
     checkSingleTableInput(inputs);
     HivePrivilegeObject tableObj = inputs.get(0);
     assertEquals("no of columns used", 3, tableObj.getColumns().size());
@@ -123,7 +132,7 @@ public class TestHiveAuthorizerCheckInvo
     int status = driver.compile("select * from " + tableName + " order by i");
     assertEquals(0, status);
 
-    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs();
+    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs().getLeft();
     checkSingleTableInput(inputs);
     HivePrivilegeObject tableObj = inputs.get(0);
     assertEquals("no of columns used", 5, tableObj.getColumns().size());
@@ -139,12 +148,60 @@ public class TestHiveAuthorizerCheckInvo
     int status = driver.compile("describe " + tableName);
     assertEquals(0, status);
 
-    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs();
+    List<HivePrivilegeObject> inputs = getHivePrivilegeObjectInputs().getLeft();
     checkSingleTableInput(inputs);
     HivePrivilegeObject tableObj = inputs.get(0);
     assertNull("columns used", tableObj.getColumns());
   }
 
+  @Test
+  public void testPermFunction() throws HiveAuthzPluginException, HiveAccessControlException,
+      CommandNeedRetryException {
+
+    reset(mockedAuthorizer);
+    final String funcName = "testauthfunc1";
+    int status = driver.compile("create function " + dbName + "." + funcName
+        + " as 'org.apache.hadoop.hive.ql.udf.UDFPI'");
+    assertEquals(0, status);
+
+    List<HivePrivilegeObject> outputs = getHivePrivilegeObjectInputs().getRight();
+
+    HivePrivilegeObject funcObj;
+    HivePrivilegeObject dbObj;
+    assertEquals("number of output object", 2, outputs.size());
+    if(outputs.get(0).getType() == HivePrivilegeObjectType.FUNCTION) {
+      funcObj = outputs.get(0);
+      dbObj = outputs.get(1);
+    } else {
+      funcObj = outputs.get(1);
+      dbObj = outputs.get(0);
+    }
+
+    assertEquals("input type", HivePrivilegeObjectType.FUNCTION, funcObj.getType());
+    assertTrue("function name", funcName.equalsIgnoreCase(funcObj.getObjectName()));
+    assertTrue("db name", dbName.equalsIgnoreCase(funcObj.getDbname()));
+
+    assertEquals("input type", HivePrivilegeObjectType.DATABASE, dbObj.getType());
+    assertTrue("db name", dbName.equalsIgnoreCase(dbObj.getDbname()));
+  }
+
+  @Test
+  public void testTempFunction() throws HiveAuthzPluginException, HiveAccessControlException,
+      CommandNeedRetryException {
+
+    reset(mockedAuthorizer);
+    final String funcName = "testAuthFunc2";
+    int status = driver.compile("create temporary function " + funcName
+        + " as 'org.apache.hadoop.hive.ql.udf.UDFPI'");
+    assertEquals(0, status);
+
+    List<HivePrivilegeObject> outputs = getHivePrivilegeObjectInputs().getRight();
+    HivePrivilegeObject funcObj = outputs.get(0);
+    assertEquals("input type", HivePrivilegeObjectType.FUNCTION, funcObj.getType());
+    assertTrue("function name", funcName.equalsIgnoreCase(funcObj.getObjectName()));
+    assertEquals("db name", null, funcObj.getDbname());
+  }
+
   private void checkSingleTableInput(List<HivePrivilegeObject> inputs) {
     assertEquals("number of inputs", 1, inputs.size());
 
@@ -154,23 +211,26 @@ public class TestHiveAuthorizerCheckInvo
   }
 
   /**
-   * @return the inputs passed in current call to authorizer.checkPrivileges
+   * @return pair with left value as inputs and right value as outputs,
+   *  passed in current call to authorizer.checkPrivileges
    * @throws HiveAuthzPluginException
    * @throws HiveAccessControlException
    */
-  private List<HivePrivilegeObject> getHivePrivilegeObjectInputs() throws HiveAuthzPluginException,
+  private Pair<List<HivePrivilegeObject>, List<HivePrivilegeObject>> getHivePrivilegeObjectInputs() throws HiveAuthzPluginException,
       HiveAccessControlException {
     // Create argument capturer
     // a class variable cast to this generic of generic class
     Class<List<HivePrivilegeObject>> class_listPrivObjects = (Class) List.class;
     ArgumentCaptor<List<HivePrivilegeObject>> inputsCapturer = ArgumentCaptor
         .forClass(class_listPrivObjects);
+    ArgumentCaptor<List<HivePrivilegeObject>> outputsCapturer = ArgumentCaptor
+        .forClass(class_listPrivObjects);
 
     verify(mockedAuthorizer).checkPrivileges(any(HiveOperationType.class),
-        inputsCapturer.capture(), Matchers.anyListOf(HivePrivilegeObject.class),
+        inputsCapturer.capture(), outputsCapturer.capture(),
         any(HiveAuthzContext.class));
 
-    return inputsCapturer.getValue();
+    return new ImmutablePair(inputsCapturer.getValue(), outputsCapturer.getValue());
   }
 
 }
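
A self-contained sketch (not from the commit) of the Mockito pattern the reworked getHivePrivilegeObjectInputs() uses: two ArgumentCaptors registered on a single verified call, with the captured lists returned as an ImmutablePair. The Authorizer interface and CaptorSketch class below are hypothetical stand-ins, not Hive types.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.mockito.ArgumentCaptor;

public class CaptorSketch {
  /** Hypothetical stand-in for HiveAuthorizer.checkPrivileges(inputs, outputs, ...). */
  interface Authorizer {
    void check(List<String> inputs, List<String> outputs);
  }

  @SuppressWarnings("unchecked")
  static Pair<List<String>, List<String>> lastCheckArgs(Authorizer mocked) {
    Class<List<String>> listClass = (Class<List<String>>) (Class<?>) List.class;
    ArgumentCaptor<List<String>> inputsCaptor = ArgumentCaptor.forClass(listClass);
    ArgumentCaptor<List<String>> outputsCaptor = ArgumentCaptor.forClass(listClass);
    // One verify() call, two captors: each records the argument at its own position.
    verify(mocked).check(inputsCaptor.capture(), outputsCaptor.capture());
    return new ImmutablePair<List<String>, List<String>>(
        inputsCaptor.getValue(), outputsCaptor.getValue());
  }

  public static void main(String[] args) {
    Authorizer mocked = mock(Authorizer.class);
    mocked.check(Arrays.asList("input-table"), Arrays.asList("output-table"));

    Pair<List<String>, List<String>> captured = lastCheckArgs(mocked);
    System.out.println("inputs = " + captured.getLeft() + ", outputs = " + captured.getRight());
  }
}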

Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java Sun Aug 24 03:43:48 2014
@@ -868,7 +868,7 @@ public class TestJdbcDriver2 {
     assertNotNull("ResultSet is null", res);
     assertTrue("getResultSet() not returning expected ResultSet", res == stmt
         .getResultSet());
-    assertEquals("get update count not as expected", 0, stmt.getUpdateCount());
+    assertEquals("get update count not as expected", -1, stmt.getUpdateCount());
     int i = 0;
 
     ResultSetMetaData meta = res.getMetaData();

Modified: hive/branches/spark/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/qtest/pom.xml?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/itests/qtest/pom.xml (original)
+++ hive/branches/spark/itests/qtest/pom.xml Sun Aug 24 03:43:48 2014
@@ -42,6 +42,11 @@
     <!-- dependencies are always listed in sorted order by groupId, artifactId -->
     <!-- test intra-project -->
     <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-ant</artifactId>
       <version>${project.version}</version>
@@ -402,6 +407,7 @@
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/positive/" />
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/negative/" />
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/" />
+                <mkdir dir="${project.build.directory}/qfile-results/accumulo-handler/positive/" />
 
                 <mkdir dir="${project.build.directory}/qfile-results/contribpositive"/>
                 <mkdir dir="${project.build.directory}/qfile-results/contribnegative"/>
@@ -590,6 +596,20 @@
                   initScript="q_test_init.sql"
                   cleanupScript="q_test_cleanup.sql"/>
 
+                <!-- Accumulo Positive -->
+                <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
+                  outputDirectory="${project.build.directory}/generated-test-sources/java/org/apache/hadoop/hive/cli/"
+                  templatePath="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/templates/" template="TestAccumuloCliDriver.vm"
+                  queryDirectory="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/queries/positive/"
+                  queryFile="${qfile}"
+                  runDisabled="${run_disabled}"
+                  clusterMode="${clustermode}"
+                  resultsDirectory="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/results/positive/" className="TestAccumuloCliDriver"
+                  logFile="${project.build.directory}/testaccumuloclidrivergen.log"
+                  logDirectory="${project.build.directory}/qfile-results/accumulo-handler/positive/"
+                  initScript="q_test_init.sql"
+                  cleanupScript="q_test_cleanup.sql"/>
+
 
                 <!-- Beeline -->
                 <if>

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Sun Aug 24 03:43:48 2014
@@ -146,7 +146,8 @@ minitez.query.files=bucket_map_join_tez1
   tez_join_tests.q,\
   tez_joins_explain.q,\
   tez_schema_evolution.q,\
-  tez_union.q
+  tez_union.q,\
+  tez_union_decimal.q
 
 beeline.positive.exclude=add_part_exist.q,\
   alter1.q,\

Modified: hive/branches/spark/itests/util/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/pom.xml?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/itests/util/pom.xml (original)
+++ hive/branches/spark/itests/util/pom.xml Sun Aug 24 03:43:48 2014
@@ -35,6 +35,21 @@
     <!-- dependencies are always listed in sorted order by groupId, artifactId -->
     <!-- test intra-project -->
     <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
       <version>${project.version}</version>

Modified: hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java (original)
+++ hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java Sun Aug 24 03:43:48 2014
@@ -21,6 +21,7 @@ package org.apache.hive.jdbc;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.sql.SQLWarning;
 import java.util.HashMap;
 import java.util.Map;
@@ -435,7 +436,7 @@ public class HiveStatement implements ja
 
   @Override
   public ResultSet getGeneratedKeys() throws SQLException {
-    throw new SQLException("Method not supported");
+    throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -469,7 +470,7 @@ public class HiveStatement implements ja
 
   @Override
   public boolean getMoreResults() throws SQLException {
-    throw new SQLException("Method not supported");
+    return false;
   }
 
   /*
@@ -480,7 +481,7 @@ public class HiveStatement implements ja
 
   @Override
   public boolean getMoreResults(int current) throws SQLException {
-    throw new SQLException("Method not supported");
+    throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -550,7 +551,7 @@ public class HiveStatement implements ja
   @Override
   public int getUpdateCount() throws SQLException {
     checkConnection("getUpdateCount");
-    return 0;
+    return -1;
   }
 
   /*
@@ -600,7 +601,7 @@ public class HiveStatement implements ja
 
   @Override
   public void setCursorName(String name) throws SQLException {
-    throw new SQLException("Method not supported");
+    throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -611,7 +612,9 @@ public class HiveStatement implements ja
 
   @Override
   public void setEscapeProcessing(boolean enable) throws SQLException {
-    throw new SQLException("Method not supported");
+    if (enable) {
+      throw new SQLException("Method not supported");
+    }
   }
 
   /*
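
A hedged client-side sketch (not part of the commit) of the standard JDBC pattern the getUpdateCount()/getMoreResults() changes above align with: after execute(), a statement that produced a ResultSet reports getUpdateCount() == -1 (previously 0). The connection URL and the table name "src" are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class UpdateCountSketch {
  public static void main(String[] args) throws Exception {
    // Explicit driver registration, in case JDBC auto-loading is unavailable.
    Class.forName("org.apache.hive.jdbc.HiveDriver");

    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      boolean producedResultSet = stmt.execute("SELECT key, value FROM src");
      if (producedResultSet) {
        // A ResultSet is available; per the JDBC contract, getUpdateCount() now returns -1
        // (the pre-patch HiveStatement returned 0 here, which broke this standard check).
        System.out.println("getUpdateCount() = " + stmt.getUpdateCount());
        try (ResultSet rs = stmt.getResultSet()) {
          while (rs.next()) {
            System.out.println(rs.getString(1) + "\t" + rs.getString(2));
          }
        }
      } else if (stmt.getUpdateCount() == -1) {
        // Neither a ResultSet nor an update count: there are no results to consume.
        System.out.println("No results.");
      }
    }
  }
}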

Modified: hive/branches/spark/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/if/hive_metastore.thrift?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/spark/metastore/if/hive_metastore.thrift Sun Aug 24 03:43:48 2014
@@ -366,6 +366,10 @@ struct AggrStats {
 2: required i64 partsFound // number of partitions for which stats were found
 }
 
+struct SetPartitionsStatsRequest {
+1: required list<ColumnStatistics> colStats
+}
+
 // schema of the table/query results etc.
 struct Schema {
  // column names, types, comments
@@ -960,6 +964,8 @@ service ThriftHiveMetastore extends fb30
               (1:NoSuchObjectException o1, 2:MetaException o2)
   AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
               (1:NoSuchObjectException o1, 2:MetaException o2)
+  bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
+              (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
 
 
   // delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]

Modified: hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql Sun Aug 24 03:43:48 2014
@@ -114,6 +114,8 @@ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGE
 
 CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
 
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
 CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
 
 CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");

Modified: hive/branches/spark/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql Sun Aug 24 03:43:48 2014
@@ -1,3 +1,5 @@
 -- Upgrade MetaStore schema from 0.13.0 to 0.14.0
 
+RUN '019-HIVE-7784.derby.sql';
+
 UPDATE "APP".VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql Sun Aug 24 03:43:48 2014
@@ -1,2 +1,3 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
+0.13.0-to-0.14.0

Modified: hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql Sun Aug 24 03:43:48 2014
@@ -704,6 +704,8 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STA
   CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+
 --
 -- Table structure for table `TYPES`
 --

Modified: hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql Sun Aug 24 03:43:48 2014
@@ -1,4 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS ' ';
 
+SOURCE 019-HIVE-7784.mysql.sql;
+
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS ' ';
+

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql Sun Aug 24 03:43:48 2014
@@ -527,6 +527,8 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAIN
 
 CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
 
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
 CREATE TABLE FUNCS (
   FUNC_ID NUMBER NOT NULL,
   CLASS_NAME VARCHAR2(4000),

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql Sun Aug 24 03:43:48 2014
@@ -1,4 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual;
 
+@020-HIVE-7784.oracle.sql;
+
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual;

Modified: hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql Sun Aug 24 03:43:48 2014
@@ -1059,6 +1059,13 @@ CREATE INDEX "PART_PRIVS_N49" ON "PART_P
 
 
 --
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+
+--
 -- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
 --
 

Modified: hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql?rev=1620103&r1=1620102&r2=1620103&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql Sun Aug 24 03:43:48 2014
@@ -1,6 +1,13 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0';
 
+\i 019-HIVE-7784.postgres.sql;
+
 UPDATE "VERSION" SET "SCHEMA_VERSION"='0.14.0', "VERSION_COMMENT"='Hive release version 0.14.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0';
 
 
+--
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+