Posted to commits@hive.apache.org by ha...@apache.org on 2013/09/12 03:21:29 UTC

svn commit: r1522098 [11/30] - in /hive/branches/vectorization: ./ beeline/src/test/org/apache/hive/beeline/src/test/ bin/ bin/ext/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/a...

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManager.java Thu Sep 12 01:21:10 2013
@@ -36,225 +36,225 @@ import org.junit.Test;
 
 public class TestRevisionManager extends SkeletonHBaseTest {
 
-    @Test
-    public void testBasicZNodeCreation() throws IOException, KeeperException, InterruptedException {
+  @Test
+  public void testBasicZNodeCreation() throws IOException, KeeperException, InterruptedException {
 
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String servers = getHbaseConf().get("hbase.zookeeper.quorum");
-        String[] splits = servers.split(",");
-        StringBuffer sb = new StringBuffer();
-        for (String split : splits) {
-            sb.append(split);
-            sb.append(':');
-            sb.append(port);
-        }
-
-        ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
-        String tableName = newTableName("testTable");
-        List<String> columnFamilies = Arrays.asList("cf001", "cf002", "cf003");
-
-        zkutil.createRootZNodes();
-        ZooKeeper zk = zkutil.getSession();
-        Stat tempTwo = zk.exists("/rm_base" + PathUtil.DATA_DIR, false);
-        assertTrue(tempTwo != null);
-        Stat tempThree = zk.exists("/rm_base" + PathUtil.CLOCK_NODE, false);
-        assertTrue(tempThree != null);
-
-        zkutil.setUpZnodesForTable(tableName, columnFamilies);
-        String transactionDataTablePath = "/rm_base" + PathUtil.DATA_DIR + "/" + tableName;
-        Stat result = zk.exists(transactionDataTablePath, false);
-        assertTrue(result != null);
-
-        for (String colFamiliy : columnFamilies) {
-            String cfPath = transactionDataTablePath + "/" + colFamiliy;
-            Stat resultTwo = zk.exists(cfPath, false);
-            assertTrue(resultTwo != null);
-        }
-
-    }
-
-    @Test
-    public void testCommitTransaction() throws IOException {
-
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String servers = getHbaseConf().get("hbase.zookeeper.quorum");
-        String[] splits = servers.split(",");
-        StringBuffer sb = new StringBuffer();
-        for (String split : splits) {
-            sb.append(split);
-            sb.append(':');
-            sb.append(port);
-        }
-
-        Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
-        conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
-        manager.initialize(conf);
-        manager.open();
-        ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
-
-        String tableName = newTableName("testTable");
-        List<String> columnFamilies = Arrays.asList("cf1", "cf2", "cf3");
-        Transaction txn = manager.beginWriteTransaction(tableName,
-            columnFamilies);
-
-        List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
-        assertTrue(cfs.size() == columnFamilies.size());
-        for (String cf : cfs) {
-            assertTrue(columnFamilies.contains(cf));
-        }
-
-        for (String colFamily : columnFamilies) {
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamily);
-            byte[] data = zkutil.getRawData(path, null);
-            StoreFamilyRevisionList list = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(list, data);
-            assertEquals(list.getRevisionListSize(), 1);
-            StoreFamilyRevision lightTxn = list.getRevisionList().get(0);
-            assertEquals(lightTxn.timestamp, txn.getTransactionExpireTimeStamp());
-            assertEquals(lightTxn.revision, txn.getRevisionNumber());
-
-        }
-        manager.commitWriteTransaction(txn);
-        for (String colFamiliy : columnFamilies) {
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
-            byte[] data = zkutil.getRawData(path, null);
-            StoreFamilyRevisionList list = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(list, data);
-            assertEquals(list.getRevisionListSize(), 0);
-
-        }
-
-        manager.close();
-    }
-
-    @Test
-    public void testAbortTransaction() throws IOException {
-
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String host = getHbaseConf().get("hbase.zookeeper.quorum");
-        Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
-        conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
-        manager.initialize(conf);
-        manager.open();
-        ZKUtil zkutil = new ZKUtil(host + ':' + port, "/rm_base");
-
-        String tableName = newTableName("testTable");
-        List<String> columnFamilies = Arrays.asList("cf1", "cf2", "cf3");
-        Transaction txn = manager.beginWriteTransaction(tableName, columnFamilies);
-        List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
-
-        assertTrue(cfs.size() == columnFamilies.size());
-        for (String cf : cfs) {
-            assertTrue(columnFamilies.contains(cf));
-        }
-
-        for (String colFamiliy : columnFamilies) {
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
-            byte[] data = zkutil.getRawData(path, null);
-            StoreFamilyRevisionList list = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(list, data);
-            assertEquals(list.getRevisionListSize(), 1);
-            StoreFamilyRevision lightTxn = list.getRevisionList().get(0);
-            assertEquals(lightTxn.timestamp, txn.getTransactionExpireTimeStamp());
-            assertEquals(lightTxn.revision, txn.getRevisionNumber());
-
-        }
-        manager.abortWriteTransaction(txn);
-        for (String colFamiliy : columnFamilies) {
-            String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
-            byte[] data = zkutil.getRawData(path, null);
-            StoreFamilyRevisionList list = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(list, data);
-            assertEquals(list.getRevisionListSize(), 0);
-
-        }
-
-        for (String colFamiliy : columnFamilies) {
-            String path = PathUtil.getAbortInformationPath("/rm_base", tableName, colFamiliy);
-            byte[] data = zkutil.getRawData(path, null);
-            StoreFamilyRevisionList list = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(list, data);
-            assertEquals(list.getRevisionListSize(), 1);
-            StoreFamilyRevision abortedTxn = list.getRevisionList().get(0);
-            assertEquals(abortedTxn.getRevision(), txn.getRevisionNumber());
-        }
-        manager.close();
-    }
-
-    @Test
-    public void testKeepAliveTransaction() throws InterruptedException, IOException {
-
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String servers = getHbaseConf().get("hbase.zookeeper.quorum");
-        String[] splits = servers.split(",");
-        StringBuffer sb = new StringBuffer();
-        for (String split : splits) {
-            sb.append(split);
-            sb.append(':');
-            sb.append(port);
-        }
-
-        Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
-        conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
-        manager.initialize(conf);
-        manager.open();
-        String tableName = newTableName("testTable");
-        List<String> columnFamilies = Arrays.asList("cf1", "cf2");
-        Transaction txn = manager.beginWriteTransaction(tableName,
-            columnFamilies, 40);
-        Thread.sleep(100);
-        try {
-            manager.commitWriteTransaction(txn);
-        } catch (Exception e) {
-            assertTrue(e instanceof IOException);
-            assertEquals(e.getMessage(),
-                "The transaction to be removed not found in the data.");
-        }
-
-    }
-
-    @Test
-    public void testCreateSnapshot() throws IOException {
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String host = getHbaseConf().get("hbase.zookeeper.quorum");
-        Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
-        conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
-        manager.initialize(conf);
-        manager.open();
-        String tableName = newTableName("testTable");
-        List<String> cfOne = Arrays.asList("cf1", "cf2");
-        List<String> cfTwo = Arrays.asList("cf2", "cf3");
-        Transaction tsx1 = manager.beginWriteTransaction(tableName, cfOne);
-        Transaction tsx2 = manager.beginWriteTransaction(tableName, cfTwo);
-        TableSnapshot snapshotOne = manager.createSnapshot(tableName);
-        assertEquals(snapshotOne.getRevision("cf1"), 0);
-        assertEquals(snapshotOne.getRevision("cf2"), 0);
-        assertEquals(snapshotOne.getRevision("cf3"), 1);
-
-        List<String> cfThree = Arrays.asList("cf1", "cf3");
-        Transaction tsx3 = manager.beginWriteTransaction(tableName, cfThree);
-        manager.commitWriteTransaction(tsx1);
-        TableSnapshot snapshotTwo = manager.createSnapshot(tableName);
-        assertEquals(snapshotTwo.getRevision("cf1"), 2);
-        assertEquals(snapshotTwo.getRevision("cf2"), 1);
-        assertEquals(snapshotTwo.getRevision("cf3"), 1);
-
-        manager.commitWriteTransaction(tsx2);
-        TableSnapshot snapshotThree = manager.createSnapshot(tableName);
-        assertEquals(snapshotThree.getRevision("cf1"), 2);
-        assertEquals(snapshotThree.getRevision("cf2"), 3);
-        assertEquals(snapshotThree.getRevision("cf3"), 2);
-        manager.commitWriteTransaction(tsx3);
-        TableSnapshot snapshotFour = manager.createSnapshot(tableName);
-        assertEquals(snapshotFour.getRevision("cf1"), 3);
-        assertEquals(snapshotFour.getRevision("cf2"), 3);
-        assertEquals(snapshotFour.getRevision("cf3"), 3);
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String servers = getHbaseConf().get("hbase.zookeeper.quorum");
+    String[] splits = servers.split(",");
+    StringBuffer sb = new StringBuffer();
+    for (String split : splits) {
+      sb.append(split);
+      sb.append(':');
+      sb.append(port);
+    }
+
+    ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
+    String tableName = newTableName("testTable");
+    List<String> columnFamilies = Arrays.asList("cf001", "cf002", "cf003");
+
+    zkutil.createRootZNodes();
+    ZooKeeper zk = zkutil.getSession();
+    Stat tempTwo = zk.exists("/rm_base" + PathUtil.DATA_DIR, false);
+    assertTrue(tempTwo != null);
+    Stat tempThree = zk.exists("/rm_base" + PathUtil.CLOCK_NODE, false);
+    assertTrue(tempThree != null);
+
+    zkutil.setUpZnodesForTable(tableName, columnFamilies);
+    String transactionDataTablePath = "/rm_base" + PathUtil.DATA_DIR + "/" + tableName;
+    Stat result = zk.exists(transactionDataTablePath, false);
+    assertTrue(result != null);
+
+    for (String colFamiliy : columnFamilies) {
+      String cfPath = transactionDataTablePath + "/" + colFamiliy;
+      Stat resultTwo = zk.exists(cfPath, false);
+      assertTrue(resultTwo != null);
+    }
+
+  }
+
+  @Test
+  public void testCommitTransaction() throws IOException {
+
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String servers = getHbaseConf().get("hbase.zookeeper.quorum");
+    String[] splits = servers.split(",");
+    StringBuffer sb = new StringBuffer();
+    for (String split : splits) {
+      sb.append(split);
+      sb.append(':');
+      sb.append(port);
+    }
+
+    Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
+    conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
+    ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
+    manager.initialize(conf);
+    manager.open();
+    ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
+
+    String tableName = newTableName("testTable");
+    List<String> columnFamilies = Arrays.asList("cf1", "cf2", "cf3");
+    Transaction txn = manager.beginWriteTransaction(tableName,
+      columnFamilies);
+
+    List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
+    assertTrue(cfs.size() == columnFamilies.size());
+    for (String cf : cfs) {
+      assertTrue(columnFamilies.contains(cf));
+    }
+
+    for (String colFamily : columnFamilies) {
+      String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamily);
+      byte[] data = zkutil.getRawData(path, null);
+      StoreFamilyRevisionList list = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(list, data);
+      assertEquals(list.getRevisionListSize(), 1);
+      StoreFamilyRevision lightTxn = list.getRevisionList().get(0);
+      assertEquals(lightTxn.timestamp, txn.getTransactionExpireTimeStamp());
+      assertEquals(lightTxn.revision, txn.getRevisionNumber());
+
+    }
+    manager.commitWriteTransaction(txn);
+    for (String colFamiliy : columnFamilies) {
+      String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
+      byte[] data = zkutil.getRawData(path, null);
+      StoreFamilyRevisionList list = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(list, data);
+      assertEquals(list.getRevisionListSize(), 0);
+
+    }
+
+    manager.close();
+  }
+
+  @Test
+  public void testAbortTransaction() throws IOException {
 
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String host = getHbaseConf().get("hbase.zookeeper.quorum");
+    Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
+    conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
+    ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
+    manager.initialize(conf);
+    manager.open();
+    ZKUtil zkutil = new ZKUtil(host + ':' + port, "/rm_base");
+
+    String tableName = newTableName("testTable");
+    List<String> columnFamilies = Arrays.asList("cf1", "cf2", "cf3");
+    Transaction txn = manager.beginWriteTransaction(tableName, columnFamilies);
+    List<String> cfs = zkutil.getColumnFamiliesOfTable(tableName);
+
+    assertTrue(cfs.size() == columnFamilies.size());
+    for (String cf : cfs) {
+      assertTrue(columnFamilies.contains(cf));
     }
 
+    for (String colFamiliy : columnFamilies) {
+      String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
+      byte[] data = zkutil.getRawData(path, null);
+      StoreFamilyRevisionList list = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(list, data);
+      assertEquals(list.getRevisionListSize(), 1);
+      StoreFamilyRevision lightTxn = list.getRevisionList().get(0);
+      assertEquals(lightTxn.timestamp, txn.getTransactionExpireTimeStamp());
+      assertEquals(lightTxn.revision, txn.getRevisionNumber());
+
+    }
+    manager.abortWriteTransaction(txn);
+    for (String colFamiliy : columnFamilies) {
+      String path = PathUtil.getRunningTxnInfoPath("/rm_base", tableName, colFamiliy);
+      byte[] data = zkutil.getRawData(path, null);
+      StoreFamilyRevisionList list = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(list, data);
+      assertEquals(list.getRevisionListSize(), 0);
+
+    }
+
+    for (String colFamiliy : columnFamilies) {
+      String path = PathUtil.getAbortInformationPath("/rm_base", tableName, colFamiliy);
+      byte[] data = zkutil.getRawData(path, null);
+      StoreFamilyRevisionList list = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(list, data);
+      assertEquals(list.getRevisionListSize(), 1);
+      StoreFamilyRevision abortedTxn = list.getRevisionList().get(0);
+      assertEquals(abortedTxn.getRevision(), txn.getRevisionNumber());
+    }
+    manager.close();
+  }
+
+  @Test
+  public void testKeepAliveTransaction() throws InterruptedException, IOException {
+
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String servers = getHbaseConf().get("hbase.zookeeper.quorum");
+    String[] splits = servers.split(",");
+    StringBuffer sb = new StringBuffer();
+    for (String split : splits) {
+      sb.append(split);
+      sb.append(':');
+      sb.append(port);
+    }
+
+    Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
+    conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
+    ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
+    manager.initialize(conf);
+    manager.open();
+    String tableName = newTableName("testTable");
+    List<String> columnFamilies = Arrays.asList("cf1", "cf2");
+    Transaction txn = manager.beginWriteTransaction(tableName,
+      columnFamilies, 40);
+    Thread.sleep(100);
+    try {
+      manager.commitWriteTransaction(txn);
+    } catch (Exception e) {
+      assertTrue(e instanceof IOException);
+      assertEquals(e.getMessage(),
+        "The transaction to be removed not found in the data.");
+    }
+
+  }
+
+  @Test
+  public void testCreateSnapshot() throws IOException {
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String host = getHbaseConf().get("hbase.zookeeper.quorum");
+    Configuration conf = RevisionManagerConfiguration.create(getHbaseConf());
+    conf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
+    ZKBasedRevisionManager manager = new ZKBasedRevisionManager();
+    manager.initialize(conf);
+    manager.open();
+    String tableName = newTableName("testTable");
+    List<String> cfOne = Arrays.asList("cf1", "cf2");
+    List<String> cfTwo = Arrays.asList("cf2", "cf3");
+    Transaction tsx1 = manager.beginWriteTransaction(tableName, cfOne);
+    Transaction tsx2 = manager.beginWriteTransaction(tableName, cfTwo);
+    TableSnapshot snapshotOne = manager.createSnapshot(tableName);
+    assertEquals(snapshotOne.getRevision("cf1"), 0);
+    assertEquals(snapshotOne.getRevision("cf2"), 0);
+    assertEquals(snapshotOne.getRevision("cf3"), 1);
+
+    List<String> cfThree = Arrays.asList("cf1", "cf3");
+    Transaction tsx3 = manager.beginWriteTransaction(tableName, cfThree);
+    manager.commitWriteTransaction(tsx1);
+    TableSnapshot snapshotTwo = manager.createSnapshot(tableName);
+    assertEquals(snapshotTwo.getRevision("cf1"), 2);
+    assertEquals(snapshotTwo.getRevision("cf2"), 1);
+    assertEquals(snapshotTwo.getRevision("cf3"), 1);
+
+    manager.commitWriteTransaction(tsx2);
+    TableSnapshot snapshotThree = manager.createSnapshot(tableName);
+    assertEquals(snapshotThree.getRevision("cf1"), 2);
+    assertEquals(snapshotThree.getRevision("cf2"), 3);
+    assertEquals(snapshotThree.getRevision("cf3"), 2);
+    manager.commitWriteTransaction(tsx3);
+    TableSnapshot snapshotFour = manager.createSnapshot(tableName);
+    assertEquals(snapshotFour.getRevision("cf1"), 3);
+    assertEquals(snapshotFour.getRevision("cf2"), 3);
+    assertEquals(snapshotFour.getRevision("cf3"), 3);
+
+  }
+
 
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerConfiguration.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerConfiguration.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerConfiguration.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerConfiguration.java Thu Sep 12 01:21:10 2013
@@ -25,10 +25,10 @@ import org.junit.Test;
 
 public class TestRevisionManagerConfiguration {
 
-    @Test
-    public void testDefault() {
-        Configuration conf = RevisionManagerConfiguration.create();
-        Assert.assertEquals("org.apache.hcatalog.hbase.snapshot.ZKBasedRevisionManager",
-            conf.get(RevisionManagerFactory.REVISION_MGR_IMPL_CLASS));
-    }
+  @Test
+  public void testDefault() {
+    Configuration conf = RevisionManagerConfiguration.create();
+    Assert.assertEquals("org.apache.hcatalog.hbase.snapshot.ZKBasedRevisionManager",
+      conf.get(RevisionManagerFactory.REVISION_MGR_IMPL_CLASS));
+  }
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestRevisionManagerEndpoint.java Thu Sep 12 01:21:10 2013
@@ -35,172 +35,172 @@ import org.junit.Test;
 
 public class TestRevisionManagerEndpoint extends SkeletonHBaseTest {
 
-    static {
-        // test case specific mini cluster settings
-        testConf = new Configuration(false);
-        testConf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-            "org.apache.hcatalog.hbase.snapshot.RevisionManagerEndpoint",
-            "org.apache.hadoop.hbase.coprocessor.GenericEndpoint");
-        testConf.set(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS, MockRM.class.getName());
-    }
-
-    /**
-     * Mock implementation to test the protocol/serialization
-     */
-    public static class MockRM implements RevisionManager {
-
-        private static class Invocation {
-            Invocation(String methodName, Object ret, Object... args) {
-                this.methodName = methodName;
-                this.args = args;
-                this.ret = ret;
-            }
-
-            String methodName;
-            Object[] args;
-            Object ret;
-
-            private static boolean equals(Object obj1, Object obj2) {
-                if (obj1 == obj2) return true;
-                if (obj1 == null || obj2 == null) return false;
-                if (obj1 instanceof Transaction || obj1 instanceof TableSnapshot) {
-                    return obj1.toString().equals(obj2.toString());
-                }
-                return obj1.equals(obj2);
-            }
-
-            @Override
-            public boolean equals(Object obj) {
-                Invocation other = (Invocation) obj;
-                if (this == other) return true;
-                if (other == null) return false;
-                if (this.args != other.args) {
-                    if (this.args == null || other.args == null) return false;
-                    if (this.args.length != other.args.length) return false;
-                    for (int i = 0; i < args.length; i++) {
-                        if (!equals(this.args[i], other.args[i])) return false;
-                    }
-                }
-                return equals(this.ret, other.ret);
-            }
-
-            @Override
-            public String toString() {
-                return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).
-                    append("method", this.methodName).
-                    append("args", this.args).
-                    append("returns", this.ret).
-                    toString();
-            }
-        }
+  static {
+    // test case specific mini cluster settings
+    testConf = new Configuration(false);
+    testConf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+      "org.apache.hcatalog.hbase.snapshot.RevisionManagerEndpoint",
+      "org.apache.hadoop.hbase.coprocessor.GenericEndpoint");
+    testConf.set(RMConstants.REVISION_MGR_ENDPOINT_IMPL_CLASS, MockRM.class.getName());
+  }
+
+  /**
+   * Mock implementation to test the protocol/serialization
+   */
+  public static class MockRM implements RevisionManager {
+
+    private static class Invocation {
+      Invocation(String methodName, Object ret, Object... args) {
+        this.methodName = methodName;
+        this.args = args;
+        this.ret = ret;
+      }
+
+      String methodName;
+      Object[] args;
+      Object ret;
+
+      private static boolean equals(Object obj1, Object obj2) {
+        if (obj1 == obj2) return true;
+        if (obj1 == null || obj2 == null) return false;
+        if (obj1 instanceof Transaction || obj1 instanceof TableSnapshot) {
+          return obj1.toString().equals(obj2.toString());
+        }
+        return obj1.equals(obj2);
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        Invocation other = (Invocation) obj;
+        if (this == other) return true;
+        if (other == null) return false;
+        if (this.args != other.args) {
+          if (this.args == null || other.args == null) return false;
+          if (this.args.length != other.args.length) return false;
+          for (int i = 0; i < args.length; i++) {
+            if (!equals(this.args[i], other.args[i])) return false;
+          }
+        }
+        return equals(this.ret, other.ret);
+      }
+
+      @Override
+      public String toString() {
+        return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).
+          append("method", this.methodName).
+          append("args", this.args).
+          append("returns", this.ret).
+          toString();
+      }
+    }
 
-        final static String DEFAULT_INSTANCE = "default";
-        final static Map<String, MockRM> INSTANCES = new ConcurrentHashMap<String, MockRM>();
-        Invocation lastCall;
-        boolean isOpen = false;
-
-        private <T extends Object> T recordCall(T result, Object... args) {
-            StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
-            lastCall = new Invocation(stackTrace[2].getMethodName(), result, args);
-            return result;
-        }
+    final static String DEFAULT_INSTANCE = "default";
+    final static Map<String, MockRM> INSTANCES = new ConcurrentHashMap<String, MockRM>();
+    Invocation lastCall;
+    boolean isOpen = false;
+
+    private <T extends Object> T recordCall(T result, Object... args) {
+      StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+      lastCall = new Invocation(stackTrace[2].getMethodName(), result, args);
+      return result;
+    }
 
-        @Override
-        public void initialize(Configuration conf) {
-            if (!INSTANCES.containsKey(DEFAULT_INSTANCE))
-                INSTANCES.put(DEFAULT_INSTANCE, this);
-        }
+    @Override
+    public void initialize(Configuration conf) {
+      if (!INSTANCES.containsKey(DEFAULT_INSTANCE))
+        INSTANCES.put(DEFAULT_INSTANCE, this);
+    }
 
-        @Override
-        public void open() throws IOException {
-            isOpen = true;
-        }
+    @Override
+    public void open() throws IOException {
+      isOpen = true;
+    }
 
-        @Override
-        public void close() throws IOException {
-            isOpen = false;
-        }
+    @Override
+    public void close() throws IOException {
+      isOpen = false;
+    }
 
-        @Override
-        public void createTable(String table, List<String> columnFamilies) throws IOException {
-        }
+    @Override
+    public void createTable(String table, List<String> columnFamilies) throws IOException {
+    }
 
-        @Override
-        public void dropTable(String table) throws IOException {
-        }
+    @Override
+    public void dropTable(String table) throws IOException {
+    }
 
-        @Override
-        public Transaction beginWriteTransaction(String table,
-                                                 List<String> families) throws IOException {
-            return recordCall(null, table, families);
-        }
+    @Override
+    public Transaction beginWriteTransaction(String table,
+                         List<String> families) throws IOException {
+      return recordCall(null, table, families);
+    }
 
-        @Override
-        public Transaction beginWriteTransaction(String table,
-                                                 List<String> families, long keepAlive) throws IOException {
-            return recordCall(null, table, families, keepAlive);
-        }
+    @Override
+    public Transaction beginWriteTransaction(String table,
+                         List<String> families, long keepAlive) throws IOException {
+      return recordCall(null, table, families, keepAlive);
+    }
 
-        @Override
-        public void commitWriteTransaction(Transaction transaction)
-            throws IOException {
-        }
+    @Override
+    public void commitWriteTransaction(Transaction transaction)
+      throws IOException {
+    }
 
-        @Override
-        public void abortWriteTransaction(Transaction transaction)
-            throws IOException {
-        }
+    @Override
+    public void abortWriteTransaction(Transaction transaction)
+      throws IOException {
+    }
 
-        @Override
-        public List<FamilyRevision> getAbortedWriteTransactions(String table,
-                                                                String columnFamily) throws IOException {
-            return null;
-        }
+    @Override
+    public List<FamilyRevision> getAbortedWriteTransactions(String table,
+                                String columnFamily) throws IOException {
+      return null;
+    }
 
-        @Override
-        public TableSnapshot createSnapshot(String tableName)
-            throws IOException {
-            return null;
-        }
+    @Override
+    public TableSnapshot createSnapshot(String tableName)
+      throws IOException {
+      return null;
+    }
 
-        @Override
-        public TableSnapshot createSnapshot(String tableName, long revision)
-            throws IOException {
-            TableSnapshot ret = new TableSnapshot(tableName, new HashMap<String, Long>(), revision);
-            return recordCall(ret, tableName, revision);
-        }
+    @Override
+    public TableSnapshot createSnapshot(String tableName, long revision)
+      throws IOException {
+      TableSnapshot ret = new TableSnapshot(tableName, new HashMap<String, Long>(), revision);
+      return recordCall(ret, tableName, revision);
+    }
 
-        @Override
-        public void keepAlive(Transaction transaction) throws IOException {
-            recordCall(null, transaction);
-        }
+    @Override
+    public void keepAlive(Transaction transaction) throws IOException {
+      recordCall(null, transaction);
     }
+  }
 
-    @Test
-    public void testRevisionManagerProtocol() throws Throwable {
+  @Test
+  public void testRevisionManagerProtocol() throws Throwable {
 
-        Configuration conf = getHbaseConf();
-        RevisionManager rm = RevisionManagerFactory.getOpenedRevisionManager(
-            RevisionManagerEndpointClient.class.getName(), conf);
-
-        MockRM mockImpl = MockRM.INSTANCES.get(MockRM.DEFAULT_INSTANCE);
-        Assert.assertNotNull(mockImpl);
-        Assert.assertTrue(mockImpl.isOpen);
-
-        Transaction t = new Transaction("t1", Arrays.asList("f1", "f2"), 0, 0);
-        MockRM.Invocation call = new MockRM.Invocation("keepAlive", null, t);
-        rm.keepAlive(t);
-        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
-
-        t = new Transaction("t2", Arrays.asList("f21", "f22"), 0, 0);
-        call = new MockRM.Invocation("beginWriteTransaction", null, t.getTableName(), t.getColumnFamilies());
-        call.ret = rm.beginWriteTransaction(t.getTableName(), t.getColumnFamilies());
-        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
-
-        call = new MockRM.Invocation("createSnapshot", null, "t3", 1L);
-        call.ret = rm.createSnapshot("t3", 1);
-        Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+    Configuration conf = getHbaseConf();
+    RevisionManager rm = RevisionManagerFactory.getOpenedRevisionManager(
+      RevisionManagerEndpointClient.class.getName(), conf);
+
+    MockRM mockImpl = MockRM.INSTANCES.get(MockRM.DEFAULT_INSTANCE);
+    Assert.assertNotNull(mockImpl);
+    Assert.assertTrue(mockImpl.isOpen);
+
+    Transaction t = new Transaction("t1", Arrays.asList("f1", "f2"), 0, 0);
+    MockRM.Invocation call = new MockRM.Invocation("keepAlive", null, t);
+    rm.keepAlive(t);
+    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+
+    t = new Transaction("t2", Arrays.asList("f21", "f22"), 0, 0);
+    call = new MockRM.Invocation("beginWriteTransaction", null, t.getTableName(), t.getColumnFamilies());
+    call.ret = rm.beginWriteTransaction(t.getTableName(), t.getColumnFamilies());
+    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
+
+    call = new MockRM.Invocation("createSnapshot", null, "t3", 1L);
+    call.ret = rm.createSnapshot("t3", 1);
+    Assert.assertEquals(call.methodName, call, mockImpl.lastCall);
 
-    }
+  }
 
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestThriftSerialization.java Thu Sep 12 01:21:10 2013
@@ -31,55 +31,55 @@ import org.junit.Test;
 
 public class TestThriftSerialization {
 
-    @Test
-    public void testLightWeightTransaction() {
-        StoreFamilyRevision trxn = new StoreFamilyRevision(0, 1000);
-        try {
-
-            byte[] data = ZKUtil.serialize(trxn);
-            StoreFamilyRevision newWtx = new StoreFamilyRevision();
-            ZKUtil.deserialize(newWtx, data);
-
-            assertTrue(newWtx.getRevision() == trxn.getRevision());
-            assertTrue(newWtx.getTimestamp() == trxn.getTimestamp());
-
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
+  @Test
+  public void testLightWeightTransaction() {
+    StoreFamilyRevision trxn = new StoreFamilyRevision(0, 1000);
+    try {
+
+      byte[] data = ZKUtil.serialize(trxn);
+      StoreFamilyRevision newWtx = new StoreFamilyRevision();
+      ZKUtil.deserialize(newWtx, data);
+
+      assertTrue(newWtx.getRevision() == trxn.getRevision());
+      assertTrue(newWtx.getTimestamp() == trxn.getTimestamp());
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  @Test
+  public void testWriteTransactionList() {
+    List<StoreFamilyRevision> txnList = new ArrayList<StoreFamilyRevision>();
+    long version;
+    long timestamp;
+    for (int i = 0; i < 10; i++) {
+      version = i;
+      timestamp = 1000 + i;
+      StoreFamilyRevision wtx = new StoreFamilyRevision(version, timestamp);
+      txnList.add(wtx);
     }
 
-    @Test
-    public void testWriteTransactionList() {
-        List<StoreFamilyRevision> txnList = new ArrayList<StoreFamilyRevision>();
-        long version;
-        long timestamp;
-        for (int i = 0; i < 10; i++) {
-            version = i;
-            timestamp = 1000 + i;
-            StoreFamilyRevision wtx = new StoreFamilyRevision(version, timestamp);
-            txnList.add(wtx);
-        }
-
-        StoreFamilyRevisionList wList = new StoreFamilyRevisionList(txnList);
-
-        try {
-            byte[] data = ZKUtil.serialize(wList);
-            StoreFamilyRevisionList newList = new StoreFamilyRevisionList();
-            ZKUtil.deserialize(newList, data);
-            assertTrue(newList.getRevisionListSize() == wList.getRevisionListSize());
-
-            Iterator<StoreFamilyRevision> itr = newList.getRevisionListIterator();
-            int i = 0;
-            while (itr.hasNext()) {
-                StoreFamilyRevision txn = itr.next();
-                assertTrue(txn.getRevision() == i);
-                assertTrue(txn.getTimestamp() == (i + 1000));
-                i++;
-            }
-
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
+    StoreFamilyRevisionList wList = new StoreFamilyRevisionList(txnList);
+
+    try {
+      byte[] data = ZKUtil.serialize(wList);
+      StoreFamilyRevisionList newList = new StoreFamilyRevisionList();
+      ZKUtil.deserialize(newList, data);
+      assertTrue(newList.getRevisionListSize() == wList.getRevisionListSize());
+
+      Iterator<StoreFamilyRevision> itr = newList.getRevisionListIterator();
+      int i = 0;
+      while (itr.hasNext()) {
+        StoreFamilyRevision txn = itr.next();
+        assertTrue(txn.getRevision() == i);
+        assertTrue(txn.getTimestamp() == (i + 1000));
+        i++;
+      }
+
+    } catch (IOException e) {
+      e.printStackTrace();
     }
+  }
 
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/TestZNodeSetUp.java Thu Sep 12 01:21:10 2013
@@ -33,8 +33,8 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hcatalog.cli.HCatDriver;
-import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
+import org.apache.hive.hcatalog.cli.HCatDriver;
+import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
 import org.apache.hcatalog.hbase.SkeletonHBaseTest;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
@@ -43,78 +43,78 @@ import org.junit.Test;
 
 public class TestZNodeSetUp extends SkeletonHBaseTest {
 
-    private static HiveConf hcatConf;
-    private static HCatDriver hcatDriver;
+  private static HiveConf hcatConf;
+  private static HCatDriver hcatDriver;
 
-    public void Initialize() throws Exception {
-
-        hcatConf = getHiveConf();
-        hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-            HCatSemanticAnalyzer.class.getName());
-        URI fsuri = getFileSystem().getUri();
-        Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(),
-            getTestDir());
-        hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
-        hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
-
-        //Add hbase properties
-
-        for (Map.Entry<String, String> el : getHbaseConf()) {
-            if (el.getKey().startsWith("hbase.")) {
-                hcatConf.set(el.getKey(), el.getValue());
-            }
-        }
-        HBaseConfiguration.merge(hcatConf,
-            RevisionManagerConfiguration.create());
-        hcatConf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
-        SessionState.start(new CliSessionState(hcatConf));
-        hcatDriver = new HCatDriver();
+  public void Initialize() throws Exception {
 
+    hcatConf = getHiveConf();
+    hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
+      HCatSemanticAnalyzer.class.getName());
+    URI fsuri = getFileSystem().getUri();
+    Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(),
+      getTestDir());
+    hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
+    hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
+
+    //Add hbase properties
+
+    for (Map.Entry<String, String> el : getHbaseConf()) {
+      if (el.getKey().startsWith("hbase.")) {
+        hcatConf.set(el.getKey(), el.getValue());
+      }
+    }
+    HBaseConfiguration.merge(hcatConf,
+      RevisionManagerConfiguration.create());
+    hcatConf.set(RMConstants.ZOOKEEPER_DATADIR, "/rm_base");
+    SessionState.start(new CliSessionState(hcatConf));
+    hcatDriver = new HCatDriver();
+
+  }
+
+  @Test
+  public void testBasicZNodeCreation() throws Exception {
+
+    Initialize();
+    int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
+    String servers = getHbaseConf().get("hbase.zookeeper.quorum");
+    String[] splits = servers.split(",");
+    StringBuffer sb = new StringBuffer();
+    for (String split : splits) {
+      sb.append(split);
+      sb.append(':');
+      sb.append(port);
     }
 
-    @Test
-    public void testBasicZNodeCreation() throws Exception {
+    hcatDriver.run("drop table test_table");
+    CommandProcessorResponse response = hcatDriver
+      .run("create table test_table(key int, value string) STORED BY " +
+        "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
+        + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");
 
-        Initialize();
-        int port = getHbaseConf().getInt("hbase.zookeeper.property.clientPort", 2181);
-        String servers = getHbaseConf().get("hbase.zookeeper.quorum");
-        String[] splits = servers.split(",");
-        StringBuffer sb = new StringBuffer();
-        for (String split : splits) {
-            sb.append(split);
-            sb.append(':');
-            sb.append(port);
-        }
-
-        hcatDriver.run("drop table test_table");
-        CommandProcessorResponse response = hcatDriver
-            .run("create table test_table(key int, value string) STORED BY " +
-                "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
-                + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");
-
-        assertEquals(0, response.getResponseCode());
-
-        HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
-        boolean doesTableExist = hAdmin.tableExists("test_table");
-        assertTrue(doesTableExist);
-
-
-        ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
-        ZooKeeper zk = zkutil.getSession();
-        String tablePath = PathUtil.getTxnDataPath("/rm_base", "test_table");
-        Stat tempTwo = zk.exists(tablePath, false);
-        assertTrue(tempTwo != null);
-
-        String cfPath = PathUtil.getTxnDataPath("/rm_base", "test_table") + "/cf1";
-        Stat tempThree = zk.exists(cfPath, false);
-        assertTrue(tempThree != null);
-
-        hcatDriver.run("drop table test_table");
-
-        System.out.println("Table path : " + tablePath);
-        Stat tempFour = zk.exists(tablePath, false);
-        assertTrue(tempFour == null);
+    assertEquals(0, response.getResponseCode());
 
-    }
+    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
+    boolean doesTableExist = hAdmin.tableExists("test_table");
+    assertTrue(doesTableExist);
+
+
+    ZKUtil zkutil = new ZKUtil(sb.toString(), "/rm_base");
+    ZooKeeper zk = zkutil.getSession();
+    String tablePath = PathUtil.getTxnDataPath("/rm_base", "test_table");
+    Stat tempTwo = zk.exists(tablePath, false);
+    assertTrue(tempTwo != null);
+
+    String cfPath = PathUtil.getTxnDataPath("/rm_base", "test_table") + "/cf1";
+    Stat tempThree = zk.exists(cfPath, false);
+    assertTrue(tempThree != null);
+
+    hcatDriver.run("drop table test_table");
+
+    System.out.println("Table path : " + tablePath);
+    Stat tempFour = zk.exists(tablePath, false);
+    assertTrue(tempFour == null);
+
+  }
 
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestWriteLock.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestWriteLock.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestWriteLock.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestWriteLock.java Thu Sep 12 01:21:10 2013
@@ -35,127 +35,127 @@ import org.junit.Test;
  * Recipe with a change in the package name.
  */
 public class TestWriteLock extends ClientBase {
-    protected int sessionTimeout = 10 * 1000;
-    protected String dir = "/" + getClass().getName();
-    protected WriteLock[] nodes;
-    protected CountDownLatch latch = new CountDownLatch(1);
-    private boolean restartServer = true;
-    private boolean workAroundClosingLastZNodeFails = true;
-    private boolean killLeader = true;
-
-    @Test
-    public void testRun() throws Exception {
-        runTest(3);
-    }
-
-    class LockCallback implements LockListener {
-        public void lockAcquired() {
-            latch.countDown();
-        }
+  protected int sessionTimeout = 10 * 1000;
+  protected String dir = "/" + getClass().getName();
+  protected WriteLock[] nodes;
+  protected CountDownLatch latch = new CountDownLatch(1);
+  private boolean restartServer = true;
+  private boolean workAroundClosingLastZNodeFails = true;
+  private boolean killLeader = true;
+
+  @Test
+  public void testRun() throws Exception {
+    runTest(3);
+  }
+
+  class LockCallback implements LockListener {
+    public void lockAcquired() {
+      latch.countDown();
+    }
 
-        public void lockReleased() {
-
-        }
+    public void lockReleased() {
 
     }
 
-    protected void runTest(int count) throws Exception {
-        nodes = new WriteLock[count];
-        for (int i = 0; i < count; i++) {
-            ZooKeeper keeper = createClient();
-            WriteLock leader = new WriteLock(keeper, dir, null);
-            leader.setLockListener(new LockCallback());
-            nodes[i] = leader;
+  }
 
-            leader.lock();
-        }
+  protected void runTest(int count) throws Exception {
+    nodes = new WriteLock[count];
+    for (int i = 0; i < count; i++) {
+      ZooKeeper keeper = createClient();
+      WriteLock leader = new WriteLock(keeper, dir, null);
+      leader.setLockListener(new LockCallback());
+      nodes[i] = leader;
 
-        // lets wait for any previous leaders to die and one of our new
-        // nodes to become the new leader
-        latch.await(30, TimeUnit.SECONDS);
+      leader.lock();
+    }
 
-        WriteLock first = nodes[0];
-        dumpNodes(count);
+    // lets wait for any previous leaders to die and one of our new
+    // nodes to become the new leader
+    latch.await(30, TimeUnit.SECONDS);
+
+    WriteLock first = nodes[0];
+    dumpNodes(count);
+
+    // lets assert that the first election is the leader
+    Assert.assertTrue("The first znode should be the leader " + first.getId(), first.isOwner());
+
+    for (int i = 1; i < count; i++) {
+      WriteLock node = nodes[i];
+      Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
+    }
 
+    if (count > 1) {
+      if (killLeader) {
+        System.out.println("Now killing the leader");
+        // now lets kill the leader
+        latch = new CountDownLatch(1);
+        first.unlock();
+        latch.await(30, TimeUnit.SECONDS);
+        //Thread.sleep(10000);
+        WriteLock second = nodes[1];
+        dumpNodes(count);
         // lets assert that the first election is the leader
-        Assert.assertTrue("The first znode should be the leader " + first.getId(), first.isOwner());
+        Assert.assertTrue("The second znode should be the leader " + second.getId(), second.isOwner());
 
-        for (int i = 1; i < count; i++) {
-            WriteLock node = nodes[i];
-            Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
+        for (int i = 2; i < count; i++) {
+          WriteLock node = nodes[i];
+          Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
         }
+      }
+
+
+      if (restartServer) {
+        // now lets stop the server
+        System.out.println("Now stopping the server");
+        stopServer();
+        Thread.sleep(10000);
+
+        // TODO lets assert that we are no longer the leader
+        dumpNodes(count);
 
-        if (count > 1) {
-            if (killLeader) {
-                System.out.println("Now killing the leader");
-                // now lets kill the leader
-                latch = new CountDownLatch(1);
-                first.unlock();
-                latch.await(30, TimeUnit.SECONDS);
-                //Thread.sleep(10000);
-                WriteLock second = nodes[1];
-                dumpNodes(count);
-                // lets assert that the first election is the leader
-                Assert.assertTrue("The second znode should be the leader " + second.getId(), second.isOwner());
-
-                for (int i = 2; i < count; i++) {
-                    WriteLock node = nodes[i];
-                    Assert.assertFalse("Node should not be the leader " + node.getId(), node.isOwner());
-                }
-            }
-
-
-            if (restartServer) {
-                // now lets stop the server
-                System.out.println("Now stopping the server");
-                stopServer();
-                Thread.sleep(10000);
-
-                // TODO lets assert that we are no longer the leader
-                dumpNodes(count);
-
-                System.out.println("Starting the server");
-                startServer();
-                Thread.sleep(10000);
-
-                for (int i = 0; i < count - 1; i++) {
-                    System.out.println("Calling acquire for node: " + i);
-                    nodes[i].lock();
-                }
-                dumpNodes(count);
-                System.out.println("Now closing down...");
-            }
+        System.out.println("Starting the server");
+        startServer();
+        Thread.sleep(10000);
+
+        for (int i = 0; i < count - 1; i++) {
+          System.out.println("Calling acquire for node: " + i);
+          nodes[i].lock();
         }
+        dumpNodes(count);
+        System.out.println("Now closing down...");
+      }
     }
+  }
 
-    protected void dumpNodes(int count) {
-        for (int i = 0; i < count; i++) {
-            WriteLock node = nodes[i];
-            System.out.println("node: " + i + " id: " +
-                node.getId() + " is leader: " + node.isOwner());
-        }
+  protected void dumpNodes(int count) {
+    for (int i = 0; i < count; i++) {
+      WriteLock node = nodes[i];
+      System.out.println("node: " + i + " id: " +
+        node.getId() + " is leader: " + node.isOwner());
     }
+  }
 
-    @After
-    public void tearDown() throws Exception {
-        if (nodes != null) {
-            for (int i = 0; i < nodes.length; i++) {
-                WriteLock node = nodes[i];
-                if (node != null) {
-                    System.out.println("Closing node: " + i);
-                    node.close();
-                    if (workAroundClosingLastZNodeFails && i == nodes.length - 1) {
-                        System.out.println("Not closing zookeeper: " + i + " due to bug!");
-                    } else {
-                        System.out.println("Closing zookeeper: " + i);
-                        node.getZookeeper().close();
-                        System.out.println("Closed zookeeper: " + i);
-                    }
-                }
-            }
+  @After
+  public void tearDown() throws Exception {
+    if (nodes != null) {
+      for (int i = 0; i < nodes.length; i++) {
+        WriteLock node = nodes[i];
+        if (node != null) {
+          System.out.println("Closing node: " + i);
+          node.close();
+          if (workAroundClosingLastZNodeFails && i == nodes.length - 1) {
+            System.out.println("Not closing zookeeper: " + i + " due to bug!");
+          } else {
+            System.out.println("Closing zookeeper: " + i);
+            node.getZookeeper().close();
+            System.out.println("Closed zookeeper: " + i);
+          }
         }
-        System.out.println("Now lets stop the server");
-        super.tearDown();
-
+      }
     }
+    System.out.println("Now lets stop the server");
+    super.tearDown();
+
+  }
 }

Modified: hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestZNodeName.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestZNodeName.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestZNodeName.java (original)
+++ hive/branches/vectorization/hcatalog/storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/snapshot/lock/TestZNodeName.java Thu Sep 12 01:21:10 2013
@@ -31,32 +31,32 @@ import org.junit.Test;
  * The package name has been changed.
  */
 public class TestZNodeName extends TestCase {
-    @Test
-    public void testOrderWithSamePrefix() throws Exception {
-        String[] names = { "x-3", "x-5", "x-11", "x-1" };
-        String[] expected = { "x-1", "x-3", "x-5", "x-11" };
-        assertOrderedNodeNames(names, expected);
-    }
-    @Test
-    public void testOrderWithDifferentPrefixes() throws Exception {
-        String[] names = { "r-3", "r-2", "r-1", "w-2", "w-1" };
-        String[] expected = { "r-1", "r-2", "r-3", "w-1", "w-2" };
-        assertOrderedNodeNames(names, expected);
-    }
+  @Test
+  public void testOrderWithSamePrefix() throws Exception {
+    String[] names = { "x-3", "x-5", "x-11", "x-1" };
+    String[] expected = { "x-1", "x-3", "x-5", "x-11" };
+    assertOrderedNodeNames(names, expected);
+  }
+  @Test
+  public void testOrderWithDifferentPrefixes() throws Exception {
+    String[] names = { "r-3", "r-2", "r-1", "w-2", "w-1" };
+    String[] expected = { "r-1", "r-2", "r-3", "w-1", "w-2" };
+    assertOrderedNodeNames(names, expected);
+  }
 
-    protected void assertOrderedNodeNames(String[] names, String[] expected) {
-        int size = names.length;
-        assertEquals("The two arrays should be the same size!", names.length, expected.length);
-        SortedSet<ZNodeName> nodeNames = new TreeSet<ZNodeName>();
-        for (String name : names) {
-            nodeNames.add(new ZNodeName(name));
-        }
+  protected void assertOrderedNodeNames(String[] names, String[] expected) {
+    int size = names.length;
+    assertEquals("The two arrays should be the same size!", names.length, expected.length);
+    SortedSet<ZNodeName> nodeNames = new TreeSet<ZNodeName>();
+    for (String name : names) {
+      nodeNames.add(new ZNodeName(name));
+    }
 
-        int index = 0;
-        for (ZNodeName nodeName : nodeNames) {
-            String name = nodeName.getName();
-            assertEquals("Node " + index, expected[index++], name);
-        }
+    int index = 0;
+    for (ZNodeName nodeName : nodeNames) {
+      String name = nodeName.getName();
+      assertEquals("Node " + index, expected[index++], name);
     }
+  }
 
 }
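
The orderings asserted above imply that ZNodeName sorts first by the prefix before the
last '-' and then by the numeric value of the suffix (so "x-11" sorts after "x-5", and
all "r-*" nodes precede all "w-*" nodes). A minimal sketch of such a comparison, written
against these test expectations only; the class and helper names below are illustrative
and not the actual ZNodeName implementation:

    import java.util.Comparator;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class SequenceNameOrderSketch {

      // Everything before the last '-' is treated as the prefix.
      private static String prefixOf(String name) {
        return name.substring(0, name.lastIndexOf('-'));
      }

      // The digits after the last '-' are compared numerically, not lexically.
      private static int sequenceOf(String name) {
        return Integer.parseInt(name.substring(name.lastIndexOf('-') + 1));
      }

      public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<String>(new Comparator<String>() {
          public int compare(String a, String b) {
            int byPrefix = prefixOf(a).compareTo(prefixOf(b));
            return byPrefix != 0 ? byPrefix : Integer.compare(sequenceOf(a), sequenceOf(b));
          }
        });
        for (String name : new String[] { "x-3", "x-5", "x-11", "x-1" }) {
          names.add(name);
        }
        System.out.println(names); // prints [x-1, x-3, x-5, x-11]
      }
    }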

Modified: hive/branches/vectorization/hcatalog/webhcat/java-client/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/webhcat/java-client/pom.xml?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/webhcat/java-client/pom.xml (original)
+++ hive/branches/vectorization/hcatalog/webhcat/java-client/pom.xml Thu Sep 12 01:21:10 2013
@@ -22,14 +22,13 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
 
     <parent>
-        <groupId>org.apache.hcatalog</groupId>
+        <groupId>org.apache.hive.hcatalog</groupId>
         <artifactId>hcatalog</artifactId>
-        <version>0.12.0-SNAPSHOT</version>
+        <version>0.13.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
     <modelVersion>4.0.0</modelVersion>
-    <groupId>org.apache.hcatalog</groupId>
     <artifactId>webhcat-java-client</artifactId>
     <packaging>jar</packaging>
     <name>webhcat-java-client</name>
@@ -37,7 +36,7 @@
 
     <dependencies>
         <dependency>
-            <groupId>org.apache.hcatalog</groupId>
+            <groupId>org.apache.hive.hcatalog</groupId>
             <artifactId>hcatalog-core</artifactId>
             <version>${hcatalog.version}</version>
             <scope>compile</scope>

Modified: hive/branches/vectorization/hcatalog/webhcat/svr/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/webhcat/svr/pom.xml?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/webhcat/svr/pom.xml (original)
+++ hive/branches/vectorization/hcatalog/webhcat/svr/pom.xml Thu Sep 12 01:21:10 2013
@@ -22,27 +22,37 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
 
     <parent>
-        <groupId>org.apache.hcatalog</groupId>
+        <groupId>org.apache.hive.hcatalog</groupId>
         <artifactId>hcatalog</artifactId>
-        <version>0.12.0-SNAPSHOT</version>
+        <version>0.13.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
     <modelVersion>4.0.0</modelVersion>
-    <groupId>org.apache.hcatalog</groupId>
     <artifactId>webhcat</artifactId>
     <packaging>jar</packaging>
     <name>webhcat</name>
     <url>http://maven.apache.org</url>
 
     <dependencies>
-
+        <!--
+        <dependency>
+            <groupId>xerces</groupId>
+            <artifactId>xercesImpl</artifactId>
+            <version>2.9.1</version>
+        </dependency>
+        <dependency>
+            <groupId>xalan</groupId>
+            <artifactId>xalan</artifactId>
+            <version>2.7.1</version>
+        </dependency>
+        -->
         <!-- provided scope - made available as separate package
           not packaged or added as dependency
         -->
 
         <dependency>
-            <groupId>org.apache.hcatalog</groupId>
+            <groupId>org.apache.hive.hcatalog</groupId>
             <artifactId>hcatalog-core</artifactId>
             <version>${hcatalog.version}</version>
             <scope>provided</scope>
@@ -74,12 +84,6 @@
             <scope>compile</scope>
         </dependency>
         <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-json</artifactId>
-            <version>${jersey.version}</version>
-            <scope>compile</scope>
-        </dependency>
-        <dependency>
             <groupId>org.codehaus.jackson</groupId>
             <artifactId>jackson-core-asl</artifactId>
             <version>${jackson.version}</version>

Modified: hive/branches/vectorization/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh (original)
+++ hive/branches/vectorization/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh Thu Sep 12 01:21:10 2013
@@ -223,7 +223,7 @@ fi
 export HADOOP_USER_CLASSPATH_FIRST=true
 export HADOOP_OPTS="${HADOOP_OPTS} -Dwebhcat.log.dir=$WEBHCAT_LOG_DIR -Dlog4j.configuration=$WEBHCAT_LOG4J"
 
-start_cmd="$HADOOP_PREFIX/bin/hadoop jar $JAR org.apache.hcatalog.templeton.Main  "
+start_cmd="$HADOOP_PREFIX/bin/hadoop jar $JAR org.apache.hive.hcatalog.templeton.Main  "
 
 
 cmd=$1

Modified: hive/branches/vectorization/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml (original)
+++ hive/branches/vectorization/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml Thu Sep 12 01:21:10 2013
@@ -191,7 +191,7 @@
 
   <property>
     <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.HDFSStorage</value>
+    <value>org.apache.hive.hcatalog.templeton.tool.HDFSStorage</value>
     <description>The class to use as storage</description>
   </property>
 
@@ -234,5 +234,49 @@
     in the cluster are taken over by Templeton launcher tasks.
     </description>
   </property>
+  <!--
+  <property>
+    <name>webhcat.proxyuser.#USER#.hosts</name>
+    <value>www.example.com,host2</value>
+    <description>
+      List of client hosts from which the '#USER#' user is allowed to perform 
+      'doAs' operations.
+
+      The '#USER#' must be replaced with the username of the user who is
+      allowed to perform 'doAs' operations.
+
+      The value can be the '*' wildcard, which means every host is allowed,
+      or a comma-separated list of hostnames.
+   
+      If value is a blank string or webhcat.proxyuser.#USER#.hosts is missing,
+      no hosts will be allowed.  
 
+      For multiple users copy this property and replace the user name
+      in the property name.
+    </description>
+  </property>
+  <property>
+    <name>webhcat.proxyuser.#USER#.groups</name>
+    <value>group1, group2</value>
+    <description>
+      List of groups the '#USER#' user is allowed to impersonate users
+      from to perform 'doAs' operations.
+
+      The '#USER#' must be replaced with the username of the user who is
+      allowed to perform 'doAs' operations.
+
+      The value can be the '*' wildcard, which means any doAs value is
+      allowed, or a comma-separated list of groups.
+
+      If value is an empty list or webhcat.proxyuser.#USER#.groups is missing,
+      every doAs call will fail.
+
+      For multiple users copy this property and replace the user name
+      in the property name.
+      
+      The username-to-group mapping is performed using the Hadoop API, which is
+      controlled by the hadoop.security.group.mapping property.
+    </description>
+  </property>
+-->
 </configuration>
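
For context, the commented-out webhcat.proxyuser.* properties above gate WebHCat's
impersonation ('doAs') support: a trusted service user may ask the server to run a
request on behalf of another user, provided the calling host and the target user's
groups are allowed. A hedged client-side sketch follows, assuming WebHCat's default
port 50111 and the user.name/doAs query parameters; the host name, user names and
REST path are placeholders:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHCatDoAsSketch {
      public static void main(String[] args) throws Exception {
        // The 'hcat' service user asks WebHCat to list databases as 'alice'.
        // This only succeeds if webhcat.proxyuser.hcat.hosts and
        // webhcat.proxyuser.hcat.groups permit the calling host and alice's group.
        URL url = new URL("http://webhcat.example.com:50111/templeton/v1/ddl/database"
            + "?user.name=hcat&doAs=alice");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line); // JSON response, evaluated under alice's permissions
        }
        in.close();
      }
    }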

Modified: hive/branches/vectorization/ivy/ivysettings.xml
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ivy/ivysettings.xml?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/ivy/ivysettings.xml (original)
+++ hive/branches/vectorization/ivy/ivysettings.xml Thu Sep 12 01:21:10 2013
@@ -47,6 +47,10 @@
              checkmodified="${ivy.checkmodified}" 
              changingPattern="${ivy.changingPattern}"/>
 
+    <ibiblio name="sonatype-snapshot" root="https://oss.sonatype.org/content/repositories/snapshots/" m2compatible="true"
+             checkmodified="${ivy.checkmodified}" 
+             changingPattern="${ivy.changingPattern}"/>
+
     <url name="datanucleus-repo" m2compatible="true">
       <artifact pattern="${datanucleus.repo}/[organisation]/[module]/[revision]/[module]-[revision](-[classifier]).[ext]"/>
     </url>
@@ -68,6 +72,7 @@
       <resolver ref="maven2"/>
       <resolver ref="datanucleus-repo"/>
       <resolver ref="sourceforge"/>
+      <resolver ref="sonatype-snapshot"/>
     </chain>
 
     <chain name="internal" dual="true">
@@ -77,11 +82,13 @@
       <resolver ref="maven2"/>
       <resolver ref="datanucleus-repo"/>
       <resolver ref="sourceforge"/>
+      <resolver ref="sonatype-snapshot"/>
     </chain>
 
     <chain name="external">
       <resolver ref="maven2"/>
       <resolver ref="datanucleus-repo"/>
+      <resolver ref="sonatype-snapshot"/>
     </chain>
 
   </resolvers>

Modified: hive/branches/vectorization/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ivy/libraries.properties?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/ivy/libraries.properties (original)
+++ hive/branches/vectorization/ivy/libraries.properties Thu Sep 12 01:21:10 2013
@@ -51,12 +51,13 @@ jetty.version=6.1.26
 jline.version=0.9.94
 json.version=20090211
 junit.version=4.10
+kryo.version=2.22-SNAPSHOT
 libfb303.version=0.9.0
 libthrift.version=0.9.0
 log4j.version=1.2.16
 maven-ant-tasks.version=2.1.0
 mockito-all.version=1.8.2
-protobuf.version=2.4.1
+protobuf.version=2.5.0
 rat.version=0.8
 slf4j-api.version=1.6.1
 slf4j-log4j12.version=1.6.1

Modified: hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java (original)
+++ hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java Thu Sep 12 01:21:10 2013
@@ -34,12 +34,12 @@ import java.sql.Savepoint;
 import java.sql.Statement;
 import java.sql.Struct;
 import java.util.HashMap;
-import java.util.concurrent.Executor;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
+import java.util.concurrent.Executor;
 
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
@@ -101,9 +101,11 @@ public class HiveConnection implements j
       openTransport(uri, connParams.getHost(), connParams.getPort(), connParams.getSessionVars());
     }
 
-    // currently only V1 is supported
+    // add supported protocols: currently V1 and V2 are supported
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1);
 
+    supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2);
+
     // open client session
     openSession(uri);
 
@@ -124,6 +126,12 @@ public class HiveConnection implements j
         stmt.execute("set " + hiveConf.getKey() + "=" + hiveConf.getValue());
         stmt.close();
       }
+
+      // For remote JDBC client, try to set the hive var using 'set hivevar:key=value'
+      for (Entry<String, String> hiveVar : connParams.getHiveVars().entrySet()) {
+        stmt.execute("set hivevar:" + hiveVar.getKey() + "=" + hiveVar.getValue());
+        stmt.close();
+      }
     }
   }
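
The new loop above replays client-supplied Hive variables on the server as
"set hivevar:key=value" right after the session is opened. A short usage sketch,
assuming the usual HiveServer2 URL layout in which Hive variables follow the '#'
separator and that variable substitution is enabled on the server; the host, table
and variable names are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class HiveVarSketch {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        // "tbl=src" after '#' becomes a Hive variable; HiveConnection forwards it
        // to the server as "set hivevar:tbl=src".
        Connection con = DriverManager.getConnection(
            "jdbc:hive2://localhost:10000/default#tbl=src", "user", "");
        Statement stmt = con.createStatement();
        // The variable can then be referenced in queries as ${hivevar:tbl}.
        ResultSet rs = stmt.executeQuery("select count(*) from ${hivevar:tbl}");
        while (rs.next()) {
          System.out.println(rs.getLong(1));
        }
        stmt.close();
        con.close();
      }
    }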
 

Modified: hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java (original)
+++ hive/branches/vectorization/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java Thu Sep 12 01:21:10 2013
@@ -32,6 +32,8 @@ import org.apache.hive.service.cli.thrif
 import org.apache.hive.service.cli.thrift.TCloseOperationResp;
 import org.apache.hive.service.cli.thrift.TExecuteStatementReq;
 import org.apache.hive.service.cli.thrift.TExecuteStatementResp;
+import org.apache.hive.service.cli.thrift.TGetOperationStatusReq;
+import org.apache.hive.service.cli.thrift.TGetOperationStatusResp;
 import org.apache.hive.service.cli.thrift.TOperationHandle;
 import org.apache.hive.service.cli.thrift.TSessionHandle;
 
@@ -193,6 +195,44 @@ public class HiveStatement implements ja
     }
 
     if (!stmtHandle.isHasResultSet()) {
+      // Poll until the query has completed one way or another. DML queries will not return a result
+      // set, but we should not return from this method until the query has completed to avoid
+      // racing with possible subsequent session shutdown, or queries that depend on the results
+      // materialised here.
+      TGetOperationStatusReq statusReq = new TGetOperationStatusReq(stmtHandle);
+      boolean requestComplete = false;
+      while (!requestComplete) {
+        try {
+          TGetOperationStatusResp statusResp = client.GetOperationStatus(statusReq);
+          Utils.verifySuccessWithInfo(statusResp.getStatus());
+          if (statusResp.isSetOperationState()) {
+            switch (statusResp.getOperationState()) {
+            case CLOSED_STATE:
+            case FINISHED_STATE:
+              return false;
+            case CANCELED_STATE:
+              // 01000 -> warning
+              throw new SQLException("Query was cancelled", "01000");
+            case ERROR_STATE:
+              // HY000 -> general error
+              throw new SQLException("Query failed", "HY000");
+            case UKNOWN_STATE:
+              throw new SQLException("Unknown query", "HY000");
+            case INITIALIZED_STATE:
+            case RUNNING_STATE:
+              break;
+            }
+          }
+        } catch (Exception ex) {
+          throw new SQLException(ex.toString(), "08S01", ex);
+        }
+
+        try {
+          Thread.sleep(100);
+        } catch (InterruptedException ex) {
+          // Ignore
+        }
+      }
       return false;
     }
     resultSet =  new HiveQueryResultSet.Builder().setClient(client).setSessionHandle(sessHandle)
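
From the client's point of view, the polling added above means execute() on a statement
that produces no result set returns only after the server reports a terminal operation
state. A brief sketch of the resulting behaviour, assuming an open HiveServer2 connection
obtained as in the previous sketch; the table names are placeholders:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class BlockingExecuteSketch {
      // 'con' is an open HiveServer2 connection, obtained as in the previous sketch.
      static long insertThenCount(Connection con) throws SQLException {
        Statement stmt = con.createStatement();
        // execute() returns false (no result set) only once the insert has reached
        // FINISHED_STATE on the server, so the data is already in place.
        stmt.execute("insert into table target_t select * from source_t");
        // A follow-up query can therefore rely on the inserted rows being visible.
        ResultSet rs = stmt.executeQuery("select count(*) from target_t");
        rs.next();
        long count = rs.getLong(1);
        stmt.close();
        return count;
      }
    }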

Modified: hive/branches/vectorization/jdbc/src/test/org/apache/hive/jdbc/TestJdbcDriver2.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/jdbc/src/test/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/jdbc/src/test/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
+++ hive/branches/vectorization/jdbc/src/test/org/apache/hive/jdbc/TestJdbcDriver2.java Thu Sep 12 01:21:10 2013
@@ -31,8 +31,9 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
@@ -460,6 +461,85 @@ public class TestJdbcDriver2 extends Tes
     }
   }
 
+  // executeQuery should always throw a SQLException
+  // when it executes a non-ResultSet query (like create)
+  public void testExecuteQueryException() throws Exception {
+    Statement stmt = con.createStatement();
+    try {
+      stmt.executeQuery("create table test_t2 (under_col int, value string)");
+      fail("Expecting SQLException");
+    }
+    catch (SQLException e) {
+      System.out.println("Caught an expected SQLException: " + e.getMessage());
+    }
+    finally {
+      stmt.close();
+    }
+  }
+
+  private void checkResultSetExpected(Statement stmt, List<String> setupQueries, String testQuery,
+      boolean isExpectedResultSet) throws Exception {
+    boolean hasResultSet;
+    // execute the setup queries
+    for(String setupQuery: setupQueries) {
+      try {
+        stmt.execute(setupQuery);
+      } catch (Exception e) {
+        failWithExceptionMsg(e);
+      }
+    }
+    // execute the test query
+    try {
+      hasResultSet = stmt.execute(testQuery);
+      assertEquals(hasResultSet, isExpectedResultSet);
+    }
+    catch(Exception e) {
+      failWithExceptionMsg(e);
+    }
+  }
+
+  private void failWithExceptionMsg(Exception e) {
+    e.printStackTrace();
+    fail(e.toString());
+  }
+
+  public void testNullResultSet() throws Exception {
+    List<String> setupQueries = new ArrayList<String>();
+    String testQuery;
+    boolean hasResultSet;
+    Statement stmt = con.createStatement();
+
+    // -select- should return a ResultSet
+    try {
+      stmt.executeQuery("select * from " + tableName);
+      System.out.println("select: success");
+    }
+    catch(SQLException e) {
+      failWithExceptionMsg(e);
+    }
+
+    // -create- should not return a ResultSet
+    setupQueries.add("drop table test_t1");
+    testQuery = "create table test_t1 (under_col int, value string)";
+    checkResultSetExpected(stmt, setupQueries, testQuery, false);
+    setupQueries.clear();
+
+    // -create table as select- should not return a ResultSet
+    setupQueries.add("drop table test_t1");
+    testQuery = "create table test_t1 as select * from " + tableName;
+    checkResultSetExpected(stmt, setupQueries, testQuery, false);
+    setupQueries.clear();
+
+    // -insert table as select- should not return a ResultSet
+    setupQueries.add("drop table test_t1");
+    setupQueries.add("create table test_t1 (under_col int, value string)");
+    testQuery = "insert into table test_t1 select under_col, value from "  + tableName;
+    checkResultSetExpected(stmt, setupQueries, testQuery, false);
+    setupQueries.clear();
+
+    stmt.close();
+  }
+
   public void testDataTypes() throws Exception {
     Statement stmt = con.createStatement();
 

Modified: hive/branches/vectorization/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/metastore/if/hive_metastore.thrift?rev=1522098&r1=1522097&r2=1522098&view=diff
==============================================================================
--- hive/branches/vectorization/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/vectorization/metastore/if/hive_metastore.thrift Thu Sep 12 01:21:10 2013
@@ -131,16 +131,11 @@ struct Order {
   2: i32    order // asc(1) or desc(0)
 }
 
-// Workaround for HIVE-4322
-struct SkewedValueList {
-  1: list<string> skewedValueList
-}
-
 // this object holds all the information about skewed table
 struct SkewedInfo {
   1: list<string> skewedColNames, // skewed column names
   2: list<list<string>> skewedColValues, //skewed values
-  3: map<SkewedValueList, string> skewedColValueLocationMaps, //skewed value to location mappings
+  3: map<list<string>, string> skewedColValueLocationMaps, //skewed value to location mappings
 }
 
 // this object holds all the information about physical storage of the data belonging to a table