Posted to commits@phoenix.apache.org by ja...@apache.org on 2016/09/16 00:30:44 UTC

[5/5] phoenix git commit: PHOENIX-3249 Make changes in LocalIndexIT for method level parallelization in BaseHBaseManagedTimeTableReuseIT

PHOENIX-3249 Make changes in LocalIndexIT for method level parallelization in BaseHBaseManagedTimeTableReuseIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dc713aa2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dc713aa2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dc713aa2

Branch: refs/heads/4.x-HBase-0.98
Commit: dc713aa2a5a8281fdeac51d150a369ef492c0fa3
Parents: 4bac204
Author: James Taylor <ja...@apache.org>
Authored: Thu Sep 15 17:27:54 2016 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Thu Sep 15 17:31:02 2016 -0700

----------------------------------------------------------------------
 .../end2end/index/IndexOnOwnClusterIT.java      | 37 +++++++++++++-------
 1 file changed, 25 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dc713aa2/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
index 4f0da4d..0c3b168 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
@@ -33,8 +33,9 @@ import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
@@ -174,15 +175,14 @@ public class IndexOnOwnClusterIT extends BaseOwnClusterHBaseManagedTimeIT {
             
             HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
             for (int i = 1; i < 5; i++) {
-                admin.split(physicalTableName, ByteUtil.concat(Bytes.toBytes(strings[3*i])));
+                CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
+                admin.split(physicalTableName.getName(), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
                 List<HRegionInfo> regionsOfUserTable =
-                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
-                                physicalTableName, false);
+                        MetaReader.getTableRegions(ct, physicalTableName, false);
 
                 while (regionsOfUserTable.size() != (4+i)) {
                     Thread.sleep(100);
-                    regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                            admin.getConnection(), physicalTableName, false);
+                    regionsOfUserTable = MetaReader.getTableRegions(ct, physicalTableName, false);
                 }
                 assertEquals(4+i, regionsOfUserTable.size());
                 String[] tIdColumnValues = new String[26]; 
@@ -244,7 +244,7 @@ public class IndexOnOwnClusterIT extends BaseOwnClusterHBaseManagedTimeIT {
             conn1.close();
         }
     }
-
+    
     // Moved from LocalIndexIT because it was causing parallel runs to hang
     @Test
     public void testLocalIndexScanAfterRegionsMerge() throws Exception {
@@ -271,20 +271,33 @@ public class IndexOnOwnClusterIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertTrue(rs.next());
 
             HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+            CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
             List<HRegionInfo> regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
-                        physicalTableName, false);
+                    MetaReader.getTableRegions(ct,
+                            physicalTableName, false);
             admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
                 regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
             regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
+                    MetaReader.getTableRegions(ct,
                             physicalTableName, false);
 
             while (regionsOfUserTable.size() != 3) {
                 Thread.sleep(100);
-                regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                        admin.getConnection(), physicalTableName, false);
+                regionsOfUserTable =
+                        MetaReader.getTableRegions(ct,
+                                physicalTableName, false);
+            }
+            assertEquals(3, regionsOfUserTable.size());
+            TableName indexTable =
+                    TableName.valueOf(indexPhysicalTableName);
+            List<HRegionInfo> regionsOfIndexTable =
+                    MetaReader.getTableRegions(ct, indexTable, false);
+
+            while (regionsOfIndexTable.size() != 3) {
+                Thread.sleep(100);
+                regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
             }
+            assertEquals(3, regionsOfIndexTable.size());
             String query = "SELECT t_id,k1,v1 FROM " + tableName;
             rs = conn1.createStatement().executeQuery(query);
             Thread.sleep(1000);
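----------------------------------------------------------------------

For reference, the 0.98-era metadata lookup this backport switches to boils
down to the polling pattern sketched below. This is only an illustrative
sketch (the class and helper names are made up for the example, and the
expected region count is a parameter); it mirrors the MetaReader/CatalogTracker
calls shown in the diff, which stand in for the MetaTableAccessor API used on
the newer HBase branches.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;

// Hypothetical helper class, not part of the commit itself.
public class MetaReaderPollSketch {

    // Poll hbase:meta through the HBase 0.98 MetaReader/CatalogTracker API until
    // the table reports the expected number of regions, sleeping 100 ms between
    // lookups -- the same wait loop the test uses after splits and merges.
    static List<HRegionInfo> waitForRegionCount(Configuration conf, TableName table,
            int expectedRegions) throws Exception {
        CatalogTracker ct = new CatalogTracker(conf);
        List<HRegionInfo> regions = MetaReader.getTableRegions(ct, table, false);
        while (regions.size() != expectedRegions) {
            Thread.sleep(100);
            regions = MetaReader.getTableRegions(ct, table, false);
        }
        return regions;
    }
}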