Posted to commits@phoenix.apache.org by td...@apache.org on 2018/01/19 18:03:12 UTC

[04/50] [abbrv] phoenix git commit: Store parent -> child view links in SYSTEM.CHILD_LINK table

Store parent -> child view links in SYSTEM.CHILD_LINK table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/758efd9b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/758efd9b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/758efd9b

Branch: refs/heads/system-catalog
Commit: 758efd9b14e61bcb99ebbb0724a948b5f4e8ed11
Parents: 57d8bc4
Author: Thomas D'Silva <td...@apache.org>
Authored: Sat Nov 11 22:32:35 2017 -0800
Committer: Thomas D'Silva <td...@apache.org>
Committed: Sat Nov 11 22:32:35 2017 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/AlterTableWithViewsIT.java  |   2 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |   2 +-
 .../MigrateSystemTablesToSystemNamespaceIT.java |   4 +-
 .../end2end/QueryDatabaseMetaDataIT.java        |   5 +
 .../end2end/TenantSpecificTablesDDLIT.java      |   2 +
 .../java/org/apache/phoenix/end2end/ViewIT.java |   1 -
 .../SystemCatalogWALEntryFilterIT.java          |  18 +-
 .../apache/phoenix/coprocessor/CatalogInfo.java |  90 ----------
 .../coprocessor/MetaDataEndpointImpl.java       | 174 +++++++++++--------
 .../phoenix/coprocessor/MetaDataProtocol.java   |   3 +-
 .../phoenix/coprocessor/OrphanCleaner.java      |   3 +-
 .../apache/phoenix/coprocessor/ViewFinder.java  |   6 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   5 +
 .../query/ConnectionQueryServicesImpl.java      |   8 +-
 .../query/ConnectionlessQueryServicesImpl.java  |   5 +
 .../apache/phoenix/query/QueryConstants.java    |  17 ++
 .../SystemCatalogWALEntryFilter.java            |   7 +-
 .../java/org/apache/phoenix/schema/PTable.java  |   5 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  29 +++-
 .../org/apache/phoenix/util/SchemaUtil.java     |   6 +
 .../org/apache/phoenix/util/UpgradeUtil.java    |  29 ++++
 .../coprocessor/MetaDataEndpointImplTest.java   |   6 +-
 22 files changed, 232 insertions(+), 195 deletions(-)
----------------------------------------------------------------------
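
For orientation before the hunks below: a minimal sketch (not part of the commit) of how a parent -> child view link row is keyed in SYSTEM.CHILD_LINK, using MetaDataUtil.getChildLinkKey the same way doDropTable does further down. The class name, method name, and all schema/table/tenant names here are made up for illustration.

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.phoenix.schema.PNameFactory;
    import org.apache.phoenix.util.MetaDataUtil;

    class ChildLinkKeySketch {
        // Row key parts: parent tenant id, parent schema, parent table name,
        // child tenant id, child SchemaName.tableName (see SystemCatalogWALEntryFilterIT below).
        static Delete dropChildLink(long clientTimeStamp) {
            byte[] linkKey = MetaDataUtil.getChildLinkKey(
                    null,                                        // parent tenant id (global parent table)
                    PNameFactory.newName("MY_SCHEMA"),           // parent schema (hypothetical)
                    PNameFactory.newName("BASE_TABLE"),          // parent table name (hypothetical)
                    PNameFactory.newName("tenant1"),             // child view's tenant id (hypothetical)
                    PNameFactory.newName("MY_SCHEMA.MY_VIEW"));  // child view's full name (hypothetical)
            // The cell stored under this key carries LINK_TYPE = LinkType.CHILD_TABLE (serialized value 4),
            // so removing the link is a Delete on this key in SYSTEM.CHILD_LINK rather than SYSTEM.CATALOG.
            return new Delete(linkKey, clientTimeStamp);
        }
    }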


http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 51e664b..94dc02d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -792,7 +792,7 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
 
 			// PK2 should be in both views
 			sql = "SELECT PK2 FROM " + view1;
-			viewConn2.createStatement().execute(sql);
+			viewConn.createStatement().execute(sql);
 			sql = "SELECT PK2 FROM " + view2;
 			viewConn2.createStatement().execute(sql);
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index de5d993..478b234 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -74,7 +74,7 @@ public abstract class BaseViewIT extends ParallelStatsEnabledIT {
     
     @Parameters(name="transactional = {0}")
     public static Collection<Boolean> data() {
-        return Arrays.asList(new Boolean[] { /*false, */true });
+        return Arrays.asList(new Boolean[] { false, true });
     }
     
     protected void testUpdatableViewWithIndex(Integer saltBuckets, boolean localIndex) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
index 91e34be..ebd4c35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
@@ -58,10 +58,10 @@ public class MigrateSystemTablesToSystemNamespaceIT extends BaseTest {
 
     private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
             "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
-            "SYSTEM.MUTEX"));
+            "SYSTEM.MUTEX", "SYSTEM.CHILD_LINK"));
     private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(
             Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION",
-                    "SYSTEM:MUTEX"));
+                    "SYSTEM:MUTEX", "SYSTEM:CHILD_LINK"));
     private static final String SCHEMA_NAME = "MIGRATETEST";
     private static final String TABLE_NAME =
             SCHEMA_NAME + "." + MigrateSystemTablesToSystemNamespaceIT.class.getSimpleName().toUpperCase();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index eff053c..eb5bb4b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static org.apache.phoenix.util.TestUtil.ATABLE_NAME;
@@ -134,6 +135,10 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
             assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE"));
             assertTrue(rs.next());
             assertEquals(SYSTEM_CATALOG_SCHEMA, rs.getString("TABLE_SCHEM"));
+            assertEquals(SYSTEM_CHILD_LINK_TABLE, rs.getString("TABLE_NAME"));
+            assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE"));
+            assertTrue(rs.next());
+            assertEquals(SYSTEM_CATALOG_SCHEMA, rs.getString("TABLE_SCHEM"));
             assertEquals(SYSTEM_FUNCTION_TABLE, rs.getString("TABLE_NAME"));
             assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE"));
             assertTrue(rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 1e6f59c..6830d54 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -491,6 +491,8 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
             assertTrue(rs.next());
             assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, PTableType.SYSTEM);
             assertTrue(rs.next());
+            assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_TABLE, PTableType.SYSTEM);
+            assertTrue(rs.next());
             assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, SYSTEM_FUNCTION_TABLE, SYSTEM);
             assertTrue(rs.next());
             assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.TYPE_SEQUENCE, PTableType.SYSTEM);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index f32ec9b..e74ddba 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -51,7 +51,6 @@ import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class ViewIT extends BaseViewIT {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
index 776e300..0be5665 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
@@ -69,9 +69,12 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
   private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + TENANT_VIEW_NAME;
   private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + NONTENANT_VIEW_NAME;
   private static PTable catalogTable;
+  private static PTable childLinkTable;
   private static WALKey walKey = null;
   private static TableName systemCatalogTableName =
       TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+  private static TableName systemChildLinkTableName =
+	      TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);
 
 
   @BeforeClass
@@ -85,6 +88,7 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
       ensureTableCreated(getUrl(), TestUtil.ENTITY_HISTORY_TABLE_NAME);
       connection.createStatement().execute(CREATE_TENANT_VIEW_SQL);
       catalogTable = PhoenixRuntime.getTable(connection, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+      childLinkTable = PhoenixRuntime.getTable(connection, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);
       walKey = new WALKey(REGION, TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME), 0, 0, uuid);
     };
     Assert.assertNotNull(catalogTable);
@@ -127,15 +131,15 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
     Get nonTenantViewGet = getTenantViewGet(catalogTable,
         DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
 
-    Get tenantLinkGet = getParentChildLinkGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
-    Get nonTenantLinkGet = getParentChildLinkGet(catalogTable,
+    Get tenantLinkGet = getParentChildLinkGet(childLinkTable, TENANT_BYTES, TENANT_VIEW_NAME);
+    Get nonTenantLinkGet = getParentChildLinkGet(childLinkTable,
         DEFAULT_TENANT_BYTES, NONTENANT_VIEW_NAME);
 
     WAL.Entry nonTenantViewEntry = getEntry(systemCatalogTableName, nonTenantViewGet);
     WAL.Entry tenantViewEntry = getEntry(systemCatalogTableName, tenantViewGet);
 
-    WAL.Entry nonTenantLinkEntry = getEntry(systemCatalogTableName, nonTenantLinkGet);
-    WAL.Entry tenantLinkEntry = getEntry(systemCatalogTableName, tenantLinkGet);
+    WAL.Entry nonTenantLinkEntry = getEntry(systemChildLinkTableName, nonTenantLinkGet);
+    WAL.Entry tenantLinkEntry = getEntry(systemChildLinkTableName, tenantLinkGet);
 
     //verify that the tenant view WAL.Entry passes the filter and the non-tenant view does not
     SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
@@ -191,8 +195,8 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
     return new Get(key.copyBytes());
   }
 
-  public Get getParentChildLinkGet(PTable catalogTable, byte[] tenantBytes, String viewName) {
-    /* For parent-child link, the system.catalog key becomes
+  public Get getParentChildLinkGet(PTable linkTable, byte[] tenantBytes, String viewName) {
+    /* For parent-child link, the system.child_link key becomes
       1. Parent tenant id
       2. Parent Schema
       3. Parent Table name
@@ -206,7 +210,7 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
     tenantKeyParts[3] = tenantBytes;
     tenantKeyParts[4] = Bytes.toBytes(SchemaUtil.getTableName(SCHEMA_NAME.toUpperCase(), viewName.toUpperCase()));
     ImmutableBytesWritable key = new ImmutableBytesWritable();
-    catalogTable.newKey(key, tenantKeyParts);
+    linkTable.newKey(key, tenantKeyParts);
     //the backing byte array of key might have extra space at the end.
     // need to just slice "the good parts" which we do by calling copyBytes
     return new Get(key.copyBytes());
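
Because the links now live in their own table, the replication filter has to be exercised against WAL entries from SYSTEM.CHILD_LINK as well, which is what the test changes above do. A minimal usage sketch under the HBase 1.x WAL API these tests use; the class and method names are made up, and building the WAL.Entry itself is left to the caller (getEntry in this test constructs one from a Get):

    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.phoenix.replication.SystemCatalogWALEntryFilter;

    class ChildLinkReplicationSketch {
        // Returns true if any cell of the given SYSTEM.CHILD_LINK WAL entry would be replicated.
        static boolean wouldReplicate(WAL.Entry childLinkEntry) {
            SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
            WAL.Entry filtered = filter.filter(childLinkEntry);
            // Tenant-owned link cells survive filtering; links between purely global objects do not.
            return filtered != null && !filtered.getEdit().getCells().isEmpty();
        }
    }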

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/CatalogInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/CatalogInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/CatalogInfo.java
deleted file mode 100644
index 52d7fad..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/CatalogInfo.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.util.MetaDataUtil;
-
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-public class CatalogInfo {
-
-    private List<Mutation> allMutations;
-
-    public CatalogInfo(List<Mutation> mutations) {
-        this.allMutations = mutations;
-    }
-
-    public LinkedList<MutationType> getGroupedMutations() {
-        LinkedList<MutationType> result = Lists.newLinkedList();
-        Iterator<Map.Entry<String, MutationType>> iterator = groupByTable().entrySet().iterator();
-        while (iterator.hasNext()) {
-            result.addLast(iterator.next().getValue());
-        }
-        return result;
-    }
-
-    private LinkedHashMap<String, MutationType> groupByTable() {
-        LinkedHashMap<String, MutationType> map = Maps.newLinkedHashMap();
-        for (Mutation dataMutation : allMutations) {
-            String groupBy = Bytes.toString(MetaDataUtil.getTenantIdAndSchemaAndTableName(dataMutation));
-            if (!map.containsKey(groupBy)) {
-                map.put(groupBy, new MutationType());
-            }
-            map.get(groupBy).addMutation(dataMutation);
-        }
-        return map;
-    }
-
-    static class MutationType {
-        private final List<Mutation> dataMutations;
-        private final List<Mutation> linkMutations;
-
-        public MutationType() {
-            this.dataMutations = Lists.newArrayList();
-            this.linkMutations = Lists.newArrayList();
-        }
-
-        public void addMutation(Mutation mutation) {
-            if (MetaDataUtil.isLinkingRow(mutation)) {
-                this.linkMutations.add(mutation);
-            } else {
-                this.dataMutations.add(mutation);
-            }
-        }
-
-        public List<Mutation> getDataMutations() {
-            return dataMutations;
-        }
-
-        public List<Mutation> getLinkMutations() {
-            return linkMutations;
-        }
-
-        public List<Mutation> getAllMutations() {
-            List<Mutation> result = Lists.newArrayList(dataMutations);
-            result.addAll(linkMutations);
-            return result;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 1ccd1fb..9f7a629 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -104,10 +104,13 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -1199,7 +1202,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
               } else if (linkType == LinkType.PARENT_TABLE) {
                   parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
                   parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
-              } else if (linkType == LinkType.DROPPED_COLUMN) {
+              } else if (linkType == LinkType.EXCLUDED_COLUMN) {
                   // add the excludedColumn
                   addExcludedColumnToTable(columns, colName, famName, colKv.getTimestamp());
               }
@@ -1583,13 +1586,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
             schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
             tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
-            String fullName = SchemaUtil.getTableName(schemaName, tableName);
-            // no need to run OrpanCleaner (which cleans up orphaned views) on SYSTEM tables 
-            if (!schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) {
+            // no need to run OrphanCleaner (which cleans up orphaned views) while creating SYSTEM tables
+            if (Bytes.compareTo(schemaName,PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME_BYTES)!=0) {
 	            HTableInterface systemCatalog = null;
 	            try {
-	                systemCatalog = env.getTable(SchemaUtil
-	                        .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
+	            	// can't use SchemaUtil.getPhysicalTableName on server side as we don't know whether 
+	            	// the system tables have been migrated to the system namespaces
+	            	TableName systemCatalogTableName = env.getRegion().getTableDesc().getTableName();
+	                systemCatalog = env.getTable(systemCatalogTableName);
 	                OrphanCleaner.reapOrphans(systemCatalog, tenantIdBytes, schemaName, tableName);
 	            } finally {
 	                if (systemCatalog != null) {
@@ -1870,7 +1874,24 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         indexId = (short) seqValue;
                     }
                 }
-
+                
+                // the child links are stored in a separate table SYSTEM.CHILD_LINK from 4.14 onwards
+                List<Mutation> childLinkMutations = MetaDataUtil.removeChildLinks(tableMetadata);
+                HTableInterface hTable = null;
+                try {
+                	hTable = env.getTable(SchemaUtil
+	                        .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, env.getConfiguration()));
+                    hTable.batch(childLinkMutations);
+                } catch (Throwable t) {
+                    logger.error("creating child links failed", t);
+                    ProtobufUtil.setControllerException(controller,
+                        ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
+                } finally {
+                    if (hTable != null) {
+                        hTable.close();
+                    }
+                }
+                
                 // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
                 // system table. Basically, we get all the locks that we don't already hold for all the
                 // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
@@ -1980,7 +2001,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
     private void findAllChildViews(byte[] tenantId, byte[] schemaName, byte[] tableName, TableViewFinderResult result) throws IOException {
     	HTableInterface hTable = env.getTable(SchemaUtil
-                .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
+                .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, env.getConfiguration()));
         try {
             ViewFinder.findAllRelatives(hTable, tenantId, schemaName, tableName, LinkType.CHILD_TABLE, result);
         } finally {
@@ -1988,8 +2009,17 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
     
-    private static final byte[] PHYSICAL_TABLE_BYTES =
-            new byte[] { PTable.LinkType.PHYSICAL_TABLE.getSerializedValue() };
+	private void separateLocalAndRemoteMutations(Region region, List<Mutation> mutations,
+			List<Mutation> localRegionMutations, List<Mutation> remoteRegionMutations) {
+		HRegionInfo regionInfo = region.getRegionInfo();
+		for (Mutation mutation : mutations) {
+			if (regionInfo.containsRow(mutation.getRow())) {
+				localRegionMutations.add(mutation);
+			} else {
+				remoteRegionMutations.add(mutation);
+			}
+		}
+	}
 
     @Override
     public void dropTable(RpcController controller, DropTableRequest request,
@@ -2002,8 +2032,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] tableName = null;
 
         try {
-            List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
-            MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
+            List<Mutation> catalogMutations = ProtobufUtil.getMutations(request);
+            List<Mutation> childLinkMutations = Lists.newArrayList();
+        	List<Mutation> localRegionMutations = Lists.newArrayList();
+			List<Mutation> remoteRegionMutations = Lists.newArrayList();
+            MetaDataUtil.getTenantIdAndSchemaAndTableName(catalogMutations, rowKeyMetaData);
             byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
             schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
             tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
@@ -2016,7 +2049,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             }
             List<byte[]> tableNamesToDelete = Lists.newArrayList();
             List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
-            byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
+            byte[] parentTableName = MetaDataUtil.getParentTableName(catalogMutations);
             byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
             byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
             byte[] key =
@@ -2030,7 +2063,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 return;
             }
             List<RowLock> locks = Lists.newArrayList();
-            LinkedList<CatalogInfo.MutationType> groupedMutations = Lists.newLinkedList();
             try {
                 acquireLock(region, lockKey, locks);
                 if (key != lockKey) {
@@ -2039,21 +2071,26 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
                 result =
                         doDropTable(key, tenantIdBytes, schemaName, tableName, parentTableName,
-                            PTableType.fromSerializedValue(tableType), tableMetadata,
-                            invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, isCascade, request.getClientVersion());
+                            PTableType.fromSerializedValue(tableType), catalogMutations, childLinkMutations,
+                            invalidateList, tableNamesToDelete, sharedTablesToDelete, isCascade, request.getClientVersion());
                 if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                     done.run(MetaDataMutationResult.toProto(result));
                     return;
                 }
-                Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
-                groupedMutations = new CatalogInfo(tableMetadata).getGroupedMutations();
-                // for the parent table, we don't want to delete the link, this happens last.
-                CatalogInfo.MutationType originalMutations = groupedMutations.getFirst();
-                region.mutateRowsWithLocks(originalMutations.getDataMutations(),
-                    Collections.<byte[]>emptyList(), HConstants.NO_NONCE, HConstants.NO_NONCE);
-                logger.debug("Issuing Deletes: " + originalMutations.getDataMutations());
-
-                long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
+				Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env)
+						.getMetaDataCache();
+				// since the mutations in catalogMutations can span multiple
+				// regions, we first process the mutations local to this region,
+				// then the remaining mutations, and finally the child link
+				// mutations; if any of the mutations fail, we clean them up
+				// later using OrphanCleaner.reapOrphans()
+				separateLocalAndRemoteMutations(region, catalogMutations, localRegionMutations, remoteRegionMutations);
+				// drop rows from catalog on this region
+				region.mutateRowsWithLocks(localRegionMutations, Collections.<byte[]> emptyList(), HConstants.NO_NONCE,
+						HConstants.NO_NONCE);
+
+                long currentTime = MetaDataUtil.getClientTimeStamp(catalogMutations);
                 for (ImmutableBytesPtr ckey : invalidateList) {
                     metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
                 }
@@ -2065,35 +2102,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 return;
             } finally {
                 region.releaseRowLocks(locks);
-                HTableInterface hTable = null;
-                try {
-                	hTable = env.getTable(SchemaUtil
-	                        .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
-                    // Now batch each table and delete, any table / view should not span more than a single
-                    // region so either this succeeds or fails.
-                    for (int i = groupedMutations.size() - 1; i >= 1; i--) {
-                        List<Mutation> mutationsToApply = groupedMutations.get(i).getAllMutations();
-                        logger.info("Issuing Deletes: " + mutationsToApply);
-                        Object[] appliedMutations = new Object[mutationsToApply.size()];
-                        hTable.batch(mutationsToApply, appliedMutations);
-                    }
-                    // now we can finally delete that linking row for the original table and we are done
-                    // if there are deletes:
-                    if (!groupedMutations.isEmpty()) {
-                        List<Mutation> linkMutations = groupedMutations.getFirst().getLinkMutations();
-                        logger.info("Issuing Deletes: " + linkMutations);
-                        Object[] appliedMutations = new Object[linkMutations.size()];
-                        hTable.batch(linkMutations, appliedMutations);
-                    }
-                } catch (Throwable t) {
-                    logger.error("dropTable failed", t);
-                    ProtobufUtil.setControllerException(controller,
-                        ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
-                } finally {
-                    if (hTable != null) {
-                        hTable.close();
-                    }
-                }
+                // drop rows from catalog on remote regions
+                processMutations(controller, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, SchemaUtil.getTableName(schemaName, tableName), remoteRegionMutations);
+                // drop all child links 
+                processMutations(controller, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, SchemaUtil.getTableName(schemaName, tableName), childLinkMutations);
             }
         } catch (Throwable t) {
           logger.error("dropTable failed", t);
@@ -2102,13 +2114,28 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName,
-        byte[] tableName, byte[] parentTableName, PTableType tableType, List<Mutation> rowsToDelete,
-        List<ImmutableBytesPtr> invalidateList, List<RowLock> locks,
-        List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete, boolean isCascade, int clientVersion) throws IOException, SQLException {
-
+	private void processMutations(RpcController controller, byte[] systemTableName, String droppedTableName,
+			List<Mutation> childLinkMutations) throws IOException {
+		HTableInterface hTable = null;
+		try {
+			hTable = env.getTable(SchemaUtil.getPhysicalTableName(systemTableName, env.getConfiguration()));
+			hTable.batch(childLinkMutations);
+		} catch (Throwable t) {
+			logger.error("dropTable failed", t);
+			ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(droppedTableName, t));
+		} finally {
+			if (hTable != null) {
+				hTable.close();
+			}
+		}
+	}
 
-        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
+	private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, byte[] tableName,
+			byte[] parentTableName, PTableType tableType, List<Mutation> catalogMutations,
+			List<Mutation> childLinkMutations, List<ImmutableBytesPtr> invalidateList, List<byte[]> tableNamesToDelete,
+			List<SharedTableState> sharedTablesToDelete, boolean isCascade, int clientVersion)
+			throws IOException, SQLException {
+        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(catalogMutations);
 
         Region region = env.getRegion();
         ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
@@ -2159,17 +2186,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 findAllChildViews(tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes(), tableViewFinderResult);
                 if (tableViewFinderResult.hasViews()) {
                     if (isCascade) {
-                        // Recursively delete views - safe as all the views as all in the same region
-
+                        // Recursively delete views, adding the mutations to delete child views to catalogMutations
                         for (TableInfo tableInfo : tableViewFinderResult.getResults()) {
                             byte[] viewTenantId = tableInfo.getTenantId();
                             byte[] viewSchemaName = tableInfo.getSchemaName();
                             byte[] viewName = tableInfo.getTableName();
                             byte[] viewKey = tableInfo.getRowKeyPrefix();
                             Delete delete = new Delete(tableInfo.getRowKeyPrefix(), clientTimeStamp);
-                            rowsToDelete.add(delete);
+                            catalogMutations.add(delete);
                             MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName,
-                                    viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks,
+                                    viewName, null, PTableType.VIEW, catalogMutations, childLinkMutations, invalidateList,
                                     tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
                             if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                                 return result;
@@ -2208,7 +2234,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             PName parentTenantId = parentTenantIdCell!=null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
                             byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
                             Delete linkDelete = new Delete(linkKey, clientTimeStamp);
-                            rowsToDelete.add(linkDelete);
+                            childLinkMutations.add(linkDelete);
                         }
                 }
                 // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
@@ -2216,7 +2242,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
                 // of the client.
                 Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
-                rowsToDelete.add(delete);
+                catalogMutations.add(delete);
                 results.clear();
                 scanner.next(results);
             } while (!results.isEmpty());
@@ -2230,10 +2256,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
             // of the client.
             Delete delete = new Delete(indexKey, clientTimeStamp);
-            rowsToDelete.add(delete);
+            catalogMutations.add(delete);
             MetaDataMutationResult result =
                     doDropTable(indexKey, tenantId, schemaName, indexName, tableName, PTableType.INDEX,
-                        rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
+                        catalogMutations, childLinkMutations, invalidateList, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
             if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                 return result;
             }
@@ -3165,16 +3191,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 										continue;
                                     deletePKColumn = columnToDelete.getFamilyName() == null;
 									if (isView) {
-                                        // convert a delete into a put row.  I know this is weird but see Phoenix-xxxx jira for
-                                        // reasoning.  Only drop if its in a view and its not created by the view
+                                        // if we are dropping a derived column add it to the excluded column list
                                         if (columnToDelete.isDerived()) {
                                             mutation = MetaDataUtil
-                                                .cloneDeleteToPutAndAddColumn((Delete) mutation, TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, LinkType.DROPPED_COLUMN.getSerializedValueAsByteArray());
+                                                .cloneDeleteToPutAndAddColumn((Delete) mutation, TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, LinkType.EXCLUDED_COLUMN.getSerializedValueAsByteArray());
                                             iterator.set(mutation);
                                         }
 
                                         if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT
-                                            && columnToDelete.getPosition() < table.getBaseColumnCount()) {
+                                            && columnToDelete.isDerived()) {
                                             /*
                                              * If the column being dropped is inherited from the base table, then the
                                              * view is about to diverge itself from the base table. The consequence of
@@ -3275,9 +3300,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // Drop the link between the data table and the
                 // index table
                 additionalTableMetaData.add(new Delete(linkKey, clientTimeStamp));
+                List<Mutation> childLinksMutations = Lists.newArrayList();
                 doDropTable(indexKey, tenantId, index.getSchemaName().getBytes(), index
                         .getTableName().getBytes(), tableName, index.getType(),
-                    additionalTableMetaData, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
+                    additionalTableMetaData, childLinksMutations, invalidateList, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
+                // there should be no child links to delete since we are just dropping an index
+                assert(childLinksMutations.isEmpty());
                 invalidateList.add(new ImmutableBytesPtr(indexKey));
             }
             // If the dropped column is a covered index column, invalidate the index
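
The net effect of the dropTable changes above, condensed into one sketch. This is assumed to sit inside MetaDataEndpointImpl (so the class's existing imports, fields and helpers apply); the method name applyDropMutations is made up, and locking, cache invalidation and error handling stay as in the hunks:

    // Runs after doDropTable has filled catalogMutations (SYSTEM.CATALOG deletes)
    // and childLinkMutations (SYSTEM.CHILD_LINK deletes).
    private void applyDropMutations(RpcController controller, Region region, String fullTableName,
            List<Mutation> catalogMutations, List<Mutation> childLinkMutations) throws IOException {
        List<Mutation> localRegionMutations = Lists.newArrayList();
        List<Mutation> remoteRegionMutations = Lists.newArrayList();
        // 1. catalog rows owned by this region are applied atomically under row locks
        separateLocalAndRemoteMutations(region, catalogMutations, localRegionMutations, remoteRegionMutations);
        region.mutateRowsWithLocks(localRegionMutations, Collections.<byte[]> emptyList(),
                HConstants.NO_NONCE, HConstants.NO_NONCE);
        // 2. catalog rows that live on other regions go through a plain HTable batch
        processMutations(controller, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, fullTableName, remoteRegionMutations);
        // 3. finally the parent->child links are removed from SYSTEM.CHILD_LINK;
        //    partial failures are repaired later by OrphanCleaner.reapOrphans
        processMutations(controller, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, fullTableName, childLinkMutations);
    }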

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index fd438a8..dd619eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -92,8 +92,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 27;
     // Since there's no upgrade code, keep the version the same as the previous version
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = MIN_TABLE_TIMESTAMP + 28;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0;
     
     // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). 
     // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OrphanCleaner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OrphanCleaner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OrphanCleaner.java
index bd6627a..f8ba130 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OrphanCleaner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OrphanCleaner.java
@@ -42,7 +42,8 @@ class OrphanCleaner {
         }
         for (int i = listOBytes.size() - 1; i >= 0; i--) {
             List<Delete> deletes = traverseUpAndDelete(hTable, listOBytes.get(i));
-            // add the linking row as well if needed
+            // TODO ask rahul if this delete is required
+            // add the linking row as well if needed 
             deletes.add(new Delete(listOBytes.get(i)));
             hTable.delete(deletes);
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ViewFinder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ViewFinder.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ViewFinder.java
index 267faed..36a3f9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ViewFinder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ViewFinder.java
@@ -60,9 +60,9 @@ class ViewFinder {
                 HConstants.LATEST_TIMESTAMP);
         }
 
-    static void findAllRelatives(Table systemCatalog, byte[] tenantId, byte[] schema, byte[] table,
+    static void findAllRelatives(Table systemTable, byte[] tenantId, byte[] schema, byte[] table,
         PTable.LinkType linkType, TableViewFinderResult result) throws IOException {
-        findAllRelatives(systemCatalog, tenantId, schema, table, linkType, HConstants.LATEST_TIMESTAMP, result);
+        findAllRelatives(systemTable, tenantId, schema, table, linkType, HConstants.LATEST_TIMESTAMP, result);
     }
 
     static void findAllRelatives(Table systemCatalog, byte[] tenantId, byte[] schema, byte[] table,
@@ -77,7 +77,7 @@ class ViewFinder {
 
     static TableViewFinderResult findRelatedViews(Table systemCatalog, byte[] tenantId, byte[] schema, byte[] table,
         PTable.LinkType linkType, long timestamp) throws IOException {
-        if (linkType==PTable.LinkType.INDEX_TABLE || linkType==PTable.LinkType.DROPPED_COLUMN) {
+        if (linkType==PTable.LinkType.INDEX_TABLE || linkType==PTable.LinkType.EXCLUDED_COLUMN) {
             throw new IllegalArgumentException("findAllRelatives does not support link type "+linkType);
         }
         Scan scan = new Scan();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 3f5b8d4..fbb5a42 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -368,6 +368,11 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final byte[] COLUMN_QUALIFIER_COUNTER_BYTES = Bytes.toBytes(COLUMN_QUALIFIER_COUNTER);
     public static final String USE_STATS_FOR_PARALLELIZATION = "USE_STATS_FOR_PARALLELIZATION";
     public static final byte[] USE_STATS_FOR_PARALLELIZATION_BYTES = Bytes.toBytes(USE_STATS_FOR_PARALLELIZATION);
+    
+    public static final String SYSTEM_CHILD_LINK_TABLE = "CHILD_LINK";
+    public static final String SYSTEM_CHILD_LINK_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_CHILD_LINK_TABLE);
+    public static final byte[] SYSTEM_CHILD_LINK_NAME_BYTES = Bytes.toBytes(SYSTEM_CHILD_LINK_NAME);
+    public static final TableName SYSTEM_LINK_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_CHILD_LINK_NAME);
 
     PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException {
         this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new StatementContext(new PhoenixStatement(connection), false));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index a198437..3a4a5d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2564,6 +2564,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try {
             metaConnection.createStatement().execute(QueryConstants.CREATE_FUNCTION_METADATA);
         } catch (TableAlreadyExistsException ignore) {}
+        try {
+            metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_CHILD_LINK_METADATA);
+        } catch (TableAlreadyExistsException e) {}
 
         // Catch the IOException to log the error message and then bubble it up for the client to retry.
         try {
@@ -2822,7 +2825,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 + PBoolean.INSTANCE.getSqlTypeName());
                     addParentToChildLinks(metaConnection);
                 }
-                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0) {
+                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0) {
                     metaConnection = addColumnsIfNotExists(
                         metaConnection,
                         PhoenixDatabaseMetaData.SYSTEM_CATALOG,
@@ -2916,6 +2919,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             try {
                 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_FUNCTION_METADATA);
             } catch (NewerTableAlreadyExistsException e) {} catch (TableAlreadyExistsException e) {}
+            try {
+                metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_CHILD_LINK_METADATA);
+            } catch (NewerTableAlreadyExistsException e) {} catch (TableAlreadyExistsException e) {}
             ConnectionQueryServicesImpl.this.upgradeRequired.set(false);
             success = true;
         } catch (UpgradeInProgressException | UpgradeNotRequiredException e) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index b8a2427..440ff47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -321,6 +321,11 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
                    metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_FUNCTION_METADATA);
                 } catch (NewerTableAlreadyExistsException ignore) {
                 }
+                
+                try {
+                    metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_CHILD_LINK_METADATA);
+                 } catch (NewerTableAlreadyExistsException ignore) {
+                 }
             } catch (SQLException e) {
                 sqlE = e;
             } finally {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 7607388..aed22cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -93,6 +93,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
@@ -401,4 +402,20 @@ public interface QueryConstants {
     public static final byte[] UPGRADE_MUTEX = "UPGRADE_MUTEX".getBytes();
     public static final String HASH_JOIN_CACHE_RETRIES = "hashjoin.client.retries.number";
     public static final int DEFAULT_HASH_JOIN_CACHE_RETRIES = 5;
+    
+    // Links from parent to child views are stored in a separate table for scalability
+    public static final String CREATE_CHILD_LINK_METADATA =
+            "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CHILD_LINK_TABLE + "\"(\n" +
+            // PK columns
+            TENANT_ID + " VARCHAR NULL," +
+            TABLE_SCHEM + " VARCHAR NULL," +
+            TABLE_NAME + " VARCHAR NOT NULL," +
+            COLUMN_NAME + " VARCHAR NULL," + 
+            COLUMN_FAMILY + " VARCHAR NULL," +
+            LINK_TYPE + " UNSIGNED_TINYINT,\n" +
+            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
+            + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
+            HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
+            HColumnDescriptor.KEEP_DELETED_CELLS + "="  + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
+            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
 }
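
Once CREATE_CHILD_LINK_METADATA has been executed (the ConnectionQueryServicesImpl hunks above run it on connection setup and during upgrade), the new table is visible through JDBC metadata alongside the other SYSTEM tables, as the QueryDatabaseMetaDataIT change asserts. A rough client-side check; the class name and JDBC URL are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    class ChildLinkTableCheck {
        public static void main(String[] args) throws Exception {
            // Connecting triggers ConnectionQueryServices initialization, which issues
            // QueryConstants.CREATE_CHILD_LINK_METADATA if the table does not exist yet.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.getMetaData().getTables(null, "SYSTEM", "CHILD_LINK", null);
                System.out.println(rs.next() ? "SYSTEM.CHILD_LINK is present" : "SYSTEM.CHILD_LINK not found");
            }
        }
    }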

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java
index 1dd79be..bc19fb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java
@@ -35,7 +35,7 @@ import java.util.List;
  * during cluster upgrades. However, tenant-owned data such as tenant-owned views need to
  * be copied. This WALEntryFilter will only allow tenant-owned rows in SYSTEM.CATALOG to
  * be replicated. Data from all other tables is automatically passed. It will also copy
- * child links in SYSTEM.CATALOG that are globally-owned but point to tenant-owned views.
+ * child links in SYSTEM.CHILD_LINK that are globally-owned but point to tenant-owned views.
  *
  */
 public class SystemCatalogWALEntryFilter implements WALEntryFilter {
@@ -43,9 +43,10 @@ public class SystemCatalogWALEntryFilter implements WALEntryFilter {
   @Override
   public WAL.Entry filter(WAL.Entry entry) {
 
-    //if the WAL.Entry's table isn't System.Catalog, it auto-passes this filter
+    //if the WAL.Entry's table isn't System.Catalog or System.Child_Link, it auto-passes this filter
     //TODO: when Phoenix drops support for pre-1.3 versions of HBase, redo as a WALCellFilter
-    if (!SchemaUtil.isMetaTable(entry.getKey().getTablename().getName())){
+    byte[] tableName = entry.getKey().getTablename().getName();
+	if (!SchemaUtil.isMetaTable(tableName) && !SchemaUtil.isChildLinkTable(tableName)){
       return entry;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index db43c75..4cd6dd1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -150,12 +150,13 @@ public interface PTable extends PMetaDataEntity {
         PARENT_TABLE((byte)3),
         /**
          * Link from a parent table to its child view
+         * (these are stored in SYSTEM.CHILD_LINK for scalability)
          */
         CHILD_TABLE((byte)4),
         /**
-         * Link for an dropped column
+         * Link for an excluded (dropped) column
          */
-        DROPPED_COLUMN((byte)5),
+        EXCLUDED_COLUMN((byte)5),
         /**
          * Link from an index on a view to its parent table
          */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index ea05f7f..f860d68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -24,6 +24,7 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.NavigableMap;
 
@@ -734,13 +735,6 @@ public class MetaDataUtil {
         return null;
     }
 
-    public static boolean isLinkingRow(Mutation tableMutation) {
-        byte[][] array = new byte[5][];
-        getVarChars(tableMutation.getRow(), array);
-        return array[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX] != null && Bytes
-            .equals(array[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX], HConstants.EMPTY_BYTE_ARRAY);
-    }
-
     public static boolean isLocalIndex(String physicalName) {
         if (physicalName.contains(LOCAL_INDEX_TABLE_PREFIX)) { return true; }
         return false;
@@ -800,4 +794,25 @@ public class MetaDataUtil {
         byte[] physicalTableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString()));
         return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, physicalTableName);
     }
+    
+    public static List<Mutation> removeChildLinks(List<Mutation> catalogMutations) {
+        List<Mutation> childLinks = Lists.newArrayList();
+        Iterator<Mutation> iter = catalogMutations.iterator();
+        while (iter.hasNext()) {
+            Mutation m = iter.next();
+            for (KeyValue kv : m.getFamilyMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES)) {
+                // remove mutations of link type LinkType.CHILD_TABLE
+                if ((Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
+                        PhoenixDatabaseMetaData.LINK_TYPE_BYTES, 0,
+                        PhoenixDatabaseMetaData.LINK_TYPE_BYTES.length) == 0)
+                        && ((Bytes.compareTo(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(),
+                                LinkType.CHILD_TABLE.getSerializedValueAsByteArray(), 0,
+                                LinkType.CHILD_TABLE.getSerializedValueAsByteArray().length) == 0))) {
+                    childLinks.add(m);
+                    iter.remove();
+                    // stop scanning this mutation's cells once it has been moved so that
+                    // a second matching cell cannot trigger another iter.remove()
+                    break;
+                }
+            }
+        }
+        return childLinks;
+    }
 }

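A hypothetical caller-side sketch (not part of this patch) of how the two lists produced by removeChildLinks would be routed: the parent->child link rows go to SYSTEM.CHILD_LINK while the remaining metadata mutations stay in SYSTEM.CATALOG. MutationSink is a made-up abstraction; the real code applies the mutations through the region/HTable machinery in MetaDataEndpointImpl.

    import java.util.List;

    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.phoenix.util.MetaDataUtil;

    public class ChildLinkRoutingSketch {
        /** Hypothetical destination abstraction for a batch of mutations. */
        interface MutationSink {
            void apply(List<Mutation> mutations);
        }

        public static void route(List<Mutation> catalogMutations,
                                 MutationSink catalogSink, MutationSink childLinkSink) {
            // removeChildLinks prunes catalogMutations in place and returns the
            // LinkType.CHILD_TABLE rows it pulled out.
            List<Mutation> childLinks = MetaDataUtil.removeChildLinks(catalogMutations);
            catalogSink.apply(catalogMutations);   // -> SYSTEM.CATALOG
            childLinkSink.apply(childLinks);       // -> SYSTEM.CHILD_LINK
        }
    }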
http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 3e1a2f2..0060993 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -21,6 +21,7 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Strings.isNullOrEmpty;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES;
 import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE;
@@ -504,6 +505,11 @@ public class SchemaUtil {
                 || Bytes.compareTo(tableName, SchemaUtil
                         .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true).getName()) == 0;
     }
+    
+    public static boolean isChildLinkTable(byte[] tableName) {
+        return Bytes.compareTo(tableName, SYSTEM_CHILD_LINK_NAME_BYTES) == 0 || Bytes.compareTo(tableName,
+                SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, true).getName()) == 0;
+    }
 
     public static boolean isSequenceTable(PTable table) {
         return PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME.equals(table.getName().getString());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index f8d98b9..cd574dc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -1146,6 +1146,35 @@ public class UpgradeUtil {
         }
     }
     
+    /**
+     * Move child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK
+     * @param oldMetaConnection caller should take care of closing the passed connection appropriately
+     * @throws SQLException
+     */
+    public static void moveChildLinks(PhoenixConnection oldMetaConnection) throws SQLException {
+        PhoenixConnection metaConnection = null;
+        try {
+            // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
+            metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+            logger.info("Upgrading metadata to move parent->child view links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK");
+            metaConnection.commit();
+            String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " +
+                                        "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " +
+                                        "FROM SYSTEM.CATALOG " + 
+                                        "WHERE LINK_TYPE = 4";
+            metaConnection.createStatement().execute(createChildLink);
+            metaConnection.commit();
+            String deleteChildLink = "DELETE FROM SYSTEM.CATALOG WHERE LINK_TYPE = 4 ";
+            metaConnection.createStatement().execute(deleteChildLink);
+            metaConnection.commit();
+            metaConnection.getQueryServices().clearCache();
+        } finally {
+            if (metaConnection != null) {
+                metaConnection.close();
+            }
+        }
+    }
+    
     public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException {
     	// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
         try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);

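A sketch of how this one-time migration would be invoked from an upgrade path (the wrapper class is hypothetical; only moveChildLinks comes from this patch). LINK_TYPE = 4 in its SQL is the serialized value of LinkType.CHILD_TABLE, i.e. exactly the parent->child view links being moved.

    import java.sql.SQLException;

    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.util.UpgradeUtil;

    public class ChildLinkMigrationSketch {
        public static void migrate(PhoenixConnection metaConnection) throws SQLException {
            // After this call SYSTEM.CATALOG no longer carries LINK_TYPE = 4 rows;
            // parent->child view links live only in SYSTEM.CHILD_LINK.
            UpgradeUtil.moveChildLinks(metaConnection);
        }
    }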
http://git-wip-us.apache.org/repos/asf/phoenix/blob/758efd9b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
index 2cecea2..fbde4b7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
@@ -16,6 +16,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableNotFoundException;
@@ -43,7 +44,8 @@ import com.google.common.collect.Maps;
  * limitations under the License.
  */
 public class MetaDataEndpointImplTest extends ParallelStatsDisabledIT {
-    private final TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
+    private final TableName catalogTable = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+    private final TableName linkTable = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES);
 
     /*
       The tree structure is as follows: Where ParentTable is the Base Table
@@ -77,7 +79,7 @@ public class MetaDataEndpointImplTest extends ParallelStatsDisabledIT {
         System.err.println(rightChildTable);
 
         TableViewFinderResult childViews = new TableViewFinderResult();
-        ViewFinder.findAllRelatives(getTable(catalogTable), HConstants.EMPTY_BYTE_ARRAY, table.getSchemaName().getBytes(),
+        ViewFinder.findAllRelatives(getTable(linkTable), HConstants.EMPTY_BYTE_ARRAY, table.getSchemaName().getBytes(),
             table.getTableName().getBytes(), PTable.LinkType.CHILD_TABLE, childViews);
         assertEquals(3, childViews.getResults().size());