Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/02/28 07:11:23 UTC

[1/3] git commit: PHOENIX-86 Coalesce does not work in a where clause with null values (JamesTaylor)

Repository: incubator-phoenix
Updated Branches:
  refs/heads/master d919a6c21 -> 2cd63e56a


PHOENIX-86 Coalesce does not work in a where clause with null values (JamesTaylor)


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/463ef95c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/463ef95c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/463ef95c

Branch: refs/heads/master
Commit: 463ef95c963fa4d9b1807d14a3b3bece01e5dc6d
Parents: 8bbce3c
Author: James Taylor <ja...@apache.org>
Authored: Thu Feb 27 21:58:32 2014 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Thu Feb 27 21:58:32 2014 -0800

----------------------------------------------------------------------
 .../expression/function/CoalesceFunction.java   |  9 ++-
 .../filter/EvaluateOnCompletionVisitor.java     | 10 +++-
 .../org/apache/phoenix/end2end/QueryTest.java   | 59 ++++++++++++++++----
 3 files changed, 64 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/463ef95c/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java
index 792bd30..360fd07 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java
@@ -21,7 +21,6 @@ import java.sql.SQLException;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
@@ -63,10 +62,14 @@ public class CoalesceFunction extends ScalarFunction {
 
     @Override
     public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
-        if (children.get(0).evaluate(tuple, ptr)) {
+        boolean evaluated = children.get(0).evaluate(tuple, ptr);
+        if (evaluated) {
             return true;
         }
-        return children.get(1).evaluate(tuple, ptr);
+        if (tuple.isImmutable()) {
+            return children.get(1).evaluate(tuple, ptr);
+        }
+        return false;
     }
 
     @Override
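
For context, a minimal JDBC sketch of the query pattern this change fixes: COALESCE over a nullable column in a WHERE clause. It is not part of the commit; the connection URL is illustrative and the table layout follows the test added below.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class CoalesceWhereExample {
        public static void main(String[] args) throws Exception {
            // Illustrative URL; adjust to your cluster.
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            // Rows whose X_DECIMAL is null were previously dropped by the filter;
            // with this fix the default 0.0 is applied once the row is complete.
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT entity_id FROM aTable WHERE coalesce(X_DECIMAL, 0.0) = 0.0");
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
            conn.close();
        }
    }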

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/463ef95c/phoenix-core/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
index 7a236a8..994f60a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
@@ -25,6 +25,8 @@ import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.IsNullExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.expression.function.CoalesceFunction;
+import org.apache.phoenix.expression.function.ScalarFunction;
 import org.apache.phoenix.expression.visitor.TraverseAllExpressionVisitor;
 
 
@@ -42,6 +44,7 @@ import org.apache.phoenix.expression.visitor.TraverseAllExpressionVisitor;
  * key value column of interest, but the expression may evaluate to true
  * just based on the row key columns.
  * 
+ * TODO: this really should become a method on Expression
  * @since 0.1
  */
 public class EvaluateOnCompletionVisitor extends TraverseAllExpressionVisitor<Void> {
@@ -77,5 +80,10 @@ public class EvaluateOnCompletionVisitor extends TraverseAllExpressionVisitor<Vo
         evaluateOnCompletion = true;
         return null;
     }
-
+    
+    @Override
+    public Iterator<Expression> visitEnter(ScalarFunction node) {
+        evaluateOnCompletion |= node instanceof CoalesceFunction;
+        return null;
+    }
 }
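
A hedged sketch of how this visitor is consulted (the calling code is illustrative, and the accessor name is assumed from the class's field, which is not shown in this hunk): the filter walks the boolean expression once, and if any node, now including COALESCE, may only be decidable after the whole row is present, final evaluation is deferred until all key values for the row have been seen.

    // Illustrative usage, not the actual Phoenix filter code.
    EvaluateOnCompletionVisitor visitor = new EvaluateOnCompletionVisitor();
    expression.accept(visitor);
    if (visitor.evaluateOnCompletion()) {
        // Re-evaluate the expression once the row is complete instead of
        // rejecting it based on a partial row.
    }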

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/463ef95c/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryTest.java
index 795638a..794e235 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryTest.java
@@ -65,6 +65,15 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.ConstraintViolationException;
+import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.SequenceNotFoundException;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -78,16 +87,6 @@ import com.google.common.collect.Sets;
 import com.google.common.primitives.Doubles;
 import com.google.common.primitives.Floats;
 
-import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.ConstraintViolationException;
-import org.apache.phoenix.schema.PDataType;
-import org.apache.phoenix.schema.SequenceNotFoundException;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
-
 
 
 /**
@@ -2108,6 +2107,46 @@ public class QueryTest extends BaseClientManagedTimeTest {
     }
 
     @Test
+    public void testCoalesceFunction() throws Exception {
+        String query = "SELECT entity_id FROM aTable WHERE a_integer > 0 and coalesce(X_DECIMAL,0.0) = 0.0";
+        Properties props = new Properties(TEST_PROPERTIES);
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10)); // Upsert rows at ts + 10
+        Connection conn = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO aTable(organization_id,entity_id,x_decimal) values(?,?,?)");
+        stmt.setString(1, getOrganizationId());
+        stmt.setString(2, ROW1);
+        stmt.setBigDecimal(3, BigDecimal.valueOf(1.0));
+        stmt.execute();
+        stmt.setString(2, ROW3);
+        stmt.setBigDecimal(3, BigDecimal.valueOf(2.0));
+        stmt.execute();
+        stmt.setString(2, ROW4);
+        stmt.setBigDecimal(3, BigDecimal.valueOf(3.0));
+        stmt.execute();
+        stmt.setString(2, ROW5);
+        stmt.setBigDecimal(3, BigDecimal.valueOf(0.0));
+        stmt.execute();
+        stmt.setString(2, ROW6);
+        stmt.setBigDecimal(3, BigDecimal.valueOf(4.0));
+        stmt.execute();
+        conn.commit();
+
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20)); // Query at ts + 20, after the upserts are visible
+        conn = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
+        try {
+            PreparedStatement statement = conn.prepareStatement(query);
+            ResultSet rs = statement.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(ROW2, rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals(ROW5, rs.getString(1));
+            assertFalse(rs.next());
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
     public void testCountIsNull() throws Exception {
         String query = "SELECT count(1) FROM aTable WHERE X_DECIMAL is null";
         Properties props = new Properties(TEST_PROPERTIES);


[3/3] git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-phoenix

Posted by ja...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-phoenix


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/2cd63e56
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/2cd63e56
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/2cd63e56

Branch: refs/heads/master
Commit: 2cd63e56aaa14074c595467cecc66d11025c3877
Parents: 4a64635 d919a6c
Author: James Taylor <ja...@apache.org>
Authored: Thu Feb 27 22:11:06 2014 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Thu Feb 27 22:11:06 2014 -0800

----------------------------------------------------------------------
 .../apache/phoenix/client/ClientKeyValue.java   | 16 +++++
 .../phoenix/client/ClientKeyValueBuilder.java   | 15 ++++-
 .../phoenix/client/GenericKeyValueBuilder.java  | 13 ++++
 .../apache/phoenix/client/KeyValueBuilder.java  | 21 ++++++-
 .../coprocessor/MetaDataEndpointImpl.java       |  4 +-
 .../query/ConnectionQueryServicesImpl.java      | 17 +++---
 .../org/apache/phoenix/util/MetaDataUtil.java   | 62 ++++++++++++++------
 .../apache/phoenix/util/MetaDataUtilTest.java   | 56 +++++++++++++++++-
 8 files changed, 175 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/2cd63e56/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------


[2/3] git commit: PHOENIX-87 Use size-based cache for server-side metadata cache (JamesTaylor)

Posted by ja...@apache.org.
PHOENIX-87 Use size-based cache for server-side metadata cache (JamesTaylor)


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/4a64635d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/4a64635d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/4a64635d

Branch: refs/heads/master
Commit: 4a64635d238879ab78244da96cfa2c664072936b
Parents: 463ef95
Author: James Taylor <ja...@apache.org>
Authored: Thu Feb 27 21:59:08 2014 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Thu Feb 27 21:59:08 2014 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/cache/GlobalCache.java   | 60 +++++++++++++++----
 .../coprocessor/MetaDataEndpointImpl.java       | 62 ++++++++------------
 .../coprocessor/MetaDataRegionObserver.java     |  3 +-
 .../org/apache/phoenix/query/QueryServices.java |  4 +-
 .../phoenix/query/QueryServicesOptions.java     |  9 +--
 .../apache/phoenix/schema/DelegateColumn.java   |  7 +++
 .../org/apache/phoenix/schema/PBaseColumn.java  |  9 ++-
 .../java/org/apache/phoenix/schema/PColumn.java |  2 +
 .../apache/phoenix/schema/PColumnFamily.java    |  2 +
 .../phoenix/schema/PColumnFamilyImpl.java       | 17 +++++-
 .../org/apache/phoenix/schema/PColumnImpl.java  |  8 +++
 .../java/org/apache/phoenix/schema/PName.java   | 12 ++++
 .../org/apache/phoenix/schema/PNameFactory.java |  4 +-
 .../org/apache/phoenix/schema/PNameImpl.java    |  9 ++-
 .../java/org/apache/phoenix/schema/PTable.java  |  2 +
 .../org/apache/phoenix/schema/PTableImpl.java   | 40 ++++++++++++-
 .../org/apache/phoenix/schema/ValueSchema.java  |  9 +++
 .../java/org/apache/phoenix/util/SizedUtil.java | 10 +++-
 18 files changed, 204 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index 01bbf06..dd9c275 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -23,17 +23,22 @@ import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTR
 
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.memory.ChildMemoryManager;
 import org.apache.phoenix.memory.GlobalMemoryManager;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.SizedUtil;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.Weigher;
 
 
 /**
@@ -51,19 +56,48 @@ public class GlobalCache extends TenantCacheImpl {
     // TODO: Use Guava cache with auto removal after lack of access 
     private final ConcurrentMap<ImmutableBytesWritable,TenantCache> perTenantCacheMap = new ConcurrentHashMap<ImmutableBytesWritable,TenantCache>();
     // Cache for latest PTable for a given Phoenix table
-    private final ConcurrentHashMap<ImmutableBytesPtr,PTable> metaDataCacheMap = new ConcurrentHashMap<ImmutableBytesPtr,PTable>();
+    private volatile Cache<ImmutableBytesPtr,PTable> metaDataCache; // volatile: required for double-checked locking below
     
-    public static synchronized GlobalCache getInstance(RegionCoprocessorEnvironment env) {
-        // See http://www.cs.umd.edu/~pugh/java/memoryModel/DoubleCheckedLocking.html
-        // for explanation of why double locking doesn't work. 
-        if (INSTANCE == null) {
-            INSTANCE = new GlobalCache(env.getConfiguration());
+    public Cache<ImmutableBytesPtr,PTable> getMetaDataCache() {
+        // Lazily initialize the metadata cache so that the size and TTL settings
+        // are read from the configuration on first use rather than at coprocessor
+        // load time, which is too early for some systems.
+        Cache<ImmutableBytesPtr,PTable> result = metaDataCache;
+        if (result == null) {
+            synchronized(this) {
+                result = metaDataCache;
+                if(result == null) {
+                    long maxSize = config.getLong(QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB,
+                            QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE);
+                    long maxTTL = config.getLong(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB,
+                            QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS);
+                    metaDataCache = result = CacheBuilder.newBuilder()
+                            .maximumWeight(maxSize)
+                            .expireAfterAccess(maxTTL, TimeUnit.MILLISECONDS)
+                            .weigher(new Weigher<ImmutableBytesPtr, PTable>() {
+                                @Override
+                                public int weigh(ImmutableBytesPtr key, PTable table) {
+                                    return SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + key.getLength() + table.getEstimatedSize();
+                                }
+                            })
+                            .build();
+                }
+            }
         }
-        return INSTANCE;
+        return result;
     }
-    
-    public ConcurrentHashMap<ImmutableBytesPtr,PTable> getMetaDataCache() {
-        return metaDataCacheMap;
+
+    public static GlobalCache getInstance(RegionCoprocessorEnvironment env) {
+        GlobalCache result = INSTANCE;
+        if (result == null) {
+            synchronized(GlobalCache.class) {
+                result = INSTANCE;
+                if(result == null) {
+                    INSTANCE = result = new GlobalCache(env.getConfiguration());
+                }
+            }
+        }
+        return result;
     }
     
     /**
@@ -83,7 +117,7 @@ public class GlobalCache extends TenantCacheImpl {
         super(new GlobalMemoryManager(Runtime.getRuntime().totalMemory() * 
                                           config.getInt(MAX_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MEMORY_PERC) / 100,
                                       config.getInt(MAX_MEMORY_WAIT_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MEMORY_WAIT_MS)),
-              config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS));
+              config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS));
         this.config = config;
     }
     
@@ -100,7 +134,7 @@ public class GlobalCache extends TenantCacheImpl {
         TenantCache tenantCache = perTenantCacheMap.get(tenantId);
         if (tenantCache == null) {
             int maxTenantMemoryPerc = config.getInt(MAX_TENANT_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_TENANT_MEMORY_PERC);
-            int maxServerCacheTimeToLive = config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
+            int maxServerCacheTimeToLive = config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
             TenantCacheImpl newTenantCache = new TenantCacheImpl(new ChildMemoryManager(getMemoryManager(), maxTenantMemoryPerc), maxServerCacheTimeToLive);
             tenantCache = perTenantCacheMap.putIfAbsent(tenantId, newTenantCache);
             if (tenantCache == null) {
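
The getInstance() and getMetaDataCache() methods above both use double-checked locking with a local-variable copy to avoid a second read on the fast path. The idiom is only safe when the underlying field is volatile (note the volatile qualifier on metaDataCache; INSTANCE, whose declaration is not shown in this hunk, needs the same treatment). A minimal self-contained sketch of the safe form, with illustrative names:

    public class Holder {
        // volatile is required for safe publication under double-checked locking
        private static volatile Holder INSTANCE;

        public static Holder getInstance() {
            Holder result = INSTANCE; // one volatile read on the fast path
            if (result == null) {
                synchronized (Holder.class) {
                    result = INSTANCE;
                    if (result == null) {
                        INSTANCE = result = new Holder();
                    }
                }
            }
            return result;
        }
    }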

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 09d2c6b..0809b4e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -61,7 +61,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -112,6 +111,7 @@ import org.apache.phoenix.util.ServerUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.cache.Cache;
 import com.google.common.collect.Lists;
 
 /**
@@ -244,9 +244,9 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
     private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region, long clientTimeStamp) throws IOException, SQLException {
         Scan scan = newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
         RegionScanner scanner = region.getScanner(scan);
-        Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
         try {
-            PTable oldTable = metaDataCache.get(cacheKey);
+            PTable oldTable = metaDataCache.getIfPresent(cacheKey);
             long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP-1 : oldTable.getTimeStamp();
             PTable newTable;
             newTable = getTable(scanner, clientTimeStamp, tableTimeStamp);
@@ -257,14 +257,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 if (logger.isDebugEnabled()) {
                     logger.debug("Caching table " + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()) + " at seqNum " + newTable.getSequenceNumber() + " with newer timestamp " + newTable.getTimeStamp() + " versus " + tableTimeStamp);
                 }
-                oldTable = metaDataCache.put(cacheKey, newTable);
-                if (logger.isDebugEnabled()) {
-                    if (oldTable == null) {
-                        logger.debug("No previously cached table " + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()));
-                    } else {
-                        logger.debug("Previously cached table " + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()) + " was at seqNum " + oldTable.getSequenceNumber() + " with timestamp " + oldTable.getTimeStamp());
-                    }
-                }
+                metaDataCache.put(cacheKey, newTable);
             }
             return newTable;
         } finally {
@@ -460,7 +453,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
         if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
             KeyValue kv = results.get(0);
             if (kv.isDelete()) {
-                Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+                Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
                 PTable table = newDeletedTableMarker(kv.getTimestamp());
                 metaDataCache.put(cacheKey, table);
                 return table;
@@ -479,8 +472,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
 
     private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException {
         HRegion region = env.getRegion();
-        Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-        PTable table = metaDataCache.get(cacheKey);
+        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+        PTable table = metaDataCache.getIfPresent(cacheKey);
         // We always cache the latest version - fault in if not in cache
         if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
             return table;
@@ -561,11 +554,11 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 
                 // Invalidate the cache - the next getTable call will add it
                 // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
-                Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+                Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
                 if (parentCacheKey != null) {
-                    metaDataCache.remove(parentCacheKey);
+                    metaDataCache.invalidate(parentCacheKey);
                 }
-                metaDataCache.remove(cacheKey);
+                metaDataCache.invalidate(cacheKey);
                 // Get timeStamp from mutations - the above method sets it if it's unset
                 long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, currentTimeStamp, null);
@@ -664,7 +657,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS || result.getTable() == null) {
                     return result;
                 }
-                Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+                Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
                 // Commit the list of deletion.
                 region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet());
                 long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
@@ -673,7 +666,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 }
                 if (parentTableName != null) {
                     ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(lockKey);
-                    metaDataCache.remove(parentCacheKey);
+                    metaDataCache.invalidate(parentCacheKey);
                 }
                 return result;
             } finally {
@@ -693,8 +686,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
         HRegion region = env.getRegion();
         ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
         
-        Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-        PTable table = metaDataCache.get(cacheKey);
+        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+        PTable table = metaDataCache.getIfPresent(cacheKey);
         
         // We always cache the latest version - fault in if not in cache
         if (table != null || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) != null) {
@@ -803,8 +796,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
                 List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
                 invalidateList.add(cacheKey);
-                Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-                PTable table = metaDataCache.get(cacheKey);
+                Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+                PTable table = metaDataCache.getIfPresent(cacheKey);
                 if (logger.isDebugEnabled()) {
                     if (table == null) {
                         logger.debug("Table " + Bytes.toStringBinary(key) + " not found in cache. Will build through scan");
@@ -861,14 +854,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet());
                 // Invalidate from cache
                 for (ImmutableBytesPtr invalidateKey : invalidateList) {
-                    PTable invalidatedTable = metaDataCache.remove(invalidateKey);
-                    if (logger.isDebugEnabled()) {
-                        if (invalidatedTable == null) {
-                            logger.debug("Attempted to invalidated table key " + Bytes.toStringBinary(cacheKey.get(),cacheKey.getOffset(),cacheKey.getLength()) + " but found no cached table");
-                        } else {
-                            logger.debug("Invalidated table key " + Bytes.toStringBinary(cacheKey.get(),cacheKey.getOffset(),cacheKey.getLength()) + " with timestamp " + invalidatedTable.getTimeStamp() + " and seqNum " + invalidatedTable.getSequenceNumber());
-                        }
-                    }
+                    metaDataCache.invalidate(invalidateKey);
                 }
                 // Get client timeStamp from mutations, since it may get updated by the mutateRowsWithLocks call
                 long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
@@ -1045,8 +1031,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
 
     private PTable doGetTable(byte[] key, long clientTimeStamp) throws IOException, SQLException {
         ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
-        Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-        PTable table = metaDataCache.get(cacheKey);
+        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+        PTable table = metaDataCache.getIfPresent(cacheKey);
         // We only cache the latest, so we'll end up building the table with every call if the client connection has specified an SCN.
         // TODO: If we indicate to the client that we're returning an older version, but there's a newer version available, the client
         // can safely not call this, since we only allow modifications to the latest.
@@ -1074,7 +1060,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
         }
         try {
             // Try cache again in case we were waiting on a lock
-            table = metaDataCache.get(cacheKey);
+            table = metaDataCache.getIfPresent(cacheKey);
             // We only cache the latest, so we'll end up building the table with every call if the client connection has specified an SCN.
             // TODO: If we indicate to the client that we're returning an older version, but there's a newer version available, the client
             // can safely not call this, since we only allow modifications to the latest.
@@ -1099,8 +1085,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
 
     @Override
     public void clearCache() {
-        Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-        metaDataCache.clear();
+        Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+        metaDataCache.invalidateAll();
     }
 
     @Override
@@ -1180,8 +1166,8 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
                 if (currentState != newState) {
                     region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet());
                     // Invalidate from cache
-                    Map<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
-                    metaDataCache.remove(cacheKey);
+                    Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+                    metaDataCache.invalidate(cacheKey);
                 }
                 // Get client timeStamp from mutations, since it may get updated by the mutateRowsWithLocks call
                 long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
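
The mechanical part of this file's change is a migration from java.util.Map to Guava's Cache API; the correspondence, as exercised throughout the diff:

    // java.util.Map             com.google.common.cache.Cache
    // map.get(key)          ->  cache.getIfPresent(key)   // null on miss, no loading
    // map.put(key, value)   ->  cache.put(key, value)     // subject to weight/TTL eviction
    // map.remove(key)       ->  cache.invalidate(key)
    // map.clear()           ->  cache.invalidateAll()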

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index e60a6a1..957209f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.coprocessor;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-
 import org.apache.phoenix.cache.GlobalCache;
 
 
@@ -33,6 +32,6 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
     @Override
     public void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
             boolean abortRequested) {
-        GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().clear();
+        GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll();
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index b6a65c0..0904be7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -70,7 +70,7 @@ public interface QueryServices extends SQLCloseable {
     public static final String SCAN_CACHE_SIZE_ATTRIB = "hbase.client.scanner.caching";
     public static final String MAX_MUTATION_SIZE_ATTRIB = "phoenix.mutate.maxSize";
     public static final String MUTATE_BATCH_SIZE_ATTRIB = "phoenix.mutate.batchSize";
-    public static final String MAX_SERVER_CACHE_TIME_TO_LIVE_MS = "phoenix.coprocessor.maxServerCacheTimeToLiveMs";
+    public static final String MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxServerCacheTimeToLiveMs";
     public static final String MAX_INTRA_REGION_PARALLELIZATION_ATTRIB  = "phoenix.query.maxIntraRegionParallelization";
     public static final String ROW_KEY_ORDER_SALTED_TABLE_ATTRIB  = "phoenix.query.rowKeyOrderSaltedTable";
     public static final String USE_INDEXES_ATTRIB  = "phoenix.query.useIndexes";
@@ -93,6 +93,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB = "phoenix.distinct.value.compress.threshold";
     public static final String SEQUENCE_CACHE_SIZE_ATTRIB = "phoenix.sequence.cacheSize";
     public static final String INDEX_MAX_FILESIZE_PERC_ATTRIB = "phoenix.index.maxDataFileSizePerc";
+    public static final String MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheSize";
+    public static final String MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheTimeToLiveMs";
 
     
     /**
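
A hedged example of overriding the two new settings (the class and values are illustrative). They are read from the region server's configuration, so in a real deployment they belong in the server-side hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MetaDataCacheConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Cap the server-side metadata cache at 50 MB
            // (default per QueryServicesOptions below: 20 MB).
            conf.setLong("phoenix.coprocessor.maxMetaDataCacheSize", 50L * 1024L * 1024L);
            // Expire entries 10 minutes after last access
            // (default per QueryServicesOptions below: 30 minutes).
            conf.setLong("phoenix.coprocessor.maxMetaDataCacheTimeToLiveMs", 10L * 60L * 1000L);
        }
    }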

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index f9fdbfe..d52cd5c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -34,7 +34,7 @@ import static org.apache.phoenix.query.QueryServices.MAX_MEMORY_WAIT_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_MUTATION_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB;
-import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS;
+import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
@@ -73,7 +73,7 @@ public class QueryServicesOptions {
 	public static final int DEFAULT_QUEUE_SIZE = 500;
 	public static final int DEFAULT_THREAD_TIMEOUT_MS = 600000; // 10min
 	public static final int DEFAULT_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m
-	public static final int DEFAULT_MAX_MEMORY_PERC = 20; // 20% of heap
+	public static final int DEFAULT_MAX_MEMORY_PERC = 15; // 15% of heap
 	public static final int DEFAULT_MAX_MEMORY_WAIT_MS = 10000;
 	public static final int DEFAULT_MAX_TENANT_MEMORY_PERC = 100;
 	public static final long DEFAULT_MAX_SERVER_CACHE_SIZE = 1024*1024*100;  // 100 Mb
@@ -111,7 +111,8 @@ public class QueryServicesOptions {
     
     public static final int DEFAULT_SEQUENCE_CACHE_SIZE = 100;  // reserve 100 sequences at a time
     public static final int DEFAULT_INDEX_MAX_FILESIZE_PERC = 50; // % of data table max file size for index table
-    
+    public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L*1024L*20L; // 20 Mb
+    public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS = 60000 * 30; // 30 mins
     
     private final Configuration config;
     
@@ -374,7 +375,7 @@ public class QueryServicesOptions {
     }
 
     public QueryServicesOptions setMaxServerCacheTTLMs(int ttl) {
-        return set(MAX_SERVER_CACHE_TIME_TO_LIVE_MS, ttl);
+        return set(MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, ttl);
     }
     
     public QueryServicesOptions setMasterInfoPort(int port) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
index aa2fb3e..2238991 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.phoenix.util.SizedUtil;
+
 public class DelegateColumn extends DelegateDatum implements PColumn {
     
     public DelegateColumn(PColumn delegate) {
@@ -71,4 +73,9 @@ public class DelegateColumn extends DelegateDatum implements PColumn {
     public byte[] getViewConstant() {
         return getDelegate().getViewConstant();
     }
+
+    @Override
+    public int getEstimatedSize() {
+        return SizedUtil.OBJECT_SIZE + getDelegate().getEstimatedSize();
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PBaseColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PBaseColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PBaseColumn.java
index 2d8a384..e7e5c2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PBaseColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PBaseColumn.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.schema;
 
+import org.apache.phoenix.util.SizedUtil;
+
 
 /**
  * 
@@ -47,4 +49,9 @@ public abstract class PBaseColumn implements PColumn {
     public boolean isNullable() {
         return false;
     }
- }
+    
+    @Override
+    public int getEstimatedSize() {
+        return SizedUtil.OBJECT_SIZE; // Not really interested in the size of these
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
index a2d8718..199449f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
@@ -48,4 +48,6 @@ public interface PColumn extends PDatum, Writable {
     Integer getArraySize();
     
     byte[] getViewConstant();
+    
+    int getEstimatedSize();
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamily.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamily.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamily.java
index 965ecf7..24da14d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamily.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamily.java
@@ -49,4 +49,6 @@ public interface PColumnFamily {
      * @throws ColumnNotFoundException if the column cannot be found
      */
     PColumn getColumn(String name) throws ColumnNotFoundException;
+    
+    int getEstimatedSize();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
index 0846530..6f75ba4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java
@@ -21,26 +21,41 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.util.SizedUtil;
 
-import com.google.common.collect.*;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSortedMap;
 
 public class PColumnFamilyImpl implements PColumnFamily {
     private final PName name;
     private final List<PColumn> columns;
     private final Map<String, PColumn> columnByString;
     private final Map<byte[], PColumn> columnByBytes;
+    private final int estimatedSize;
+
+    @Override
+    public int getEstimatedSize() {
+        return estimatedSize;
+    }
     
     public PColumnFamilyImpl(PName name, List<PColumn> columns) {
+        Preconditions.checkNotNull(name);
+        int estimatedSize = SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 4 + SizedUtil.INT_SIZE + name.getEstimatedSize() +
+                SizedUtil.sizeOfMap(columns.size()) * 2 + SizedUtil.sizeOfArrayList(columns.size());
         this.name = name;
         this.columns = ImmutableList.copyOf(columns);
         ImmutableMap.Builder<String, PColumn> columnByStringBuilder = ImmutableMap.builder();
         ImmutableSortedMap.Builder<byte[], PColumn> columnByBytesBuilder = ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR);
         for (PColumn column : columns) {
+            estimatedSize += column.getEstimatedSize();
             columnByBytesBuilder.put(column.getName().getBytes(), column);
             columnByStringBuilder.put(column.getName().getString(), column);
         }
         this.columnByBytes = columnByBytesBuilder.build();
         this.columnByString = columnByStringBuilder.build();
+        this.estimatedSize = estimatedSize;
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index 081e37e..d702507 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.SizedUtil;
 
 import com.google.common.base.Preconditions;
 
@@ -96,6 +97,13 @@ public class PColumnImpl implements PColumn {
     }
 
     @Override
+    public int getEstimatedSize() {
+        return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 8 + SizedUtil.INT_OBJECT_SIZE * 3 + SizedUtil.INT_SIZE + 
+                name.getEstimatedSize() + (familyName == null ? 0 : familyName.getEstimatedSize()) +
+                (viewConstant == null ? 0 : (SizedUtil.ARRAY_SIZE + viewConstant.length));
+    }
+
+    @Override
     public PName getName() {
         return name;
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
index 361ac36..0e1337c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java
@@ -51,6 +51,11 @@ public interface PName {
         public ImmutableBytesPtr getBytesPtr() {
             return ByteUtil.EMPTY_BYTE_ARRAY_PTR;
         }
+
+        @Override
+        public int getEstimatedSize() {
+            return 0;
+        }
     };
     public static PName EMPTY_COLUMN_NAME = new PName() {
         @Override
@@ -72,6 +77,11 @@ public interface PName {
         public ImmutableBytesPtr getBytesPtr() {
             return QueryConstants.EMPTY_COLUMN_BYTES_PTR;
         }
+
+        @Override
+        public int getEstimatedSize() {
+            return 0;
+        }
     };
     /**
      * Get the client-side, normalized name as referenced
@@ -91,4 +101,6 @@ public interface PName {
      * @return a pointer to the underlying bytes
      */
     ImmutableBytesPtr getBytesPtr();
+    
+    int getEstimatedSize();
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
index 9a362e4..c4941ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameFactory.java
@@ -19,10 +19,12 @@
 package org.apache.phoenix.schema;
 
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.apache.phoenix.query.QueryConstants;
 
 public class PNameFactory {
+    public static int getEstimatedSize(PName name) {
+        return name == null ? 0 : name.getEstimatedSize();
+    }
 
     private PNameFactory() {
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
index 9d4a48e..e350909 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PNameImpl.java
@@ -19,8 +19,8 @@ package org.apache.phoenix.schema;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.http.annotation.Immutable;
-
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.util.SizedUtil;
 
 @Immutable
 public class PNameImpl implements PName {
@@ -42,6 +42,13 @@ public class PNameImpl implements PName {
     }
     private PNameImplData data = new PNameImplData();
 
+
+    @Override
+    public int getEstimatedSize() {
+        return SizedUtil.OBJECT_SIZE * 3 + SizedUtil.ARRAY_SIZE + SizedUtil.IMMUTABLE_BYTES_PTR_SIZE +
+                data.stringName.length() * SizedUtil.CHAR_SIZE + data.bytesName.length;
+    }
+
     PNameImpl(String name) {
         this.data.stringName = name;
         this.data.bytesName = Bytes.toBytes(name);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index d76a116..1369f0c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -277,4 +277,6 @@ public interface PTable extends Writable {
     ViewType getViewType();
     String getViewStatement();
     Short getViewIndexId();
+    
+    int getEstimatedSize();
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 601f6c8..fac6892 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -50,6 +50,7 @@ import org.apache.phoenix.schema.stat.PTableStats;
 import org.apache.phoenix.schema.stat.PTableStatsImpl;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
@@ -110,6 +111,7 @@ public class PTableImpl implements PTable {
     private boolean multiTenant;
     private ViewType viewType;
     private Short viewIndexId;
+    private int estimatedSize;
     
     public PTableImpl() {
     }
@@ -221,12 +223,26 @@ public class PTableImpl implements PTable {
         return viewType;
     }
     
+
+    @Override
+    public int getEstimatedSize() {
+        return estimatedSize;
+    }
+    
     private void init(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, long timeStamp, long sequenceNumber,
             PName pkName, Integer bucketNum, List<PColumn> columns, PTableStats stats, PName parentTableName, List<PTable> indexes,
-            boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant, ViewType viewType, Short viewIndexId) throws SQLException {
+            boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
+            ViewType viewType, Short viewIndexId) throws SQLException {
         if (schemaName == null) {
             throw new NullPointerException();
         }
+        int estimatedSize = SizedUtil.OBJECT_SIZE + 26 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
+              PNameFactory.getEstimatedSize(tenantId) + 
+              PNameFactory.getEstimatedSize(schemaName) + 
+              PNameFactory.getEstimatedSize(tableName) + 
+              PNameFactory.getEstimatedSize(pkName) +
+              PNameFactory.getEstimatedSize(parentTableName) +
+              PNameFactory.getEstimatedSize(defaultFamilyName);
         this.tenantId = tenantId;
         this.schemaName = schemaName;
         this.tableName = tableName;
@@ -278,9 +294,12 @@ public class PTableImpl implements PTable {
                 }
             }
         }
+        estimatedSize += SizedUtil.sizeOfMap(allColumns.length, SizedUtil.POINTER_SIZE, SizedUtil.sizeOfArrayList(1)); // for multi-map
+        
         this.bucketNum = bucketNum;
         this.pkColumns = ImmutableList.copyOf(pkColumns);
         this.allColumns = ImmutableList.copyOf(allColumns);
+        estimatedSize += SizedUtil.sizeOfMap(pkColumns.size()) + SizedUtil.sizeOfMap(allColumns.length);
         
         RowKeySchemaBuilder builder = new RowKeySchemaBuilder(pkColumns.size());
         // Two pass so that column order in column families matches overall column order
@@ -291,6 +310,7 @@ public class PTableImpl implements PTable {
         for (PColumn column : allColumns) {
             PName familyName = column.getFamilyName();
             if (familyName == null) {            	
+                estimatedSize += column.getEstimatedSize(); // PK columns
                 builder.addField(column, column.isNullable(), column.getSortOrder());
             } else {
                 List<PColumn> columnsInFamily = familyMap.get(familyName);
@@ -303,6 +323,7 @@ public class PTableImpl implements PTable {
         }
         
         this.rowKeySchema = builder.build();
+        estimatedSize += rowKeySchema.getEstimatedSize();
         Iterator<Map.Entry<PName,List<PColumn>>> iterator = familyMap.entrySet().iterator();
         PColumnFamily[] families = new PColumnFamily[familyMap.size()];
         ImmutableMap.Builder<String, PColumnFamily> familyByString = ImmutableMap.builder();
@@ -313,15 +334,30 @@ public class PTableImpl implements PTable {
             families[i] = family;
             familyByString.put(family.getName().getString(), family);
             familyByBytes.put(family.getName().getBytes(), family);
+            estimatedSize += family.getEstimatedSize();
         }
         this.families = ImmutableList.copyOf(families);
         this.familyByBytes = familyByBytes.build();
         this.familyByString = familyByString.build();
+        estimatedSize += SizedUtil.sizeOfArrayList(families.length);
+        estimatedSize += SizedUtil.sizeOfMap(families.length) * 2;
+        
         this.stats = stats;
-        this.indexes = indexes;
+        this.indexes = indexes == null ? Collections.<PTable>emptyList() : indexes;
+        for (PTable index : this.indexes) {
+            estimatedSize += index.getEstimatedSize();
+        }
+        
         this.parentTableName = parentTableName;
         this.parentName = parentTableName == null ? null : PNameFactory.newName(SchemaUtil.getTableName(schemaName.getString(), parentTableName.getString()));
+        estimatedSize += PNameFactory.getEstimatedSize(this.parentName);
+        
         this.physicalNames = physicalNames == null ? ImmutableList.<PName>of() : ImmutableList.copyOf(physicalNames);
+        for (PName name : this.physicalNames) {
+            estimatedSize += name.getEstimatedSize();
+        }
+
+        this.estimatedSize = estimatedSize;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/schema/ValueSchema.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ValueSchema.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ValueSchema.java
index 7170bab..8e13fd7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ValueSchema.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ValueSchema.java
@@ -26,6 +26,7 @@ import java.util.List;
 
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.util.SizedUtil;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -54,6 +55,12 @@ public abstract class ValueSchema implements Writable {
     protected ValueSchema(int minNullable, List<Field> fields) {
         init(minNullable, fields);
     }
+    
+    public int getEstimatedSize() { // Memory size of ValueSchema
+        int count = fieldIndexByPosition.length;
+        return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE * (4 + count) + 
+                SizedUtil.ARRAY_SIZE + count * Field.ESTIMATED_SIZE + SizedUtil.sizeOfArrayList(count);
+    }
 
     private void init(int minNullable, List<Field> fields) {
         this.minNullable = minNullable;
@@ -155,6 +162,8 @@ public abstract class ValueSchema implements Writable {
             if (type != other.type) return false;
             return true;
         }
+        
+        public static final int ESTIMATED_SIZE = SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 2 + SizedUtil.INT_SIZE * 3;
 
         private int count;
         private PDataType type;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/4a64635d/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
index 5edc40c..955bd8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SizedUtil.java
@@ -52,8 +52,16 @@ public class SizedUtil {
     private SizedUtil() {
     }
     
+    public static int sizeOfArrayList(int capacity) {
+        return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.POINTER_SIZE * capacity;
+    }
+    
+    public static int sizeOfMap(int nRows) {
+        return sizeOfMap(nRows, SizedUtil.POINTER_SIZE, SizedUtil.POINTER_SIZE);
+    }
+    
     public static int sizeOfMap(int nRows, int keySize, int valueSize) {
-        return nRows * (
+        return SizedUtil.OBJECT_SIZE + nRows * (
                 SizedUtil.MAP_ENTRY_SIZE + // entry
                 keySize + // key size
                 valueSize); // value size
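
To make the new helpers concrete, a short usage sketch (grounded in the signatures above; the resulting numbers depend on the SizedUtil constants):

    // Estimated heap size of a 100-entry map whose keys and values are object references.
    int mapSize = SizedUtil.sizeOfMap(100);
    // Equivalent explicit form using the three-argument overload.
    int sameSize = SizedUtil.sizeOfMap(100, SizedUtil.POINTER_SIZE, SizedUtil.POINTER_SIZE);
    // Estimated heap size of an ArrayList with capacity for 10 element pointers.
    int listSize = SizedUtil.sizeOfArrayList(10);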