Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/08/10 03:56:20 UTC

[1/2] git commit: PHOENIX-1157 Improve abstraction for meta data cache

Repository: phoenix
Updated Branches:
  refs/heads/4.0 152ce872c -> 64d136d15


PHOENIX-1157 Improve abstraction for meta data cache


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d5a78038
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d5a78038
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d5a78038

Branch: refs/heads/4.0
Commit: d5a78038b85058ab0667ab669c863ce1aeb34ef2
Parents: 152ce87
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Aug 9 15:09:12 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Aug 9 18:24:35 2014 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/schema/PMetaData.java    |  12 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    | 412 +++++++++++--------
 .../phoenix/compile/ViewCompilerTest.java       |   2 +-
 .../java/org/apache/phoenix/query/BaseTest.java |   4 -
 .../phoenix/schema/PMetaDataImplTest.java       |  78 ++--
 5 files changed, 279 insertions(+), 229 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5a78038/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
index 5ddd5bb..c104473 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
@@ -20,18 +20,12 @@ package org.apache.phoenix.schema;
 import org.apache.phoenix.query.MetaDataMutated;
 
 
-public interface PMetaData extends MetaDataMutated {
-    public static interface Cache extends Iterable<PTable> {
-        public Cache clone();
-        public PTable get(PTableKey key);
-        public PTable put(PTableKey key, PTable value);
-        public PTable remove(PTableKey key);
-        public int size();
-    }
+public interface PMetaData extends MetaDataMutated, Iterable<PTable>, Cloneable {
     public static interface Pruner {
         public boolean prune(PTable table);
     }
-    public Cache getTables();
+    public int size();
+    public PMetaData clone();
     public PTable getTable(PTableKey key) throws TableNotFoundException;
     public PMetaData pruneTables(Pruner pruner);
 }
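
The reshaped interface lets callers iterate and prune the metadata cache directly, without going through the removed nested Cache type. A minimal caller-side sketch of the new surface (illustrative only; the wrapper class and method below are not part of this commit):

import org.apache.phoenix.schema.PMetaData;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;

public class MetaDataCacheExample {
    // Count the cached views, then return a snapshot with all views pruned.
    static PMetaData dropViews(PMetaData metaData) {
        int viewCount = 0;
        for (PTable table : metaData) { // PMetaData is now Iterable<PTable>
            if (table.getType() == PTableType.VIEW) {
                viewCount++;
            }
        }
        System.out.println("views cached: " + viewCount);
        // pruneTables() returns a pruned snapshot; the receiver is not mutated.
        return metaData.pruneTables(new PMetaData.Pruner() {
            @Override
            public boolean prune(PTable table) {
                return table.getType() == PTableType.VIEW;
            }
        });
    }
}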

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5a78038/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index a487020..dff0e40 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -19,9 +19,9 @@ package org.apache.phoenix.schema;
 
 import java.sql.SQLException;
 import java.util.Comparator;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.phoenix.util.TimeKeeper;
 
@@ -39,220 +39,270 @@ import com.google.common.primitives.Longs;
  *
  */
 public class PMetaDataImpl implements PMetaData {
-    private final Cache metaData;
-    
-    public PMetaDataImpl(int initialCapacity, long maxByteSize) {
-        this.metaData = new CacheImpl(initialCapacity, maxByteSize, TimeKeeper.SYSTEM);
-    }
-
-    public PMetaDataImpl(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
-        this.metaData = new CacheImpl(initialCapacity, maxByteSize, timeKeeper);
-    }
-
-    public PMetaDataImpl(Cache tables) {
-        this.metaData = tables.clone();
-    }
-    
-    private static class CacheImpl implements Cache, Cloneable {
-        private static final int MIN_REMOVAL_SIZE = 3;
-        private static final Comparator<PTableAccess> COMPARATOR = new Comparator<PTableAccess>() {
-            @Override
-            public int compare(PTableAccess tableAccess1, PTableAccess tableAccess2) {
-                return Longs.compare(tableAccess1.lastAccessTime, tableAccess2.lastAccessTime);
+        private static final class PTableRef {
+            public final PTable table;
+            public final int estSize;
+            public volatile long lastAccessTime;
+            
+            public PTableRef(PTable table, long lastAccessTime, int estSize) {
+                this.table = table;
+                this.lastAccessTime = lastAccessTime;
+                this.estSize = estSize;
             }
-        };
-        private static final MinMaxPriorityQueue.Builder<PTableAccess> BUILDER = MinMaxPriorityQueue.orderedBy(COMPARATOR);
-        
-        private long currentSize;
-        private final long maxByteSize;
-        private final int expectedCapacity;
-        private final TimeKeeper timeKeeper;
 
-        // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
-        // safely across multiple threads (as the underlying collection is not thread safe).
-        // Instead, we track access time and prune it based on the copy we've made.
-        private final HashMap<PTableKey,PTableAccess> tables;
-        
-        private static HashMap<PTableKey,PTableAccess> newMap(int expectedCapacity) {
-            return Maps.newHashMapWithExpectedSize(expectedCapacity);
-        }
+            public PTableRef(PTable table, long lastAccessTime) {
+                this (table, lastAccessTime, table.getEstimatedSize());
+            }
 
-        private static HashMap<PTableKey,PTableAccess> cloneMap(HashMap<PTableKey,PTableAccess> tables, int expectedCapacity) {
-            HashMap<PTableKey,PTableAccess> newTables = Maps.newHashMapWithExpectedSize(Math.max(tables.size(),expectedCapacity));
-            // Copy value so that access time isn't changing anymore
-            for (PTableAccess tableAccess : tables.values()) {
-                newTables.put(tableAccess.table.getKey(), new PTableAccess(tableAccess));
+            public PTableRef(PTableRef tableRef) {
+                this (tableRef.table, tableRef.lastAccessTime, tableRef.estSize);
             }
-            return newTables;
         }
 
-        private CacheImpl(CacheImpl toClone) {
-            this.timeKeeper = toClone.timeKeeper;
-            this.maxByteSize = toClone.maxByteSize;
-            this.currentSize = toClone.currentSize;
-            this.expectedCapacity = toClone.expectedCapacity;
-            this.tables = cloneMap(toClone.tables, toClone.expectedCapacity);
-        }
-        
-        public CacheImpl(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
-            this.currentSize = 0;
-            this.maxByteSize = maxByteSize;
-            this.expectedCapacity = initialCapacity;
-            this.tables = newMap(initialCapacity);
-            this.timeKeeper = timeKeeper;
-        }
-        
-        @Override
-        public Cache clone() {
-            return new CacheImpl(this);
-        }
-        
-        @Override
-        public PTable get(PTableKey key) {
-            PTableAccess tableAccess = tables.get(key);
-            if (tableAccess == null) {
-                return null;
+        private static class PTableCache implements Cloneable {
+            private static final int MIN_REMOVAL_SIZE = 3;
+            private static final Comparator<PTableRef> COMPARATOR = new Comparator<PTableRef>() {
+                @Override
+                public int compare(PTableRef tableRef1, PTableRef tableRef2) {
+                    return Longs.compare(tableRef1.lastAccessTime, tableRef2.lastAccessTime);
+                }
+            };
+            private static final MinMaxPriorityQueue.Builder<PTableRef> BUILDER = MinMaxPriorityQueue.orderedBy(COMPARATOR);
+            
+            private long currentByteSize;
+            private final long maxByteSize;
+            private final int expectedCapacity;
+            private final TimeKeeper timeKeeper;
+
+            private final Map<PTableKey,PTableRef> tables;
+            
+            private static Map<PTableKey,PTableRef> newMap(int expectedCapacity) {
+                // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
+                // safely across multiple threads (as the underlying collection is not thread safe).
+                // Instead, we track access time and prune it based on the copy we've made.
+                return Maps.newHashMapWithExpectedSize(expectedCapacity);
             }
-            tableAccess.lastAccessTime = timeKeeper.getCurrentTime();
-            return tableAccess.table;
-        }
-        
-        private void pruneIfNecessary() {
-            // We have our own copy of the Map, as we copy on write, so its safe to remove from it.
-            while (currentSize > maxByteSize && size() > 1) {
-                // Estimate how many we need to remove by dividing the <number of bytes we're over the max>
-                // by the <average size of an entry>. We'll keep at least MIN_REMOVAL_SIZE.
-                int nToRemove = Math.max(MIN_REMOVAL_SIZE, (int)Math.ceil((currentSize-maxByteSize) / ((double)currentSize / size())));
-                MinMaxPriorityQueue<PTableAccess> toRemove = BUILDER.expectedSize(nToRemove+1).create();
-                // Make one pass through to find the <nToRemove> least recently accessed tables
-                for (PTableAccess tableAccess : this.tables.values()) {
-                    toRemove.add(tableAccess);
-                    if (toRemove.size() > nToRemove) {
-                        toRemove.removeLast();
-                    }
+
+            private static Map<PTableKey,PTableRef> cloneMap(Map<PTableKey,PTableRef> tables, int expectedCapacity) {
+                Map<PTableKey,PTableRef> newTables = newMap(Math.max(tables.size(),expectedCapacity));
+                // Copy value so that access time isn't changing anymore
+                for (PTableRef tableAccess : tables.values()) {
+                    newTables.put(tableAccess.table.getKey(), new PTableRef(tableAccess));
                 }
-                // Of those least recently accessed tables, remove the least recently used
-                // until we're under our size capacity
-                do {
-                    PTableAccess tableAccess = toRemove.removeFirst();
-                    remove(tableAccess.table.getKey());
-                } while (currentSize > maxByteSize && size() > 1 && !toRemove.isEmpty());
+                return newTables;
             }
-        }
-        
-        @Override
-        public PTable put(PTableKey key, PTable value) {
-            currentSize += value.getEstimatedSize();
-            PTableAccess oldTableAccess = tables.put(key, new PTableAccess(value, timeKeeper.getCurrentTime()));
-            PTable oldTable = null;
-            if (oldTableAccess != null) {
-                currentSize -= oldTableAccess.table.getEstimatedSize();
-                oldTable = oldTableAccess.table;
+
+            private PTableCache(PTableCache toClone) {
+                this.timeKeeper = toClone.timeKeeper;
+                this.maxByteSize = toClone.maxByteSize;
+                this.currentByteSize = toClone.currentByteSize;
+                this.expectedCapacity = toClone.expectedCapacity;
+                this.tables = cloneMap(toClone.tables, toClone.expectedCapacity);
             }
-            pruneIfNecessary();
-            return oldTable;
-        }
-        
-        @Override
-        public PTable remove(PTableKey key) {
-            PTableAccess value = tables.remove(key);
-            if (value == null) {
-                return null;
+            
+            public PTableCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
+                this.currentByteSize = 0;
+                this.maxByteSize = maxByteSize;
+                this.expectedCapacity = initialCapacity;
+                this.tables = newMap(initialCapacity);
+                this.timeKeeper = timeKeeper;
             }
-            currentSize -= value.table.getEstimatedSize();
-            return value.table;
-        }
-        
-        @Override
-        public Iterator<PTable> iterator() {
-            final Iterator<PTableAccess> iterator = tables.values().iterator();
-            return new Iterator<PTable>() {
-
-                @Override
-                public boolean hasNext() {
-                    return iterator.hasNext();
+            
+            public PTableRef get(PTableKey key) {
+                PTableRef tableAccess = tables.get(key);
+                if (tableAccess == null) {
+                    return null;
                 }
+                tableAccess.lastAccessTime = timeKeeper.getCurrentTime();
+                return tableAccess;
+            }
+            
+            @Override
+            public PTableCache clone() {
+                return new PTableCache(this);
+            }
 
-                @Override
-                public PTable next() {
-                    return iterator.next().table;
+            /**
+             * Used when the cache has grown past its max size, to clone it in a single pass.
+             * Removes the least recently used tables to bring the cache size back below its
+             * max size by at least the overage amount.
+             */
+            public PTableCache cloneMinusOverage(long overage) {
+                assert(overage > 0);
+                int nToRemove = Math.max(MIN_REMOVAL_SIZE, (int)Math.ceil((currentByteSize-maxByteSize) / ((double)currentByteSize / size())) + 1);
+                MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
+                PTableCache newCache = new PTableCache(this.size(), this.maxByteSize, this.timeKeeper);
+                
+                long toRemoveBytes = 0;
+                // Add to the new cache, but track the references to remove when done
+                // to bring the cache at least the overage amount below its max size.
+                for (PTableRef tableRef : tables.values()) {
+                    newCache.put(tableRef.table.getKey(), new PTableRef(tableRef));
+                    toRemove.add(tableRef);
+                    toRemoveBytes += tableRef.estSize;
+                    if (toRemoveBytes - toRemove.peekLast().estSize > overage) {
+                        PTableRef removedRef = toRemove.removeLast();
+                        toRemoveBytes -= removedRef.estSize;
+                    }
+                }
+                for (PTableRef toRemoveRef : toRemove) {
+                    newCache.remove(toRemoveRef.table.getKey());
                 }
+                return newCache;
+            }
 
-                @Override
-                public void remove() {
-                    throw new UnsupportedOperationException();
+            private PTable put(PTableKey key, PTableRef ref) {
+                currentByteSize += ref.estSize;
+                PTableRef oldTableAccess = tables.put(key, ref);
+                PTable oldTable = null;
+                if (oldTableAccess != null) {
+                    currentByteSize -= oldTableAccess.estSize;
+                    oldTable = oldTableAccess.table;
                 }
-                
-            };
-        }
+                return oldTable;
+            }
 
-        @Override
-        public int size() {
-            return tables.size();
-        }
-        
-        private static class PTableAccess {
-            public PTable table;
-            public volatile long lastAccessTime;
+            public PTable put(PTableKey key, PTable value) {
+                return put(key, new PTableRef(value, timeKeeper.getCurrentTime()));
+            }
             
-            public PTableAccess(PTable table, long lastAccessTime) {
-                this.table = table;
-                this.lastAccessTime = lastAccessTime;
+            public PTable putDuplicate(PTableKey key, PTable value) {
+                return put(key, new PTableRef(value, timeKeeper.getCurrentTime(), 0));
+            }
+            
+            public PTable remove(PTableKey key) {
+                PTableRef value = tables.remove(key);
+                if (value == null) {
+                    return null;
+                }
+                currentByteSize -= value.estSize;
+                return value.table;
+            }
+            
+            public Iterator<PTable> iterator() {
+                final Iterator<PTableRef> iterator = tables.values().iterator();
+                return new Iterator<PTable>() {
+
+                    @Override
+                    public boolean hasNext() {
+                        return iterator.hasNext();
+                    }
+
+                    @Override
+                    public PTable next() {
+                        return iterator.next().table;
+                    }
+
+                    @Override
+                    public void remove() {
+                        throw new UnsupportedOperationException();
+                    }
+                    
+                };
+            }
+
+            public int size() {
+                return tables.size();
+            }
+
+            public long getCurrentSize() {
+                return this.currentByteSize;
             }
 
-            public PTableAccess(PTableAccess tableAccess) {
-                this.table = tableAccess.table;
-                this.lastAccessTime = tableAccess.lastAccessTime;
+            public long getMaxSize() {
+                return this.maxByteSize;
             }
         }
+            
+    private final PTableCache metaData;
+    
+    public PMetaDataImpl(int initialCapacity, long maxByteSize) {
+        this.metaData = new PTableCache(initialCapacity, maxByteSize, TimeKeeper.SYSTEM);
+    }
+
+    public PMetaDataImpl(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
+        this.metaData = new PTableCache(initialCapacity, maxByteSize, timeKeeper);
+    }
+
+    private PMetaDataImpl(PTableCache tables) {
+        this.metaData = tables.clone();
+    }
+    
+    @Override
+    public PMetaDataImpl clone() {
+        return new PMetaDataImpl(this.metaData);
     }
     
     @Override
     public PTable getTable(PTableKey key) throws TableNotFoundException {
-        PTable table = metaData.get(key);
-        if (table == null) {
+        PTableRef ref = metaData.get(key);
+        if (ref == null) {
             throw new TableNotFoundException(key.getName());
         }
-        return table;
+        return ref.table;
     }
 
     @Override
-    public Cache getTables() {
-        return metaData;
+    public int size() {
+        return metaData.size();
     }
 
 
     @Override
     public PMetaData addTable(PTable table) throws SQLException {
-        Cache tables = metaData.clone();
-        PTable oldTable = tables.put(table.getKey(), table);
+        int netGain = 0;
+        PTableKey key = table.getKey();
+        PTableRef oldTableRef = metaData.get(key);
+        if (oldTableRef != null) {
+            netGain -= oldTableRef.estSize;
+        }
+        PTable newParentTable = null;
         if (table.getParentName() != null) { // Upsert new index table into parent data table list
             String parentName = table.getParentName().getString();
-            PTable parentTable = tables.get(new PTableKey(table.getTenantId(), parentName));
+            PTableRef oldParentRef = metaData.get(new PTableKey(table.getTenantId(), parentName));
             // If parentTable isn't cached, that's OK; we can skip this
-            if (parentTable != null) {
-                List<PTable> oldIndexes = parentTable.getIndexes();
+            if (oldParentRef != null) {
+                List<PTable> oldIndexes = oldParentRef.table.getIndexes();
                 List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size() + 1);
                 newIndexes.addAll(oldIndexes);
-                if (oldTable != null) {
-                    newIndexes.remove(oldTable);
+                for (int i = 0; i < newIndexes.size(); i++) {
+                    PTable index = newIndexes.get(i);
+                    if (index.getName().equals(table.getName())) {
+                        newIndexes.remove(i);
+                        break;
+                    }
                 }
                 newIndexes.add(table);
-                parentTable = PTableImpl.makePTable(parentTable, table.getTimeStamp(), newIndexes);
-                tables.put(parentTable.getKey(), parentTable);
+                netGain -= oldParentRef.estSize;
+                newParentTable = PTableImpl.makePTable(oldParentRef.table, table.getTimeStamp(), newIndexes);
+                netGain += newParentTable.getEstimatedSize();
             }
         }
+        if (newParentTable == null) { // Don't count in gain if we found a parent table, as its accounted for in newParentTable
+            netGain += table.getEstimatedSize();
+        }
+        long overage = metaData.getCurrentSize() + netGain - metaData.getMaxSize();
+        PTableCache tables = overage <= 0 ? metaData.clone() : metaData.cloneMinusOverage(overage);
+        
+        if (newParentTable != null) { // Upsert new index table into parent data table list
+            tables.put(newParentTable.getKey(), newParentTable);
+            tables.putDuplicate(table.getKey(), table);
+        } else {
+            tables.put(table.getKey(), table);
+        }
         for (PTable index : table.getIndexes()) {
-            tables.put(index.getKey(), index);
+            tables.putDuplicate(index.getKey(), index);
         }
         return new PMetaDataImpl(tables);
     }
 
     @Override
     public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException {
-        PTable table = getTable(new PTableKey(tenantId, tableName));
-        Cache tables = metaData.clone();
-        List<PColumn> oldColumns = PTableImpl.getColumnsToClone(table);
+        PTableRef oldTableRef = metaData.get(new PTableKey(tenantId, tableName));
+        if (oldTableRef == null) {
+            return this;
+        }
+        List<PColumn> oldColumns = PTableImpl.getColumnsToClone(oldTableRef.table);
         List<PColumn> newColumns;
         if (columnsToAdd.isEmpty()) {
             newColumns = oldColumns;
@@ -261,15 +311,14 @@ public class PMetaDataImpl implements PMetaData {
             newColumns.addAll(oldColumns);
             newColumns.addAll(columnsToAdd);
         }
-        PTable newTable = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, newColumns, isImmutableRows);
-        tables.put(newTable.getKey(), newTable);
-        return new PMetaDataImpl(tables);
+        PTable newTable = PTableImpl.makePTable(oldTableRef.table, tableTimeStamp, tableSeqNum, newColumns, isImmutableRows);
+        return addTable(newTable);
     }
 
     @Override
     public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
         PTable table;
-        Cache tables = metaData.clone();
+        PTableCache tables = metaData.clone();
         if ((table=tables.remove(new PTableKey(tenantId, tableName))) == null) {
             return this;
         } else {
@@ -278,14 +327,20 @@ public class PMetaDataImpl implements PMetaData {
             }
             // also remove its reference from parent table
             PName parent = table.getParentName();
-            PTable parentTable = null;
-            if(parent != null && (parentTable=tables.get(new PTableKey(tenantId, parent.getString()))) != null) {
-                List<PTable> oldIndexes = parentTable.getIndexes();
+            PTableRef parentTableRef = null;
+            if(parent != null && (parentTableRef=tables.get(new PTableKey(tenantId, parent.getString()))) != null) {
+                List<PTable> oldIndexes = parentTableRef.table.getIndexes();
                 if(oldIndexes != null && !oldIndexes.isEmpty()) {
 	                List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size());
 	                newIndexes.addAll(oldIndexes);
-	                newIndexes.remove(table);
-	                parentTable = PTableImpl.makePTable(parentTable, table.getTimeStamp(), newIndexes);
+	                for (int i = 0; i < newIndexes.size(); i++) {
+	                    PTable index = newIndexes.get(i);
+	                    if (index.getName().equals(table.getName())) {
+	                        newIndexes.remove(i);
+	                        break;
+	                    }
+	                }
+	                PTable parentTable = PTableImpl.makePTable(parentTableRef.table, table.getTimeStamp(), newIndexes);
 	                tables.put(parentTable.getKey(), parentTable);
                 }
             }
@@ -296,7 +351,7 @@ public class PMetaDataImpl implements PMetaData {
     @Override
     public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
         PTable table = getTable(new PTableKey(tenantId, tableName));
-        Cache tables = metaData.clone();
+        PTableCache tables = metaData.clone();
         PColumn column;
         if (familyName == null) {
             column = table.getPKColumn(columnName);
@@ -327,8 +382,8 @@ public class PMetaDataImpl implements PMetaData {
 
     @Override
     public PMetaData pruneTables(Pruner pruner) {
-        List<PTableKey> keysToPrune = Lists.newArrayListWithExpectedSize(this.getTables().size());
-        for (PTable table : this.getTables()) {
+        List<PTableKey> keysToPrune = Lists.newArrayListWithExpectedSize(this.size());
+        for (PTable table : this) {
             if (pruner.prune(table)) {
                 keysToPrune.add(table.getKey());
             }
@@ -336,10 +391,15 @@ public class PMetaDataImpl implements PMetaData {
         if (keysToPrune.isEmpty()) {
             return this;
         }
-        Cache tables = metaData.clone();
+        PTableCache tables = metaData.clone();
         for (PTableKey key : keysToPrune) {
             tables.remove(key);
         }
         return new PMetaDataImpl(tables);
     }
+
+    @Override
+    public Iterator<PTable> iterator() {
+        return metaData.iterator();
+    }
 }
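
When an addTable() would push the cache over its byte budget, cloneMinusOverage() selects eviction victims in a single pass: every entry is copied into the new cache while a bounded MinMaxPriorityQueue keeps the least recently accessed entries whose combined estimated size covers the overage, and those are removed from the copy at the end. A standalone sketch of just that selection step, with an assumed Entry stand-in instead of Phoenix's PTableRef:

import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Set;

import com.google.common.collect.MinMaxPriorityQueue;

class EvictionSketch {
    static final class Entry {
        final String key;
        final int estSize;
        final long lastAccessTime;
        Entry(String key, int estSize, long lastAccessTime) {
            this.key = key;
            this.estSize = estSize;
            this.lastAccessTime = lastAccessTime;
        }
    }

    // Returns the keys of the least recently used entries whose combined
    // estimated size covers at least `overage` bytes.
    static Set<String> selectForEviction(Collection<Entry> entries, long overage, int nToRemove) {
        MinMaxPriorityQueue<Entry> toRemove = MinMaxPriorityQueue
                .orderedBy(Comparator.comparingLong((Entry e) -> e.lastAccessTime))
                .expectedSize(nToRemove)
                .create();
        long toRemoveBytes = 0;
        for (Entry e : entries) {
            toRemove.add(e);
            toRemoveBytes += e.estSize;
            // If the candidates still cover the overage without the most recently
            // used one, drop it: we only evict as much as we must.
            if (toRemoveBytes - toRemove.peekLast().estSize > overage) {
                toRemoveBytes -= toRemove.removeLast().estSize;
            }
        }
        Set<String> keys = new HashSet<String>();
        for (Entry e : toRemove) {
            keys.add(e.key);
        }
        return keys;
    }
}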

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5a78038/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java
index 3ce2759..651d58d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java
@@ -64,7 +64,7 @@ public class ViewCompilerTest extends BaseConnectionlessQueryTest {
         
         StringBuilder buf = new StringBuilder();
         int count = 0;
-        for (PTable table : conn.getMetaDataCache().getTables()) {
+        for (PTable table : conn.getMetaDataCache()) {
             if (table.getType() == PTableType.VIEW) {
                 assertEquals(viewType, table.getViewType());
                 conn.createStatement().execute("DROP VIEW " + table.getName().getString());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5a78038/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 90f3490..2147026 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -119,11 +119,9 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseClientManagedTimeIT;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
-import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
-import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
@@ -680,8 +678,6 @@ public abstract class BaseTest {
     
     private static void deletePriorTables(long ts, Connection globalConn, String url) throws Exception {
         DatabaseMetaData dbmd = globalConn.getMetaData();
-        PMetaData cache = globalConn.unwrap(PhoenixConnection.class).getMetaDataCache();
-        cache.getTables();
         // Drop VIEWs first, as we don't allow a TABLE with views to be dropped
         // Tables are sorted by TENANT_ID
         List<String[]> tableTypesList = Arrays.asList(new String[] {PTableType.VIEW.toString()}, new String[] {PTableType.TABLE.toString()});

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5a78038/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
index d6ed351..5584bac 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.schema;
 
 import static org.junit.Assert.assertEquals;
 
+import java.sql.SQLException;
 import java.util.Set;
 
 import org.apache.phoenix.util.TimeKeeper;
@@ -28,22 +29,22 @@ import com.google.common.collect.Sets;
 
 public class PMetaDataImplTest {
     
-    private static void addToTable(PMetaData.Cache cache, String name, int size) {
+    private static PMetaData addToTable(PMetaData metaData, String name, int size) throws SQLException {
         PTable table = new PSizedTable(new PTableKey(null,name), size);
-        cache.put(table.getKey(), table);
+        return metaData.addTable(table);
     }
     
-    private static PTable removeFromTable(PMetaData.Cache cache, String name) {
-        return cache.remove(new PTableKey(null,name));
+    private static PMetaData removeFromTable(PMetaData metaData, String name) throws SQLException {
+        return metaData.removeTable(null, name);
     }
     
-    private static PTable getFromTable(PMetaData.Cache cache, String name) {
-        return cache.get(new PTableKey(null,name));
+    private static PTable getFromTable(PMetaData metaData, String name) throws TableNotFoundException {
+        return metaData.getTable(new PTableKey(null,name));
     }
     
-    private static void assertNames(PMetaData.Cache cache, String... names) {
+    private static void assertNames(PMetaData metaData, String... names) {
         Set<String> actualTables = Sets.newHashSet();
-        for (PTable table : cache) {
+        for (PTable table : metaData) {
             actualTables.add(table.getKey().getName());
         }
         Set<String> expectedTables = Sets.newHashSet(names);
@@ -64,44 +65,43 @@ public class PMetaDataImplTest {
     public void testEviction() throws Exception {
         long maxSize = 10;
         PMetaData metaData = new PMetaDataImpl(5, maxSize, new TestTimeKeeper());
-        PMetaData.Cache cache = metaData.getTables();
-        addToTable(cache, "a", 5);
-        assertEquals(1, cache.size());
-        addToTable(cache, "b", 4);
-        assertEquals(2, cache.size());
-        addToTable(cache, "c", 3);
-        assertEquals(2, cache.size());
-        assertNames(cache, "b","c");
+        metaData = addToTable(metaData, "a", 5);
+        assertEquals(1, metaData.size());
+        metaData = addToTable(metaData, "b", 4);
+        assertEquals(2, metaData.size());
+        metaData = addToTable(metaData, "c", 3);
+        assertEquals(2, metaData.size());
+        assertNames(metaData, "b","c");
 
-        addToTable(cache, "b", 8);
-        assertEquals(1, cache.size());
-        assertNames(cache, "b");
+        metaData = addToTable(metaData, "b", 8);
+        assertEquals(1, metaData.size());
+        assertNames(metaData, "b");
 
-        addToTable(cache, "d", 11);
-        assertEquals(1, cache.size());
-        assertNames(cache, "d");
+        metaData = addToTable(metaData, "d", 11);
+        assertEquals(1, metaData.size());
+        assertNames(metaData, "d");
         
-        removeFromTable(cache, "d");
-        assertNames(cache);
+        metaData = removeFromTable(metaData, "d");
+        assertNames(metaData);
         
-        addToTable(cache, "a", 4);
-        assertEquals(1, cache.size());
-        addToTable(cache, "b", 3);
-        assertEquals(2, cache.size());
-        addToTable(cache, "c", 2);
-        assertEquals(3, cache.size());
-        assertNames(cache, "a", "b","c");
+        metaData = addToTable(metaData, "a", 4);
+        assertEquals(1, metaData.size());
+        metaData = addToTable(metaData, "b", 3);
+        assertEquals(2, metaData.size());
+        metaData = addToTable(metaData, "c", 2);
+        assertEquals(3, metaData.size());
+        assertNames(metaData, "a", "b","c");
         
-        getFromTable(cache, "a");
-        addToTable(cache, "d", 3);
-        assertEquals(3, cache.size());
-        assertNames(cache, "c", "a","d");
+        getFromTable(metaData, "a");
+        metaData = addToTable(metaData, "d", 3);
+        assertEquals(3, metaData.size());
+        assertNames(metaData, "c", "a","d");
         
         // Clone maintains insert order
-        cache = cache.clone();
-        addToTable(cache, "e", 6);
-        assertEquals(2, cache.size());
-        assertNames(cache, "d","e");
+        metaData = metaData.clone();
+        metaData = addToTable(metaData, "e", 6);
+        assertEquals(2, metaData.size());
+        assertNames(metaData, "d","e");
     }
     
     private static class PSizedTable extends PTableImpl {


[2/2] git commit: PHOENIX-1157 Improve abstraction for meta data cache

Posted by ja...@apache.org.
PHOENIX-1157 Improve abstraction for meta data cache

Conflicts:
	phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
	phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/64d136d1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/64d136d1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/64d136d1

Branch: refs/heads/4.0
Commit: 64d136d15707bb5122ae1cfc00717c757086385e
Parents: d5a7803
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Aug 9 18:21:22 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Aug 9 18:27:51 2014 -0700

----------------------------------------------------------------------
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  6 +-
 .../query/ConnectionQueryServicesImpl.java      | 10 +--
 .../query/ConnectionlessQueryServicesImpl.java  |  4 +-
 .../query/DelegateConnectionQueryServices.java  |  4 +-
 .../apache/phoenix/query/MetaDataMutated.java   |  2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  7 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    | 68 +++++++++++++-------
 .../phoenix/schema/PMetaDataImplTest.java       |  3 +-
 8 files changed, 64 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 364e61f..70f88f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -726,10 +726,10 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
-        metaData = metaData.removeTable(tenantId, tableName);
+    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
+        metaData = metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
         //Cascade through to connectionQueryServices too
-        getQueryServices().removeTable(tenantId, tableName);
+        getQueryServices().removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
         return metaData;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 5065632..ee0be95 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -462,7 +462,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     // and the next time it's used it'll be pulled over from the server.
                     if (waitTime <= 0) {
                         logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
-                        metaData = metaData.removeTable(tenantId, tableName);
+                        // There will never be a parentTableName here, as that would only
+                        // be non-null for an index, and we never add/remove columns from an index.
+                        metaData = metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
                         break;
                     }
                     latestMetaDataLock.wait(waitTime);
@@ -493,10 +495,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      }
 
     @Override
-    public PMetaData removeTable(PName tenantId, final String tableName) throws SQLException {
-        synchronized(latestMetaDataLock) {
+    public PMetaData removeTable(PName tenantId, final String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
+        synchronized (latestMetaDataLock) {
             throwConnectionClosedIfNullMetaData();
-            latestMetaData = latestMetaData.removeTable(tenantId, tableName);
+            latestMetaData = latestMetaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
             latestMetaDataLock.notifyAll();
             return latestMetaData;
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 223abb6..ea121dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -160,9 +160,9 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName)
+    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
             throws SQLException {
-        return metaData = metaData.removeTable(tenantId, tableName);
+        return metaData = metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 3c119de..306d536 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -86,9 +86,9 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName)
+    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
             throws SQLException {
-        return getDelegate().removeTable(tenantId, tableName);
+        return getDelegate().removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
index 2e6da04..1b8ebda 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
@@ -35,7 +35,7 @@ import org.apache.phoenix.schema.PTable;
  */
 public interface MetaDataMutated {
     PMetaData addTable(PTable table) throws SQLException;
-    PMetaData removeTable(PName tenantId, String tableName) throws SQLException;
+    PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException;
     PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException;
     PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException;
 }
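
Call sites that have no parent table or concrete timestamp now pass null and HConstants.LATEST_TIMESTAMP, as the MetaDataClient changes below show. A minimal sketch of such a caller (the wrapper class here is illustrative, not part of this commit):

import java.sql.SQLException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.schema.PMetaData;
import org.apache.phoenix.schema.PName;

final class RemoveTableExample {
    // Drop a table from a metadata snapshot when no parent table or concrete
    // timestamp is known (e.g. the add/remove-column paths in this commit).
    static PMetaData dropTable(PMetaData metaData, PName tenantId, String fullTableName)
            throws SQLException {
        // parentTableName is only non-null when removing an index;
        // LATEST_TIMESTAMP stands in when no timestamp is available.
        return metaData.removeTable(tenantId, fullTableName, null, HConstants.LATEST_TIMESTAMP);
    }
}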

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 0e25449..cda956d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -343,7 +343,7 @@ public class MetaDataClient {
                         return result;
                     }
                     if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
-                        connection.removeTable(tenantId, fullTableName);
+                        connection.removeTable(tenantId, fullTableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                     }
                 }
             }
@@ -1460,7 +1460,7 @@ public class MetaDataClient {
                     throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                         .setSchemaName(schemaName).setTableName(tableName).build().buildException();
                 default:
-                    connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName));
+                    connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
                                         
                     if (result.getTable() != null && tableType != PTableType.VIEW) {
                         connection.setAutoCommit(true);
@@ -1527,7 +1527,8 @@ public class MetaDataClient {
         PName tenantId = connection.getTenantId();
         switch (mutationCode) {
         case TABLE_NOT_FOUND:
-            connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName));
+            // Only called for add/remove column, so parentTableName will always be null
+            connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), null, HConstants.LATEST_TIMESTAMP);
             throw new TableNotFoundException(schemaName, tableName);
         case UNALLOWED_TABLE_MUTATION:
             String columnName = null;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index dff0e40..8b26709 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -23,6 +23,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.util.TimeKeeper;
 
 import com.google.common.collect.Lists;
@@ -316,41 +317,60 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName) throws SQLException {
-        PTable table;
-        PTableCache tables = metaData.clone();
-        if ((table=tables.remove(new PTableKey(tenantId, tableName))) == null) {
-            return this;
+    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
+        PTableCache tables = null;
+        PTableRef parentTableRef = null;
+        PTableKey key = new PTableKey(tenantId, tableName);
+        if (metaData.get(key) == null) {
+            if (parentTableName != null) {
+                parentTableRef = metaData.get(new PTableKey(tenantId, parentTableName));
+            }
+            if (parentTableRef == null) {
+                return this;
+            }
         } else {
+            tables = metaData.clone();
+            PTable table = tables.remove(key);
             for (PTable index : table.getIndexes()) {
                 tables.remove(index.getKey());
             }
-            // also remove its reference from parent table
-            PName parent = table.getParentName();
-            PTableRef parentTableRef = null;
-            if(parent != null && (parentTableRef=tables.get(new PTableKey(tenantId, parent.getString()))) != null) {
-                List<PTable> oldIndexes = parentTableRef.table.getIndexes();
-                if(oldIndexes != null && !oldIndexes.isEmpty()) {
-	                List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size());
-	                newIndexes.addAll(oldIndexes);
-	                for (int i = 0; i < newIndexes.size(); i++) {
-	                    PTable index = newIndexes.get(i);
-	                    if (index.getName().equals(table.getName())) {
-	                        newIndexes.remove(i);
-	                        break;
-	                    }
-	                }
-	                PTable parentTable = PTableImpl.makePTable(parentTableRef.table, table.getTimeStamp(), newIndexes);
-	                tables.put(parentTable.getKey(), parentTable);
+            if (table.getParentName() != null) {
+                parentTableRef = tables.get(new PTableKey(tenantId, table.getParentName().getString()));
+            }
+        }
+        // also remove its reference from parent table
+        if (parentTableRef != null) {
+            List<PTable> oldIndexes = parentTableRef.table.getIndexes();
+            if(oldIndexes != null && !oldIndexes.isEmpty()) {
+                List<PTable> newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size());
+                newIndexes.addAll(oldIndexes);
+                for (int i = 0; i < newIndexes.size(); i++) {
+                    PTable index = newIndexes.get(i);
+                    if (index.getName().getString().equals(tableName)) {
+                        newIndexes.remove(i);
+                        PTable parentTable = PTableImpl.makePTable(
+                                parentTableRef.table,
+                                tableTimeStamp == HConstants.LATEST_TIMESTAMP ? parentTableRef.table.getTimeStamp() : tableTimeStamp,
+                                newIndexes);
+                        if (tables == null) { 
+                            tables = metaData.clone();
+                        }
+                        tables.put(parentTable.getKey(), parentTable);
+                        break;
+                    }
                 }
             }
         }
-        return new PMetaDataImpl(tables);
+        return tables == null ? this : new PMetaDataImpl(tables);
     }
     
     @Override
     public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
-        PTable table = getTable(new PTableKey(tenantId, tableName));
+        PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName));
+        if (tableRef == null) {
+            return this;
+        }
+        PTable table = tableRef.table;
         PTableCache tables = metaData.clone();
         PColumn column;
         if (familyName == null) {

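Both addTable() and removeTable() now drop an index from the parent table's index list by comparing names, where the old code relied on List.remove(oldTable); a rebuilt PTable for the same index need not compare equal to the cached instance. A reduced sketch of the pattern, with String names standing in for PTable instances:

import java.util.ArrayList;
import java.util.List;

final class IndexListSketch {
    // Copy the index list, dropping the first entry whose name matches.
    static List<String> withoutIndex(List<String> indexNames, String nameToDrop) {
        List<String> newIndexes = new ArrayList<String>(indexNames);
        for (int i = 0; i < newIndexes.size(); i++) {
            if (newIndexes.get(i).equals(nameToDrop)) {
                newIndexes.remove(i); // remove the first match only
                break;
            }
        }
        return newIndexes;
    }
}
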
http://git-wip-us.apache.org/repos/asf/phoenix/blob/64d136d1/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
index 5584bac..9379ef3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.sql.SQLException;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.util.TimeKeeper;
 import org.junit.Test;
 
@@ -35,7 +36,7 @@ public class PMetaDataImplTest {
     }
     
     private static PMetaData removeFromTable(PMetaData metaData, String name) throws SQLException {
-        return metaData.removeTable(null, name);
+        return metaData.removeTable(null, name, null, HConstants.LATEST_TIMESTAMP);
     }
     
     private static PTable getFromTable(PMetaData metaData, String name) throws TableNotFoundException {