Posted to commits@usergrid.apache.org by to...@apache.org on 2014/08/05 00:14:54 UTC

[01/11] git commit: Merge branch 'USERGRID-154' into USERGRID-188

Repository: incubator-usergrid
Updated Branches:
  refs/heads/USERGRID-188 [created] cd043b143


Merge branch 'USERGRID-154' into USERGRID-188


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/4cfc849f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/4cfc849f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/4cfc849f

Branch: refs/heads/USERGRID-188
Commit: 4cfc849fdde7eafe4fbc0b1ef08302dd1406fa27
Parents: 998dd71 2a98cae
Author: Todd Nine <tn...@apigee.com>
Authored: Tue Jul 1 18:13:25 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Tue Jul 1 18:13:25 2014 -0600

----------------------------------------------------------------------
 .../impl/EntityCollectionManagerImpl.java       |   2 +-
 .../core/hystrix/HystrixCassandra.java          | 132 +++++
 .../core/hystrix/HystrixObservable.java         |  92 ---
 .../persistence/core/rx/ObservableIterator.java |  15 +-
 stack/corepersistence/graph/pom.xml             |   8 +-
 .../usergrid/persistence/graph/GraphFig.java    |  38 +-
 .../graph/exception/GraphRuntimeException.java  |  52 ++
 .../graph/guice/CommitLogEdgeSerialization.java |  34 --
 .../persistence/graph/guice/GraphModule.java    | 164 +-----
 .../graph/impl/GraphManagerImpl.java            | 230 ++++----
 .../graph/impl/stage/EdgeDeleteRepairImpl.java  |  27 +-
 .../graph/impl/stage/EdgeMetaRepairImpl.java    |  55 +-
 .../graph/impl/stage/EdgeWriteCompact.java      |  48 --
 .../graph/impl/stage/EdgeWriteCompactImpl.java  | 140 -----
 .../impl/stage/NodeDeleteListenerImpl.java      |  76 +--
 .../impl/EdgeSerializationImpl.java             |  34 +-
 .../serialization/impl/MergedEdgeReader.java    |  90 ---
 .../impl/MergedEdgeReaderImpl.java              | 501 ----------------
 .../impl/NodeSerializationImpl.java             |   5 +-
 .../shard/EdgeShardCounterSerialization.java    |  63 ---
 .../impl/shard/NodeShardAllocation.java         |   4 +-
 .../impl/shard/NodeShardApproximation.java      |   6 +
 .../impl/shard/NodeShardCache.java              |  10 -
 .../serialization/impl/shard/count/Counter.java | 125 ++++
 .../shard/count/NodeShardApproximationImpl.java | 207 +++++++
 .../count/NodeShardCounterSerialization.java    |  48 ++
 .../NodeShardCounterSerializationImpl.java      | 139 +++++
 .../impl/shard/count/ShardKey.java              | 102 ++++
 .../impl/EdgeShardCounterSerializationImpl.java | 141 -----
 .../shard/impl/NodeShardAllocationImpl.java     | 137 +++--
 .../shard/impl/NodeShardApproximationImpl.java  | 158 ------
 .../impl/shard/impl/NodeShardCacheImpl.java     |  32 +-
 .../shard/impl/SizebasedEdgeShardStrategy.java  |  16 +-
 .../shard/impl/TimebasedEdgeShardStrategy.java  |  96 ----
 .../persistence/graph/GraphManagerIT.java       |  10 +-
 .../graph/GraphManagerShardingIT.java           | 189 +++++++
 .../graph/StorageGraphManagerIT.java            |   8 +-
 .../graph/impl/EdgeDeleteListenerTest.java      | 185 +-----
 .../graph/impl/EdgeWriteListenerTest.java       | 451 ---------------
 .../graph/impl/NodeDeleteListenerTest.java      |   6 +-
 .../graph/impl/stage/EdgeDeleteRepairTest.java  |  29 +-
 .../graph/impl/stage/EdgeMetaRepairTest.java    |   8 +-
 .../CommitlogSerializationTest.java             |  47 --
 .../MergedEdgeReaderImplComparatorTest.java     | 254 ---------
 .../impl/MergedEdgeReaderTest.java              | 507 -----------------
 .../impl/shard/CountMinSketchTest.java          | 249 --------
 .../EdgeShardCounterSerializationTest.java      | 148 -----
 .../impl/shard/NodeShardAllocationTest.java     |  59 +-
 .../impl/shard/NodeShardApproximationTest.java  | 142 -----
 .../shard/count/NodeShardApproximationTest.java | 565 +++++++++++++++++++
 .../NodeShardCounterSerializationTest.java      | 126 +++++
 stack/corepersistence/pom.xml                   |   2 +-
 52 files changed, 2087 insertions(+), 3925 deletions(-)
----------------------------------------------------------------------



[04/11] git commit: Checkpoint

Posted by to...@apache.org.
Checkpoint


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/7425ba5d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/7425ba5d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/7425ba5d

Branch: refs/heads/USERGRID-188
Commit: 7425ba5df6b79c40c7632824b42e1444b5c91780
Parents: 72f0d7d
Author: Todd Nine <tn...@apigee.com>
Authored: Mon Jul 14 15:01:37 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Mon Jul 14 15:01:37 2014 -0600

----------------------------------------------------------------------
 .../usergrid/persistence/graph/GraphFig.java    |  10 +-
 .../impl/EdgeSerializationImpl.java             |  37 ++--
 .../impl/shard/NodeShardApproximation.java      |  11 +-
 .../serialization/impl/shard/count/Counter.java |   8 +-
 .../shard/count/NodeShardApproximationImpl.java | 169 ++++++++++++-------
 .../impl/shard/count/ShardKey.java              |  56 +++---
 .../shard/impl/EdgeShardSerializationImpl.java  |   2 +-
 .../graph/serialization/util/EdgeHasher.java    |  86 ++++++++++
 .../graph/GraphManagerShardingIT.java           |  13 +-
 .../impl/shard/EdgeShardSerializationTest.java  |  39 +++--
 .../impl/shard/NodeShardAllocationTest.java     |  36 ++--
 .../shard/count/NodeShardApproximationTest.java |  79 +++++----
 .../NodeShardCounterSerializationTest.java      |   7 +-
 13 files changed, 382 insertions(+), 171 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
index e0ce45c..f84fef4 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
@@ -41,9 +41,11 @@ public interface GraphFig extends GuicyFig {
 
     public static final String SHARD_CACHE_TIMEOUT = "usergrid.graph.shard.cache.timeout";
 
-    public static final String COUNTER_WRITE_FLUSH_COUNT = "usergrid.graph.shard.counter.flush.count";
+    public static final String COUNTER_WRITE_FLUSH_COUNT = "usergrid.graph.shard.counter.beginFlush.count";
 
-    public static final String COUNTER_WRITE_FLUSH_INTERVAL = "usergrid.graph.shard.counter.flush.interval";
+    public static final String COUNTER_WRITE_FLUSH_INTERVAL = "usergrid.graph.shard.counter.beginFlush.interval";
+
+    public static final String COUNTER_WRITE_FLUSH_QUEUE_SIZE = "usergrid.graph.shard.counter.queue.size";
 
 
 
@@ -82,5 +84,9 @@ public interface GraphFig extends GuicyFig {
     @Default( "30000" )
     @Key( COUNTER_WRITE_FLUSH_INTERVAL )
     long getCounterFlushInterval();
+
+    @Default( "1000" )
+    @Key(COUNTER_WRITE_FLUSH_QUEUE_SIZE  )
+    int getCounterFlushQueueSize();
 }
 

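The hunk above adds a bounded flush-queue size (COUNTER_WRITE_FLUSH_QUEUE_SIZE, default 1000) alongside the existing counter flush settings. For orientation, a minimal sketch of how a component injected with GraphFig might consume these three values; the class and method names below are illustrative and not part of the commit.

    import org.apache.usergrid.persistence.graph.GraphFig;

    // Illustrative consumer of the counter-flush settings shown in the GraphFig diff above.
    public class CounterFlushSettingsExample {

        private final GraphFig graphFig;

        public CounterFlushSettingsExample( final GraphFig graphFig ) {
            this.graphFig = graphFig;
        }

        /** Flush once enough increments have been buffered or the counter has aged out. */
        public boolean shouldFlush( final long invokeCount, final long counterAgeMillis ) {
            return invokeCount >= graphFig.getCounterFlushCount()
                    || counterAgeMillis >= graphFig.getCounterFlushInterval();
        }

        /** Capacity of the bounded queue drained by the flush worker. */
        public int flushQueueCapacity() {
            return graphFig.getCounterFlushQueueSize();
        }
    }
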
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
index 7c630c1..e49b99d 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
@@ -55,6 +55,7 @@ import org.apache.usergrid.persistence.graph.SearchByIdType;
 import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
@@ -211,9 +212,9 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
 
             @Override
-            public void countEdge( final Id rowId, final long shardId, final String... types ) {
+            public void countEdge( final Id rowId, final NodeType nodeType, final long shardId, final String... types ) {
                 if ( !isDeleted ) {
-                    edgeShardStrategy.increment( scope, rowId, shardId, 1l, types );
+                    edgeShardStrategy.increment( scope, rowId, nodeType, shardId, 1l, types );
                 }
             }
 
@@ -250,8 +251,8 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
 
             @Override
-            public void countEdge( final Id rowId, final long shardId, final String... types ) {
-                edgeShardStrategy.increment( scope, rowId, shardId, -1, types );
+            public void countEdge( final Id rowId, final NodeType nodeType,  final long shardId,  final String... types ) {
+                edgeShardStrategy.increment( scope, rowId, nodeType, shardId, -1, types );
             }
 
 
@@ -298,7 +299,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
 
-        final ShardEntries sourceRowKeyShard = edgeShardStrategy.getWriteShards( scope, sourceNodeId, timestamp, type );
+        final ShardEntries sourceRowKeyShard = edgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type );
 
 
         for ( Shard shard : sourceRowKeyShard.getEntries() ) {
@@ -306,12 +307,12 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
             final long shardId = shard.getShardIndex();
             final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
             op.writeEdge( sourceNodeEdgesCf, sourceRowKey, sourceEdge );
-            op.countEdge( sourceNodeId, shardId, type );
+            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type );
         }
 
 
         final ShardEntries sourceWithTypeRowKeyShard =
-                edgeShardStrategy.getWriteShards( scope, sourceNodeId, timestamp, type, targetNodeType );
+                edgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type, targetNodeType );
 
         for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
 
@@ -319,7 +320,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
             final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
 
             op.writeEdge( sourceNodeTargetTypeCf, sourceRowKeyType, sourceEdge );
-            op.countEdge( sourceNodeId, shardId, type, targetNodeType );
+            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type, targetNodeType );
         }
 
 
@@ -330,19 +331,19 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
 
 
-        final ShardEntries targetRowKeyShard = edgeShardStrategy.getWriteShards( scope, targetNodeId, timestamp, type );
+        final ShardEntries targetRowKeyShard = edgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type );
 
         for ( Shard shard : targetRowKeyShard.getEntries() ) {
             final long shardId = shard.getShardIndex();
             final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
 
             op.writeEdge( targetNodeEdgesCf, targetRowKey, targetEdge );
-            op.countEdge( targetNodeId, shardId, type );
+            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type );
         }
 
 
         final ShardEntries targetWithTypeRowKeyShard =
-                edgeShardStrategy.getWriteShards( scope, targetNodeId, timestamp, type, souceNodeType );
+                edgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type, souceNodeType );
 
 
         for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
@@ -353,7 +354,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
 
             op.writeEdge( targetNodeSourceTypeCf, targetRowKeyType, targetEdge );
-            op.countEdge( targetNodeId, shardId, type, souceNodeType );
+            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type, souceNodeType );
         }
 
         /**
@@ -381,7 +382,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final EdgeSearcher<EdgeRowKey, Long, MarkedEdge> searcher =
                 new EdgeSearcher<EdgeRowKey, Long, MarkedEdge>( scope, maxTimestamp, search.last(),
-                        edgeShardStrategy.getReadShards( scope, sourceId, maxTimestamp, type ) ) {
+                        edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type ) ) {
 
                     @Override
                     protected Serializer<Long> getSerializer() {
@@ -437,7 +438,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
                 new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, sourceId, maxTimestamp, type ) ) {
+                        edgeShardStrategy.getReadShards( scope, sourceId,NodeType.SOURCE, maxTimestamp, type ) ) {
 
 
                     @Override
@@ -482,7 +483,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
                 new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, maxTimestamp, type, targetType ) ) {
+                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type, targetType ) ) {
 
                     @Override
                     protected Serializer<DirectedEdge> getSerializer() {
@@ -524,7 +525,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
                 new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, maxTimestamp, type ) ) {
+                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type ) ) {
 
                     @Override
                     protected Serializer<DirectedEdge> getSerializer() {
@@ -570,7 +571,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
                 new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, maxTimestamp, type, sourceType ) ) {
+                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET,  maxTimestamp, type, sourceType ) ) {
                     @Override
                     protected Serializer<DirectedEdge> getSerializer() {
                         return EDGE_SERIALIZER;
@@ -994,7 +995,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         /**
          * Perform the count on the edge
          */
-        void countEdge( final Id rowId, long shardId, String... types );
+        void countEdge( final Id rowId, NodeType type, long shardId,  String... types );
 
         /**
          * Write the edge into the version cf

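The NodeType argument threaded through countEdge and the shard strategy above keeps the source side of an edge separate from the target side when shard counts are recorded. The enum itself is not included in this mail; a minimal sketch of what it presumably looks like (the real type lives at org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType, imported above):

    // Sketch only: SOURCE and TARGET are the two values used throughout the diff above.
    public enum NodeType {
        SOURCE,
        TARGET
    }
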
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
index 311e605..f2f51ef 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
@@ -55,7 +55,14 @@ public interface NodeShardApproximation {
 
 
     /**
-     * Flush the current counters in the Approximation
+     * Flush the current counters in the Approximation.  Returns immediately once the flush has been queued. You can then use flushPending
+     * to check the state.
      */
-    public void flush();
+    public void beginFlush();
+
+    /**
+     * Return true if there is data to be flushed
+     * @return
+     */
+    public boolean flushPending();
 }

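The interface change above splits the old blocking flush() into a non-blocking beginFlush() plus a flushPending() probe. A minimal sketch of the resulting contract, assuming a test or shutdown path that wants the counters written out before continuing; the helper class is hypothetical:

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;

    // Hypothetical helper illustrating the beginFlush()/flushPending() contract above.
    public final class FlushBarrier {

        private FlushBarrier() {
        }

        /** Kick off a flush, then poll until the background worker drains it or we time out. */
        public static boolean awaitFlush( final NodeShardApproximation approximation, final long timeoutMillis )
                throws InterruptedException {

            approximation.beginFlush();

            final long deadline = System.currentTimeMillis() + timeoutMillis;

            while ( approximation.flushPending() ) {
                if ( System.currentTimeMillis() >= deadline ) {
                    return false;
                }
                Thread.sleep( 100 );
            }

            return true;
        }
    }
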
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/Counter.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/Counter.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/Counter.java
index 4318200..f5666a2 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/Counter.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/Counter.java
@@ -24,6 +24,8 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.google.common.base.Preconditions;
+
 
 /**
  * This class is synchronized for addition.  It is meant to be used across multiple threads
@@ -35,7 +37,7 @@ public class Counter {
     private final AtomicLong invokeCounter;
 
     /**
-     * Pointer to our "current" counter map.  We flush this when time expires or we hit our count
+     * Pointer to our "current" counter map.  We beginFlush this when time expires or we hit our count
      */
     private final ConcurrentHashMap<ShardKey, AtomicLong> counts;
 
@@ -94,6 +96,10 @@ public class Counter {
      * @param other
      */
     public void merge(final Counter other){
+
+        Preconditions.checkNotNull(other, "other cannot be null");
+        Preconditions.checkNotNull( other.counts, "other.counts cannot be null" );
+
         for(Map.Entry<ShardKey, AtomicLong> entry: other.counts.entrySet()){
             add(entry.getKey(), entry.getValue().get());
         }

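merge() above folds another counter's per-key totals into this one, and the new Preconditions calls make a null argument fail fast. A small sketch of the add/merge semantics under placeholder shard coordinates (SOURCE, shard 0, edge type "follows"):

    import org.apache.usergrid.persistence.core.scope.ApplicationScope;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.Counter;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.ShardKey;
    import org.apache.usergrid.persistence.model.entity.Id;

    // Sketch of Counter.add()/merge(); the scope and node id come from the caller.
    public class CounterMergeExample {

        public static long mergedTotal( final ApplicationScope scope, final Id nodeId ) {

            final Counter pending = new Counter();
            final Counter incoming = new Counter();

            final ShardKey key = new ShardKey( scope, nodeId, NodeType.SOURCE, 0L, "follows" );

            pending.add( key, 2L );
            incoming.add( key, 3L );

            // a null argument would now trip the Preconditions check added above
            pending.merge( incoming );

            return pending.get( key ); // per-key totals are summed: 5
        }
    }
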
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
index c7197b5..e0740fa 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
@@ -20,10 +20,15 @@
 package org.apache.usergrid.persistence.graph.serialization.impl.shard.count;
 
 
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.inject.Inject;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.hystrix.HystrixCassandra;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
@@ -41,10 +46,12 @@ import rx.schedulers.Schedulers;
 
 /**
  * Implementation for doing edge approximation based on counters.  Uses a guava loading cache to load values from
- * cassandra, and flush them on cache eviction.
+ * cassandra, and beginFlush them on cache eviction.
  */
 public class NodeShardApproximationImpl implements NodeShardApproximation {
 
+    private static final Logger LOG = LoggerFactory.getLogger(NodeShardApproximationImpl.class);
+
     /**
      * Read write locks to ensure we atomically swap correctly
      */
@@ -64,7 +71,9 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
     /**
      * The counter that is currently in process of flushing to Cassandra.  Can be null
      */
-    private volatile Counter flushPending;
+    private final BlockingQueue<Counter> flushQueue;
+
+    private final FlushWorker worker;
 
 
     /**
@@ -78,15 +87,21 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
         this.nodeShardCounterSerialization = nodeShardCounterSerialization;
         this.timeService = timeService;
         this.currentCounter = new Counter();
+        this.flushQueue = new LinkedBlockingQueue<>( graphFig.getCounterFlushQueueSize() );
+
+        this.worker = new FlushWorker( this.flushQueue, nodeShardCounterSerialization );
+
+        Schedulers.newThread().createWorker().schedule( worker );
+
     }
 
 
     @Override
-    public void increment( final ApplicationScope scope, final Id nodeId,  final NodeType nodeType, final long shardId, final long count,
-                           final String... edgeType ) {
+    public void increment( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId,
+                           final long count, final String... edgeType ) {
 
 
-        final ShardKey key = new ShardKey( scope, nodeId, shardId, edgeType );
+        final ShardKey key = new ShardKey( scope, nodeId, nodeType, shardId, edgeType );
 
         readLock.lock();
 
@@ -103,10 +118,10 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
 
 
     @Override
-    public long getCount( final ApplicationScope scope, final Id nodeId,  final NodeType nodeType, final long shardId,
+    public long getCount( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId,
                           final String... edgeType ) {
 
-        final ShardKey key = new ShardKey( scope, nodeId, shardId, edgeType );
+        final ShardKey key = new ShardKey( scope, nodeId, nodeType, shardId, edgeType );
 
 
         readLock.lock();
@@ -116,9 +131,6 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
         try {
             count = currentCounter.get( key );
 
-            if ( flushPending != null ) {
-                count += flushPending.get( key );
-            }
         }
         finally {
             readLock.unlock();
@@ -131,78 +143,121 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
 
 
     @Override
-    public void flush() {
+    public void beginFlush() {
 
         writeLockLock.lock();
 
         try {
-            flushPending = currentCounter;
-            currentCounter = new Counter();
+
+            final boolean queued = flushQueue.offer( currentCounter );
+
+            /**
+             * We were able to queue the flush, so swap it
+             */
+            if ( queued ) {
+                currentCounter = new Counter();
+            }
         }
         finally {
             writeLockLock.unlock();
         }
+    }
 
 
-        //copy to the batch outside of the command for performance
-        final MutationBatch batch = nodeShardCounterSerialization.flush( flushPending );
+    @Override
+    public boolean flushPending() {
+        return flushQueue.size() > 0 || worker.isFlushing();
+    }
 
-        /**
-         * Execute the command in hystrix to avoid slamming cassandra
-         */
-        new HystrixCommand( HystrixCassandra.ASYNC_GROUP ) {
 
-            @Override
-            protected Void run() throws Exception {
-                /**
-                 * Execute the batch asynchronously
-                 */
-                batch.execute();
+    /**
+     * Check if we need to flush.  If we do, begin the flush
+     */
+    private void checkFlush() {
 
-                return null;
-            }
+        //we're past the timeout or the count threshold, so begin a flush
+        if ( currentCounter.getCreateTimestamp() + graphFig.getCounterFlushInterval() > timeService.getCurrentTime()
+                || currentCounter.getInvokeCount() >= graphFig.getCounterFlushCount() ) {
+            beginFlush();
+        }
+    }
 
 
-            @Override
-            protected Object getFallback() {
-                //we've failed to mutate.  Merge this count back into the current one
-                currentCounter.merge( flushPending );
+    /**
+     * Worker that will take from the queue
+     */
+    private static class FlushWorker implements Action0 {
 
-                return null;
-            }
-        }.execute();
+        private final BlockingQueue<Counter> counterQueue;
+        private final NodeShardCounterSerialization nodeShardCounterSerialization;
 
-        writeLockLock.lock();
+        private volatile Counter rollUp;
 
-        try {
-            flushPending = null;
-        }
-        finally {
-            writeLockLock.unlock();
+
+        private FlushWorker( final BlockingQueue<Counter> counterQueue,
+                             final NodeShardCounterSerialization nodeShardCounterSerialization ) {
+            this.counterQueue = counterQueue;
+            this.nodeShardCounterSerialization = nodeShardCounterSerialization;
         }
-    }
 
 
-    /**
-     * Check if we need to flush.  If we do, perform the flush
-     */
-    private void checkFlush() {
+        @Override
+        public void call() {
 
-        //there's no flush pending and we're past the timeout or count
-        if ( flushPending == null && (
-                currentCounter.getCreateTimestamp() + graphFig.getCounterFlushInterval() > timeService.getCurrentTime()
-                        || currentCounter.getInvokeCount() >= graphFig.getCounterFlushCount() ) ) {
 
+            while ( true ) {
+                /**
+                 * Block taking the first element.  Once we take this, batch drain and roll up the rest
+                 */
+
+                try {
+                    rollUp = null;
+                    rollUp = counterQueue.take();
+                }
+                catch ( InterruptedException e ) {
+                    LOG.error( "Unable to read from counter queue", e );
+                    throw new RuntimeException( "Unable to read from counter queue", e );
 
-            /**
-             * Fire the flush action asynchronously
-             */
-            Schedulers.immediate().createWorker().schedule( new Action0() {
-                @Override
-                public void call() {
-                    flush();
                 }
-            } );
+
+
+
+
+                //copy to the batch outside of the command for performance
+                final MutationBatch batch = nodeShardCounterSerialization.flush( rollUp );
+
+                /**
+                 * Execute the command in hystrix to avoid slamming cassandra
+                 */
+                new HystrixCommand( HystrixCassandra.ASYNC_GROUP ) {
+
+                    @Override
+                    protected Void run() throws Exception {
+                        batch.execute();
+
+                        return null;
+                    }
+
+
+                    @Override
+                    protected Object getFallback() {
+                        //we've failed to mutate.  Merge this count back into the current one
+                        counterQueue.offer( rollUp );
+
+                        return null;
+                    }
+                }.execute();
+            }
+
+        }
+
+
+        /**
+         * Return true if we're in the process of flushing
+         * @return
+         */
+        public boolean isFlushing(){
+            return rollUp != null;
         }
     }
 }

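The rewrite above replaces the single volatile flushPending counter with a bounded queue drained by a dedicated FlushWorker, so writers never wait on Cassandra and a failed batch is simply requeued. The essential producer/consumer shape, stripped of the locking, Hystrix and Astyanax details, is sketched below; the class and its persist() stand-in are illustrative, not from the commit:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.Counter;

    // Illustrative shape of the queue-based flush in NodeShardApproximationImpl above.
    public class BoundedFlushSketch {

        private final BlockingQueue<Counter> flushQueue = new LinkedBlockingQueue<>( 1000 );

        private volatile Counter currentCounter = new Counter();

        /** Producer side: hand the active counter to the worker, swapping it only if the queue had room. */
        public void beginFlush() {
            if ( flushQueue.offer( currentCounter ) ) {
                currentCounter = new Counter();
            }
        }

        /** Consumer side: block for the next counter, persist it, and requeue it if the write fails. */
        public void drainLoop() throws InterruptedException {
            while ( true ) {
                final Counter rollUp = flushQueue.take();
                try {
                    persist( rollUp );
                }
                catch ( Exception e ) {
                    // mirrors the Hystrix fallback above: put the counts back so they are not lost
                    flushQueue.offer( rollUp );
                }
            }
        }

        private void persist( final Counter counter ) {
            // stand-in for nodeShardCounterSerialization.flush( counter ).execute()
        }
    }
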
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
index 63c87d3..55a761b 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
@@ -22,6 +22,7 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard.count;
 import java.util.Arrays;
 
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 
@@ -32,14 +33,43 @@ public class ShardKey {
     private final ApplicationScope scope;
     private final Id nodeId;
     private final long shardId;
+    private final NodeType nodeType;
     private final String[] edgeTypes;
 
 
-    public ShardKey( final ApplicationScope scope, final Id nodeId, final long shardId, final String... edgeTypes ) {
+    public ShardKey( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId, final String... edgeTypes ) {
         this.scope = scope;
         this.nodeId = nodeId;
         this.shardId = shardId;
         this.edgeTypes = edgeTypes;
+        this.nodeType = nodeType;
+    }
+
+
+
+
+    public ApplicationScope getScope() {
+        return scope;
+    }
+
+
+    public Id getNodeId() {
+        return nodeId;
+    }
+
+
+    public long getShardId() {
+        return shardId;
+    }
+
+
+    public String[] getEdgeTypes() {
+        return edgeTypes;
+    }
+
+
+    public NodeType getNodeType() {
+        return nodeType;
     }
 
 
@@ -63,6 +93,9 @@ public class ShardKey {
         if ( !nodeId.equals( shardKey.nodeId ) ) {
             return false;
         }
+        if ( nodeType != shardKey.nodeType ) {
+            return false;
+        }
         if ( !scope.equals( shardKey.scope ) ) {
             return false;
         }
@@ -71,31 +104,12 @@ public class ShardKey {
     }
 
 
-    public ApplicationScope getScope() {
-        return scope;
-    }
-
-
-    public Id getNodeId() {
-        return nodeId;
-    }
-
-
-    public long getShardId() {
-        return shardId;
-    }
-
-
-    public String[] getEdgeTypes() {
-        return edgeTypes;
-    }
-
-
     @Override
     public int hashCode() {
         int result = scope.hashCode();
         result = 31 * result + nodeId.hashCode();
         result = 31 * result + ( int ) ( shardId ^ ( shardId >>> 32 ) );
+        result = 31 * result + nodeType.hashCode();
         result = 31 * result + Arrays.hashCode( edgeTypes );
         return result;
     }

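Because ShardKey is the key of the counter map, nodeType has to participate in both equals() and hashCode(), which the hunk above ensures. A small sketch of the consequence, with placeholder scope, id and edge type:

    import org.apache.usergrid.persistence.core.scope.ApplicationScope;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.ShardKey;
    import org.apache.usergrid.persistence.model.entity.Id;

    // Keys that differ only by NodeType are distinct, so source and target counts
    // for the same node, shard and edge type no longer collide in the counter map.
    public class ShardKeyExample {

        public static boolean distinctSides( final ApplicationScope scope, final Id nodeId ) {
            final ShardKey sourceKey = new ShardKey( scope, nodeId, NodeType.SOURCE, 0L, "follows" );
            final ShardKey targetKey = new ShardKey( scope, nodeId, NodeType.TARGET, 0L, "follows" );
            return !sourceKey.equals( targetKey ); // true after this change
        }
    }
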
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
index 1e6614a..5b7c901 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
@@ -104,7 +104,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
         final MutationBatch batch = keyspace.prepareMutationBatch();
 
-        batch.withRow( EDGE_SHARDS, rowKey ).putColumn( shard, HOLDER ).setTimestamp( timestamp );
+        batch.withTimestamp( timestamp ).withRow( EDGE_SHARDS, rowKey ).putColumn( shard, HOLDER );
 
         return batch;
     }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
new file mode 100644
index 0000000..14a67fa
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.util;
+
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+
+import org.apache.cassandra.utils.MurmurHash;
+
+import org.apache.usergrid.persistence.model.entity.Id;
+
+
+/**
+ *
+ *
+ */
+public class EdgeHasher {
+
+    private static final String UTF_8 = "UTF-8";
+    private static final Charset CHARSET = Charset.forName( UTF_8 );
+
+
+    /**
+     * Create a hash based on the edge type and the type of the id that will be inserted into the column
+     *
+     *
+     * @param edgeType The name of the edge type
+     * @param idForColumn The id of the value that will be in the column
+     *
+     * @return A hash that represents a consistent one way hash of the fields
+     */
+    public static long[] createEdgeHash( final String edgeType, final Id idForColumn ) {
+
+        return createEdgeHash( edgeType, idForColumn.getType() );
+    }
+
+
+    /**
+     * Create the edge hash from the edge type and id type
+     * @param edgeTypes
+     * @return
+     */
+    public static long[] createEdgeHash(final String... edgeTypes){
+       final StringBuilder hashString =  new StringBuilder();
+
+        for(String edge: edgeTypes){
+            hashString.append(edge);
+        }
+
+        return createEdgeHash( hashString.toString() );
+    }
+
+
+    /**
+     * Create a hash based on the edge type and the type of the id that will be inserted into the column
+     *
+     * @return A hash that represents a consistent one way hash of the fields
+     */
+    public static long[] createEdgeHash( final String edgeType ) {
+
+
+        ByteBuffer key = ByteBuffer.wrap( edgeType.getBytes( CHARSET ) );
+
+        return  MurmurHash.hash3_x64_128( key, key.position(), key.remaining(), 0 );
+
+    }
+
+}

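EdgeHasher above produces a 128-bit MurmurHash3 over the UTF-8 bytes of the concatenated edge types. Note that the varargs overload joins the types with no delimiter, so ("a", "bc") and ("ab", "c") hash to the same value. A minimal usage sketch; the wrapper class is illustrative:

    import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
    import org.apache.usergrid.persistence.model.entity.Id;

    // Illustrative caller of the new EdgeHasher utility added above.
    public class EdgeHashExample {

        /** Hash an edge type together with the type of the id that the column will carry. */
        public static long[] hashForColumn( final String edgeType, final Id idForColumn ) {
            // returns both 64-bit halves of MurmurHash.hash3_x64_128
            return EdgeHasher.createEdgeHash( edgeType, idForColumn );
        }
    }
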
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
index 01e07b1..adcb42e 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
@@ -35,6 +35,7 @@ import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.core.scope.ApplicationScopeImpl;
 import org.apache.usergrid.persistence.graph.guice.TestGraphModule;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
 
@@ -101,7 +102,7 @@ public class GraphManagerShardingIT {
         //each edge causes 4 counts
         final long writeCount = flushCount/4;
 
-        assertTrue( "Shard size must be >= flush Count", maxShardSize >= flushCount );
+        assertTrue( "Shard size must be >= beginFlush Count", maxShardSize >= flushCount );
 
         Id targetId = null;
 
@@ -115,14 +116,14 @@ public class GraphManagerShardingIT {
         }
 
 
-        long shardCount = nodeShardApproximation.getCount( scope, sourceId, 0l, edgeType );
+        long shardCount = nodeShardApproximation.getCount( scope, sourceId, NodeType.SOURCE,  0l, edgeType );
 
         assertEquals("Shard count for source node should be the same as write count", writeCount, shardCount);
 
 
         //now verify it's correct for the target
 
-        shardCount = nodeShardApproximation.getCount( scope, targetId, 0l, edgeType );
+        shardCount = nodeShardApproximation.getCount( scope, targetId, NodeType.TARGET,  0l, edgeType );
 
         assertEquals(1, shardCount);
 
@@ -151,7 +152,7 @@ public class GraphManagerShardingIT {
         //each edge causes 4 counts
         final long writeCount = flushCount/4;
 
-        assertTrue( "Shard size must be >= flush Count", maxShardSize >= flushCount );
+        assertTrue( "Shard size must be >= beginFlush Count", maxShardSize >= flushCount );
 
         Id sourceId = null;
 
@@ -165,14 +166,14 @@ public class GraphManagerShardingIT {
         }
 
 
-        long shardCount = nodeShardApproximation.getCount( scope, targetId, 0l, edgeType );
+        long shardCount = nodeShardApproximation.getCount( scope, targetId, NodeType.TARGET,  0l, edgeType );
 
         assertEquals("Shard count for source node should be the same as write count", writeCount, shardCount);
 
 
         //now verify it's correct for the target
 
-        shardCount = nodeShardApproximation.getCount( scope, sourceId, 0l, edgeType );
+        shardCount = nodeShardApproximation.getCount( scope, sourceId, NodeType.SOURCE,  0l, edgeType );
 
         assertEquals(1, shardCount);
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
index 6135a2d..937a51a 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
@@ -96,16 +96,16 @@ public class EdgeShardSerializationTest {
 
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, timestamp, types );
+        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice1, timestamp, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, timestamp, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice2, timestamp, types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, timestamp, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice3, timestamp, types ) );
 
         batch.execute();
 
 
-        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
+        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
         Shard next = results.next();
 
@@ -129,8 +129,14 @@ public class EdgeShardSerializationTest {
 
         assertFalse( results.hasNext() );
 
+        //test we get nothing with the other node type
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
+
+        assertFalse(results.hasNext());
+
+
         //test paging and size
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.of( new Shard( slice2, 0l ) ), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.of( new Shard( slice2, 0l ) ), types );
 
         next = results.next();
 
@@ -162,16 +168,16 @@ public class EdgeShardSerializationTest {
 
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, timestamp, types );
+        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice1, timestamp, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, timestamp,types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice2, timestamp,types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, timestamp, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice3, timestamp, types ) );
 
         batch.execute();
 
 
-        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
+        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
         assertEquals( slice3, results.next().getShardIndex() );
 
@@ -181,10 +187,15 @@ public class EdgeShardSerializationTest {
 
         assertFalse( results.hasNext() );
 
+        //test nothing with other type
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
+
+        assertFalse(results.hasNext());
+
         //test paging and size
-        edgeShardSerialization.removeEdgeMeta( scope, now, slice1, types ).execute();
+        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice1, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now,Optional.<Shard>absent(), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
         assertEquals( slice3, results.next().getShardIndex() );
 
@@ -193,11 +204,11 @@ public class EdgeShardSerializationTest {
         assertFalse( results.hasNext() );
 
 
-        edgeShardSerialization.removeEdgeMeta( scope, now, slice2, types ).execute();
+        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice2, types ).execute();
 
-        edgeShardSerialization.removeEdgeMeta( scope, now, slice3, types ).execute();
+        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice3, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
 
         assertFalse( results.hasNext() );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 7546531..383e32c 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -44,6 +44,7 @@ import static org.apache.usergrid.persistence.graph.test.util.EdgeTestUtils.crea
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.same;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -136,15 +137,18 @@ public class NodeShardAllocationTest {
          * Mock up returning an empty iterator, our audit shouldn't create a new shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
-        final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
 
         assertFalse( "No shard allocated", result );
     }
 
 
+
+
+
     @Test
     public void existingFutureShardSameTime() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
@@ -182,10 +186,10 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard, and a future shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
 
-        final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
 
         assertFalse( "No shard allocated", result );
     }
@@ -226,7 +230,7 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard, and a future shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
 
 
@@ -234,9 +238,9 @@ public class NodeShardAllocationTest {
 
         final long count = graphFig.getShardSize() - 1;
 
-        when( nodeShardApproximation.getCount(scope, nodeId, 0l, type, subType )).thenReturn( count );
+        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
 
-        final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
 
         assertFalse( "Shard allocated", result );
     }
@@ -277,7 +281,7 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
 
 
@@ -285,7 +289,7 @@ public class NodeShardAllocationTest {
 
         //return a shard size equal to our max
         when( nodeShardApproximation
-                .getCount(   scope , nodeId, 0l,type , subType  ))
+                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
                 .thenReturn( shardCount );
 
         ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
@@ -294,11 +298,11 @@ public class NodeShardAllocationTest {
 
         //mock up our mutation
         when( edgeShardSerialization
-                .writeEdgeMeta( same( scope ), same( nodeId ), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
+                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
                 .thenReturn( mock( MutationBatch.class ) );
 
 
-        final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
 
         assertTrue( "Shard allocated", result );
 
@@ -379,7 +383,7 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ), same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
                         same( subType ) ) ).thenReturn( Arrays.asList(futureShard3, futureShard2, futureShard1, minShard).iterator() );
 
 
@@ -391,12 +395,12 @@ public class NodeShardAllocationTest {
 
         //mock up our mutation
         when( edgeShardSerialization
-                .removeEdgeMeta( same( scope ), same( nodeId ), newLongValue.capture(), same( type ), same( subType ) ) )
+                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
                 .thenReturn( mock( MutationBatch.class ) );
 
 
         final Iterator<Shard>
-                result = approximation.getSourceShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
+                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
 
 
         assertTrue( "Shards present", result.hasNext() );
@@ -453,10 +457,10 @@ public class NodeShardAllocationTest {
          * Mock up returning an empty iterator, our audit shouldn't create a new shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
-        final Iterator<Shard> result = approximation.getSourceShards( scope, nodeId, Optional.<Shard>absent(), type,
+        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
                 subType );
 
         assertEquals("0 shard allocated", 0l, result.next().getShardIndex());

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
index da19ce5..51448b7 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
@@ -39,12 +39,15 @@ import org.junit.Test;
 import org.safehaus.guicyfig.Bypass;
 import org.safehaus.guicyfig.OptionState;
 import org.safehaus.guicyfig.Overrides;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
 
@@ -68,6 +71,7 @@ import static org.mockito.Mockito.when;
 
 public class NodeShardApproximationTest {
 
+    private static final Logger LOG = LoggerFactory.getLogger( NodeShardApproximation.class );
 
     private GraphFig graphFig;
 
@@ -92,25 +96,24 @@ public class NodeShardApproximationTest {
 
         when( graphFig.getShardCacheSize() ).thenReturn( 10000l );
         when( graphFig.getShardSize() ).thenReturn( 250000l );
+        when( graphFig.getCounterFlushQueueSize() ).thenReturn( 10000 );
 
         nodeShardCounterSerialization = mock( NodeShardCounterSerialization.class );
 
-        when(nodeShardCounterSerialization.flush( any(Counter.class) )).thenReturn( mock( MutationBatch.class) );
-
+        when( nodeShardCounterSerialization.flush( any( Counter.class ) ) ).thenReturn( mock( MutationBatch.class ) );
 
 
         timeService = mock( TimeService.class );
 
-        when(timeService.getCurrentTime()).thenReturn( System.currentTimeMillis() );
+        when( timeService.getCurrentTime() ).thenReturn( System.currentTimeMillis() );
     }
 
 
     @Test
-    public void testSingleShard() {
-
+    public void testSingleShard() throws InterruptedException {
 
-        when(graphFig.getCounterFlushCount()).thenReturn( 100000l );
 
+        when( graphFig.getCounterFlushCount() ).thenReturn( 100000l );
         NodeShardApproximation approximation =
                 new NodeShardApproximationImpl( graphFig, nodeShardCounterSerialization, timeService );
 
@@ -120,7 +123,9 @@ public class NodeShardApproximationTest {
         final String type = "type";
         final String type2 = "subType";
 
-        long count = approximation.getCount( scope, id, shardId, type, type2 );
+        long count = approximation.getCount( scope, id, NodeType.TARGET, shardId, type, type2 );
+
+        waitForFlush( approximation );
 
         assertEquals( 0, count );
     }
@@ -130,8 +135,6 @@ public class NodeShardApproximationTest {
     public void testSingleShardMultipleThreads() throws ExecutionException, InterruptedException {
 
 
-
-
         NodeShardCounterSerialization serialization = new TestNodeShardCounterSerialization();
 
         final NodeShardApproximation approximation =
@@ -158,7 +161,7 @@ public class NodeShardApproximationTest {
                 public Long call() throws Exception {
 
                     for ( int i = 0; i < increments; i++ ) {
-                        approximation.increment( scope, id, shardId, 1, type, type2 );
+                        approximation.increment( scope, id, NodeType.TARGET, shardId, 1, type, type2 );
                     }
 
                     return 0l;
@@ -169,24 +172,25 @@ public class NodeShardApproximationTest {
         }
 
 
-
         for ( Future<Long> future : futures ) {
-           future.get();
+            future.get();
         }
 
-
+        waitForFlush( approximation );
         //get our count.  It should be accurate b/c we only have 1 instance
 
-        final long returnedCount = approximation.getCount( scope, id, shardId, type, type2);
+        final long returnedCount = approximation.getCount( scope, id, NodeType.TARGET, shardId, type, type2 );
         final long expected = workers * increments;
 
 
-        assertEquals(expected, returnedCount);
-
+        assertEquals( expected, returnedCount );
 
+        //test we get nothing with the other type
 
+        final long emptyCount = approximation.getCount( scope, id, NodeType.SOURCE, shardId, type, type2 );
 
 
+        assertEquals( 0, emptyCount );
     }
 
 
@@ -195,8 +199,6 @@ public class NodeShardApproximationTest {
     public void testMultipleShardMultipleThreads() throws ExecutionException, InterruptedException {
 
 
-
-
         NodeShardCounterSerialization serialization = new TestNodeShardCounterSerialization();
 
         final NodeShardApproximation approximation =
@@ -210,8 +212,7 @@ public class NodeShardApproximationTest {
         final String type = "type";
         final String type2 = "subType";
 
-        final AtomicLong shardIdCounter = new AtomicLong(  );
-
+        final AtomicLong shardIdCounter = new AtomicLong();
 
 
         ExecutorService executor = Executors.newFixedThreadPool( workers );
@@ -227,7 +228,7 @@ public class NodeShardApproximationTest {
                     final long threadShardId = shardIdCounter.incrementAndGet();
 
                     for ( int i = 0; i < increments; i++ ) {
-                        approximation.increment( scope, id, threadShardId, 1, type, type2 );
+                        approximation.increment( scope, id, NodeType.SOURCE, threadShardId, 1, type, type2 );
                     }
 
                     return threadShardId;
@@ -238,29 +239,41 @@ public class NodeShardApproximationTest {
         }
 
 
-
         for ( Future<Long> future : futures ) {
-           final long shardId = future.get();
+            final long shardId = future.get();
+
+            waitForFlush( approximation );
 
-            final long returnedCount = approximation.getCount( scope, id, shardId, type, type2);
+            final long returnedCount = approximation.getCount( scope, id, NodeType.SOURCE, shardId, type, type2 );
 
-            assertEquals(increments, returnedCount);
+            assertEquals( increments, returnedCount );
         }
+    }
 
 
+    private void waitForFlush( NodeShardApproximation approximation ) throws InterruptedException {
 
+        approximation.beginFlush();
 
+        while ( approximation.flushPending() ) {
+
+            LOG.info("Waiting on beginFlush to complete");
+
+            Thread.sleep( 100 );
+        }
     }
 
 
+
     /**
      * These are created b/c we can't use Mockito.  It OOM's with keeping track of all the mock invocations
      */
 
-    private static class TestNodeShardCounterSerialization implements NodeShardCounterSerialization{
+    private static class TestNodeShardCounterSerialization implements NodeShardCounterSerialization {
 
         private Counter copy = new Counter();
 
+
         @Override
         public MutationBatch flush( final Counter counter ) {
             copy.merge( counter );
@@ -281,7 +294,6 @@ public class NodeShardApproximationTest {
     }
 
 
-
     /**
      * Simple test mutation to no-op during tests
      */
@@ -415,14 +427,14 @@ public class NodeShardApproximationTest {
     }
 
 
-
-    private static class TestGraphFig implements GraphFig{
+    private static class TestGraphFig implements GraphFig {
 
         @Override
         public int getScanPageSize() {
             return 0;  //To change body of implemented methods use File | Settings | File Templates.
         }
 
+
         @Override
         public int getRepairConcurrentSize() {
             return 0;  //To change body of implemented methods use File | Settings | File Templates.
@@ -460,6 +472,12 @@ public class NodeShardApproximationTest {
 
 
         @Override
+        public int getCounterFlushQueueSize() {
+            return 10000;
+        }
+
+
+        @Override
         public void addPropertyChangeListener( final PropertyChangeListener propertyChangeListener ) {
             //To change body of implemented methods use File | Settings | File Templates.
         }
@@ -555,7 +573,8 @@ public class NodeShardApproximationTest {
         }
     }
 
-    private static class TestTimeService implements TimeService{
+
+    private static class TestTimeService implements TimeService {
 
         @Override
         public long getCurrentTime() {
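
A note on the flush API the test exercises above: waitForFlush() calls beginFlush() and then polls flushPending() until the background counter flush drains, which is what makes the asynchronous counters deterministic in assertions. The sketch below shows the same pattern outside the test class against the NodeType-aware NodeShardApproximation interface; it is illustrative only and assumes the approximation, scope, and node id are supplied by the caller.

    import org.apache.usergrid.persistence.core.scope.ApplicationScope;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
    import org.apache.usergrid.persistence.model.entity.Id;

    public class CounterFlushExample {

        /**
         * Increment the counter for the TARGET side of a node's shard, wait for the
         * asynchronous flush to drain, then read the approximate count back.
         */
        public static long incrementAndRead( final NodeShardApproximation approximation,
                                             final ApplicationScope scope, final Id nodeId,
                                             final long shardId, final String edgeType )
                throws InterruptedException {

            approximation.increment( scope, nodeId, NodeType.TARGET, shardId, 1, edgeType );

            //start flushing the in-memory counters
            approximation.beginFlush();

            //poll until the queue drains, exactly as waitForFlush does in the test above
            while ( approximation.flushPending() ) {
                Thread.sleep( 100 );
            }

            return approximation.getCount( scope, nodeId, NodeType.TARGET, shardId, edgeType );
        }
    }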

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/7425ba5d/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationTest.java
index 9968f67..aad918e 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationTest.java
@@ -35,6 +35,7 @@ import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.guice.TestGraphModule;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
 
@@ -96,11 +97,11 @@ public class NodeShardCounterSerializationTest {
 
         final Id id = createId( "test" );
 
-        ShardKey key1 = new ShardKey( scope, id, 0, "type1" );
+        ShardKey key1 = new ShardKey( scope, id, NodeType.SOURCE, 0, "type1" );
 
-        ShardKey key2 = new ShardKey( scope, id, 0, "type2" );
+        ShardKey key2 = new ShardKey( scope, id, NodeType.SOURCE, 0, "type2" );
 
-        ShardKey key3 = new ShardKey( scope, id, 1, "type1" );
+        ShardKey key3 = new ShardKey( scope, id, NodeType.SOURCE, 1, "type1" );
 
 
         Counter counter = new Counter();


[03/11] git commit: Checkpoint for new apis

Posted by to...@apache.org.
Checkpoint for new apis


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/72f0d7d8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/72f0d7d8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/72f0d7d8

Branch: refs/heads/USERGRID-188
Commit: 72f0d7d82495e290d93c54bc2d7d8e46bb4528b2
Parents: b1434dd
Author: Todd Nine <tn...@apigee.com>
Authored: Fri Jul 11 14:05:07 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Fri Jul 11 14:05:07 2014 -0600

----------------------------------------------------------------------
 .../impl/shard/EdgeShardSerialization.java      |  10 +-
 .../impl/shard/EdgeShardStrategy.java           |   8 +-
 .../impl/shard/NodeShardAllocation.java         |  16 +-
 .../impl/shard/NodeShardApproximation.java      |  12 +-
 .../impl/shard/NodeShardCache.java              |   6 +-
 .../serialization/impl/shard/NodeType.java      |  29 +++
 .../graph/serialization/impl/shard/Shard.java   |   9 +
 .../shard/count/NodeShardApproximationImpl.java |   5 +-
 .../impl/shard/impl/EdgeRowKey.java             |  14 +-
 .../impl/shard/impl/EdgeRowKeySerializer.java   |  33 +++-
 .../shard/impl/EdgeShardSerializationImpl.java  | 130 +++++++++++---
 .../shard/impl/NodeShardAllocationImpl.java     | 175 +++++++++++--------
 .../impl/shard/impl/NodeShardCacheImpl.java     |  26 ++-
 .../shard/impl/SizebasedEdgeShardStrategy.java  |  13 +-
 .../graph/serialization/util/EdgeHasher.java    |  86 ---------
 .../impl/shard/EdgeShardSerializationTest.java  |  46 +++--
 .../impl/shard/NodeShardAllocationTest.java     |  84 ++++++---
 .../impl/shard/NodeShardCacheTest.java          |  14 +-
 18 files changed, 445 insertions(+), 271 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
index 480b71d..e91dda3 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
@@ -39,30 +39,34 @@ public interface EdgeShardSerialization extends Migration{
      * Write a new time shard for the meta data
      * @param scope The scope to write
      * @param nodeId The id in the edge
+     * @param nodeType Whether the node is a source or a target node
      * @param shard The next time to write
+     * @param timestamp the timestamp to set on the shard
      * @param types The types to write to.  Can be edge type, or edgeType+id type
      */
-    public MutationBatch writeEdgeMeta(ApplicationScope scope, Id nodeId, long shard,  String... types);
+    public MutationBatch writeEdgeMeta(ApplicationScope scope, Id nodeId, NodeType nodeType, long shard, long timestamp,  String... types);
 
     /**
      * Get an iterator of all meta data and types.  Returns a range from High to low
      * @param scope The organization scope
      * @param nodeId The id of the node
+     * @param nodeType The type of node
      * @param start The shard time to start seeking from.  Values <= this value will be returned.
      * @param types The types to use
      * @return
      */
-    public Iterator<Shard> getEdgeMetaData(ApplicationScope scope, Id nodeId, Optional<Shard> start,  String... types);
+    public Iterator<Shard> getEdgeMetaData(ApplicationScope scope, Id nodeId, NodeType nodeType, Optional<Shard> start,  String... types);
 
     /**
      * Remove the shard from the edge meta data from the types.
 
      * @param scope
      * @param nodeId
+     * @param nodeType The type of node, source or target
      * @param shard
      * @param types
      * @return
      */
-    public MutationBatch removeEdgeMeta(ApplicationScope scope, Id nodeId, long shard, String... types);
+    public MutationBatch removeEdgeMeta(ApplicationScope scope, Id nodeId, NodeType nodeType, long shard, String... types);
 
 }
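
As a rough usage sketch (not taken from the patch itself), the new NodeType parameter distinguishes whether a shard row belongs to the node in its source or its target role, so a caller would typically register an edge type under both ids. The helper below assumes the serialization, scope, ids, and timestamp come from the caller.

    import java.util.Iterator;

    import org.apache.usergrid.persistence.core.scope.ApplicationScope;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
    import org.apache.usergrid.persistence.model.entity.Id;

    import com.google.common.base.Optional;
    import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;

    public class EdgeShardMetaExample {

        /**
         * Write shard 0 for both directions of an edge type, then iterate the shards
         * visible from the source node, highest shard first.
         */
        public static Iterator<Shard> writeAndRead( final EdgeShardSerialization serialization,
                                                    final ApplicationScope scope, final Id sourceId,
                                                    final Id targetId, final long timestamp,
                                                    final String edgeType ) throws ConnectionException {

            //the source node's row is keyed with SOURCE, the target node's with TARGET
            serialization.writeEdgeMeta( scope, sourceId, NodeType.SOURCE, 0l, timestamp, edgeType ).execute();
            serialization.writeEdgeMeta( scope, targetId, NodeType.TARGET, 0l, timestamp, edgeType ).execute();

            return serialization.getEdgeMetaData( scope, sourceId, NodeType.SOURCE,
                    Optional.<Shard>absent(), edgeType );
        }
    }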

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
index 22c9470..8f5d168 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
@@ -34,10 +34,11 @@ public interface EdgeShardStrategy {
      *
      * @param scope The application's scope
      * @param rowKeyId The id being used in the row key
+     * @param nodeType
      * @param timestamp The timestamp on the edge
      * @param types The types in the edge
      */
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final long timestamp,
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId,final  NodeType nodeType, final long timestamp,
                                            final String... types );
 
 
@@ -46,10 +47,11 @@ public interface EdgeShardStrategy {
      *
      * @param scope The application scope
      * @param rowKeyId The id used in the row key
+     * @param nodeType
      * @param maxTimestamp The max timestamp to use
      * @param types the types in the edge
      */
-    public Iterator<ShardEntries> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final long maxTimestamp,final  String... types );
+    public Iterator<ShardEntries> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final NodeType nodeType,final long maxTimestamp,final  String... types );
 
     /**
      * Increment our count meta data by the passed value.  Can be a positive or a negative number.
@@ -60,7 +62,7 @@ public interface EdgeShardStrategy {
      * @param types The types
      * @return
      */
-    public void increment(final ApplicationScope scope,final  Id rowKeyId, long shardId, long count ,final  String... types );
+    public void increment(final ApplicationScope scope,final Id rowKeyId, final NodeType nodeType, long shardId, long count ,final  String... types );
 
 
     /**

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
index 6ecadbb..f17ebd5 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
@@ -36,16 +36,17 @@ public interface NodeShardAllocation {
 
 
     /**
-     * Get all shards for the given info.  If none exist, a default shard should be allocated
+     * Get all shards for the given info.  If none exist, a default shard should be allocated.  The nodeId is read as a source or a target node according to nodeType
      *
      * @param scope The application scope
      * @param nodeId
+     * @param nodeType
      * @param maxShardId The max value to start seeking from.  Values <= this will be returned if specified
      * @param edgeTypes
      * @return A list of all shards <= the current shard.  This will always return 0l if no shards are allocated
      */
-    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, Optional<Shard> maxShardId,
-                                     final String... edgeTypes );
+    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, Optional<Shard> maxShardId,
+                                            final String... edgeTypes );
 
 
     /**
@@ -53,10 +54,17 @@ public interface NodeShardAllocation {
      *
      * @param scope The app scope
      * @param nodeId The node id
+     * @param nodeType The type of node
      * @param edgeType The edge types
      * @return True if a new shard was allocated
      */
-    public boolean auditMaxShard(final ApplicationScope scope, final Id nodeId, final String... edgeType);
+    public boolean auditMaxShard(final ApplicationScope scope, final Id nodeId,final NodeType nodeType,  final String... edgeType);
+
+    /**
+     * Get the minimum time that a created shard should be considered "new", and be used for both new writes and reads
+     * @return
+     */
+    public long getMinTime();
 
 
 }
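
One plausible way a caller drives this allocation API is sketched below. It is illustrative only: the allocation, scope, and id are assumed to be injected, and the reading of getMinTime() as "shards created before this time have settled" is inferred from the implementation later in this commit.

    import java.util.Iterator;

    import org.apache.usergrid.persistence.core.scope.ApplicationScope;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
    import org.apache.usergrid.persistence.model.entity.Id;

    import com.google.common.base.Optional;

    public class ShardAllocationExample {

        public static void auditSourceShards( final NodeShardAllocation allocation, final ApplicationScope scope,
                                              final Id nodeId, final String edgeType ) {

            //always returns at least the default shard 0
            final Iterator<Shard> shards =
                    allocation.getShards( scope, nodeId, NodeType.SOURCE, Optional.<Shard>absent(), edgeType );

            final Shard maxShard = shards.next();

            //a shard created before the minimum time has had time to propagate through
            //every cache; newer shards may still be settling, so skip the audit for them
            if ( maxShard.getCreatedTime() < allocation.getMinTime() ) {
                allocation.auditMaxShard( scope, nodeId, NodeType.SOURCE, edgeType );
            }
        }
    }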

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
index 90503d4..311e605 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
@@ -36,17 +36,21 @@ public interface NodeShardApproximation {
      * @param scope The scope
      * @param nodeId The node id
      * @param shardId The shard id
-     * @param count
      * @param edgeType The edge type
      */
-    public void increment( final ApplicationScope scope, final Id nodeId, final long shardId,  final long count,
-                           final String... edgeType );
+    public void increment( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId,
+                           final long count, final String... edgeType );
 
 
     /**
      * Get the approximation of the number of unique items
+     *
+     * @param scope The scope
+     * @param nodeId The node id
+     * @param shardId The shard id
+     * @param edgeType The edge type
      */
-    public long getCount( final ApplicationScope scope, final Id nodeId, final long shardId,
+    public long getCount( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId,
                           final String... edgeType );
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
index 3e1675a..7ce0b96 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
@@ -37,21 +37,23 @@ public interface NodeShardCache {
     /**
      * Get the shard for the given timestamp
      * @param nodeId
+     * @param nodeType
      * @param timestamp The time to select the slice for.
      * @param edgeType
      */
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final long timestamp,
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long timestamp,
                                 final String... edgeType );
 
     /**
      * Get an iterator of all versions <= the version for iterating shard entry sets
      * @param scope
      * @param nodeId
+     * @param nodeType
      * @param maxTimestamp The highest timestamp
      * @param edgeType
      * @return
      */
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final long maxTimestamp,
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long maxTimestamp,
                                      final String... edgeType );
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeType.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeType.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeType.java
new file mode 100644
index 0000000..c0e8aa1
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeType.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+/**
+ * The node type of the source or target
+ */
+public enum NodeType {
+    SOURCE,
+    TARGET
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
index d0c3e01..80aea16 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
@@ -100,4 +100,13 @@ public class Shard implements Comparable<Shard> {
         result = 31 * result + ( int ) ( createdTime ^ ( createdTime >>> 32 ) );
         return result;
     }
+
+
+    @Override
+    public String toString() {
+        return "Shard{" +
+                "shardIndex=" + shardIndex +
+                ", createdTime=" + createdTime +
+                "} ";
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
index 7dc763f..c7197b5 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationImpl.java
@@ -29,6 +29,7 @@ import org.apache.usergrid.persistence.core.hystrix.HystrixCassandra;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.netflix.astyanax.MutationBatch;
@@ -81,7 +82,7 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
 
 
     @Override
-    public void increment( final ApplicationScope scope, final Id nodeId, final long shardId, final long count,
+    public void increment( final ApplicationScope scope, final Id nodeId,  final NodeType nodeType, final long shardId, final long count,
                            final String... edgeType ) {
 
 
@@ -102,7 +103,7 @@ public class NodeShardApproximationImpl implements NodeShardApproximation {
 
 
     @Override
-    public long getCount( final ApplicationScope scope, final Id nodeId, final long shardId,
+    public long getCount( final ApplicationScope scope, final Id nodeId,  final NodeType nodeType, final long shardId,
                           final String... edgeType ) {
 
         final ShardKey key = new ShardKey( scope, nodeId, shardId, edgeType );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
index f4d8467..12b1d7c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
@@ -19,7 +19,6 @@
 package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
 
 
-import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 
@@ -27,19 +26,16 @@ import org.apache.usergrid.persistence.model.entity.Id;
  * Used to store row keys by sourceId, targetId and edgeType
  */
 public class EdgeRowKey {
-    public final Id targetId;
-    public final long[] edgeTypesHash;
+    public final Id nodeId;
+    public final String[] edgeTypes;
 
 
-    public EdgeRowKey( final Id rowId, final String[] edgeTypes ) {
-        this( rowId, EdgeHasher.createEdgeHash( edgeTypes ) );
+    public EdgeRowKey( final Id nodeId, final String[] edgeTypes ) {
+        this.nodeId = nodeId;
+        this.edgeTypes = edgeTypes;
     }
 
 
-    public EdgeRowKey( final Id rowId, final long[] hash ) {
-        this.targetId = rowId;
-        this.edgeTypesHash = hash;
-    }
 }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
index cc615c1..be97f95 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
@@ -22,6 +22,7 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
 import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
 import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
 import org.apache.usergrid.persistence.model.entity.Id;
+import org.apache.usergrid.persistence.model.entity.SimpleId;
 
 import com.netflix.astyanax.model.CompositeBuilder;
 import com.netflix.astyanax.model.CompositeParser;
@@ -32,6 +33,8 @@ import com.netflix.astyanax.model.CompositeParser;
  */
 public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey> {
 
+    private static final EdgeRowKeySerializer INSTANCE = new EdgeRowKeySerializer();
+
     private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
 
 
@@ -39,10 +42,13 @@ public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey
     public void toComposite( final CompositeBuilder builder, final EdgeRowKey key ) {
 
         //add the row id to the composite
-        ID_SER.toComposite( builder, key.targetId );
+        ID_SER.toComposite( builder, key.nodeId );
+
+        builder.addInteger( key.edgeTypes.length );
 
-        builder.addLong( key.edgeTypesHash[0] );
-        builder.addLong( key.edgeTypesHash[1] );
+        for(String type: key.edgeTypes){
+            builder.addString( type );
+        }
     }
 
 
@@ -51,8 +57,25 @@ public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey
 
         final Id sourceId = ID_SER.fromComposite( composite );
 
-        final long[] hash = { composite.readLong(), composite.readLong() };
 
-        return new EdgeRowKey( sourceId, hash );
+        final int length = composite.readInteger();
+
+        String[] types = new String[length];
+
+        for(int i = 0; i < length; i++){
+            types[i] = composite.readString();
+        }
+
+        return new EdgeRowKey( sourceId, types );
+
+    }
+
+
+
+    /**
+     * Get the singleton serializer
+     */
+    public static EdgeRowKeySerializer get() {
+        return INSTANCE;
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
index 63075b3..1e6614a 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
@@ -26,18 +26,21 @@ import java.util.Iterator;
 
 import org.apache.cassandra.db.marshal.BytesType;
 
-import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
+import org.apache.usergrid.persistence.core.astyanax.ColumnNameIterator;
+import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
+import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
+import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
+import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
+import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySerializer;
 import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
-import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.core.util.ValidationUtils;
 import org.apache.usergrid.persistence.graph.GraphFig;
-import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
-import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySerializer;
-import org.apache.usergrid.persistence.core.astyanax.ColumnNameIterator;
-import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -48,6 +51,8 @@ import com.google.inject.Singleton;
 import com.netflix.astyanax.Keyspace;
 import com.netflix.astyanax.MutationBatch;
 import com.netflix.astyanax.model.Column;
+import com.netflix.astyanax.model.CompositeBuilder;
+import com.netflix.astyanax.model.CompositeParser;
 import com.netflix.astyanax.query.RowQuery;
 import com.netflix.astyanax.serializers.LongSerializer;
 import com.netflix.astyanax.util.RangeBuilder;
@@ -59,9 +64,9 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
     /**
      * Edge shards
      */
-    private static final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> EDGE_SHARDS =
+    private static final MultiTennantColumnFamily<ApplicationScope, DirectedRowKey, Long> EDGE_SHARDS =
             new MultiTennantColumnFamily<>( "Edge_Shards",
-                    new OrganizationScopedRowKeySerializer<>( new EdgeRowKeySerializer() ), LongSerializer.get() );
+                    new OrganizationScopedRowKeySerializer<>( new DirectedEdgeRowKeySerializer() ), LongSerializer.get() );
 
 
     private static final byte HOLDER = 0x00;
@@ -84,30 +89,30 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
 
     @Override
-    public MutationBatch writeEdgeMeta( final ApplicationScope scope, final Id nodeId, final long shard,
-                                        final String... types ) {
+    public MutationBatch writeEdgeMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                        final long shard, final long timestamp, final String... types ) {
 
 
         ValidationUtils.validateApplicationScope( scope );
-        ValidationUtils.verifyIdentity(nodeId);
+        ValidationUtils.verifyIdentity( nodeId );
         Preconditions.checkArgument( shard > -1, "shardId must be greater than -1" );
         Preconditions.checkNotNull( types );
 
-        final EdgeRowKey key = new EdgeRowKey( nodeId, types );
+        final DirectedRowKey key = new DirectedRowKey( nodeId, nodeType, types );
 
         final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope, key );
 
         final MutationBatch batch = keyspace.prepareMutationBatch();
 
-        batch.withRow( EDGE_SHARDS, rowKey ).putColumn( shard, HOLDER );
+        batch.withRow( EDGE_SHARDS, rowKey ).putColumn( shard, HOLDER ).setTimestamp( timestamp );
 
         return batch;
     }
 
 
     @Override
-    public Iterator<Shard> getEdgeMetaData( final ApplicationScope scope, final Id nodeId, final Optional<Shard> start,
-                                           final String... types ) {
+    public Iterator<Shard> getEdgeMetaData( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                            final Optional<Shard> start, final String... types ) {
         /**
          * If the edge is present, we need to begin seeking from this
          */
@@ -118,12 +123,12 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
             rangeBuilder.setStart( start.get().getShardIndex() );
         }
 
-        final EdgeRowKey key = new EdgeRowKey( nodeId, types );
+        final DirectedRowKey key = new DirectedRowKey( nodeId, nodeType, types );
 
         final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope, key );
 
 
-        final RowQuery<ScopedRowKey<ApplicationScope, EdgeRowKey>, Long> query =
+        final RowQuery<ScopedRowKey<ApplicationScope, DirectedRowKey>, Long> query =
                 keyspace.prepareQuery( EDGE_SHARDS ).setConsistencyLevel( cassandraConfig.getReadCL() ).getKey( rowKey )
                         .autoPaginate( true ).withColumnRange( rangeBuilder.build() );
 
@@ -133,15 +138,15 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
 
     @Override
-    public MutationBatch removeEdgeMeta( final ApplicationScope scope, final Id nodeId, final long shard,
-                                         final String... types ) {
+    public MutationBatch removeEdgeMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                         final long shard, final String... types ) {
 
         ValidationUtils.validateApplicationScope( scope );
-              ValidationUtils.verifyIdentity(nodeId);
-              Preconditions.checkArgument( shard > -1, "shard must be greater than -1" );
-              Preconditions.checkNotNull( types );
+        ValidationUtils.verifyIdentity( nodeId );
+        Preconditions.checkArgument( shard > -1, "shard must be greater than -1" );
+        Preconditions.checkNotNull( types );
 
-        final EdgeRowKey key = new EdgeRowKey( nodeId, types );
+        final DirectedRowKey key = new DirectedRowKey( nodeId, nodeType, types );
 
         final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope, key );
 
@@ -164,11 +169,86 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
     }
 
 
-    private static class ShardColumnParser implements ColumnParser<Long,Shard> {
+    private static class DirectedRowKey {
+
+
+        private final Id nodeId;
+        private final NodeType nodeType;
+        private final String[] edgeTypes;
+
+
+        public DirectedRowKey( final Id nodeId, final NodeType nodeType, final String[] edgeTypes ) {
+            this.nodeId = nodeId;
+            this.nodeType = nodeType;
+            this.edgeTypes = edgeTypes;
+        }
+    }
+
+
+    private static class DirectedEdgeRowKeySerializer implements CompositeFieldSerializer<DirectedRowKey> {
+
+        private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
+
+
+        @Override
+        public void toComposite( final CompositeBuilder builder, final DirectedRowKey key ) {
+            ID_SER.toComposite( builder, key.nodeId );
+
+            builder.addInteger( getValue( key.nodeType ) );
+
+            builder.addInteger( key.edgeTypes.length );
+
+            for ( String type : key.edgeTypes ) {
+                builder.addString( type );
+            }
+        }
+
+
+        @Override
+        public DirectedRowKey fromComposite( final CompositeParser composite ) {
+            final Id sourceId = ID_SER.fromComposite( composite );
+
+            final NodeType type = getType( composite.readInteger() );
+
+
+            final int length = composite.readInteger();
+
+            String[] types = new String[length];
+
+            for ( int i = 0; i < length; i++ ) {
+                types[i] = composite.readString();
+            }
+
+            return new DirectedRowKey( sourceId, type, types );
+        }
+
+
+        private int getValue( NodeType type ) {
+            if ( type == NodeType.SOURCE ) {
+                return 0;
+            }
+
+            return 1;
+        }
+
+
+        public NodeType getType( int value ) {
+            if ( value == 0 ) {
+                return NodeType.SOURCE;
+            }
+
+            return NodeType.TARGET;
+        }
+
+
+    }
+
+
+    private static class ShardColumnParser implements ColumnParser<Long, Shard> {
 
         @Override
         public Shard parseColumn( final Column<Long> column ) {
-            return new Shard(column.getName(), column.getTimestamp());
+            return new Shard( column.getName(), column.getTimestamp() );
         }
     }
 }
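
Worth noting in the serializer above: the direction is written into the composite as an explicit 0/1 through the private getValue()/getType() pair, which keeps the stored row keys independent of any future reordering of the enum. A tiny standalone sketch of that round trip, with helper names that merely mirror the private methods:

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;

    public class NodeTypeEncodingExample {

        //mirrors the private getValue() in DirectedEdgeRowKeySerializer
        static int toInt( final NodeType type ) {
            return type == NodeType.SOURCE ? 0 : 1;
        }

        //mirrors the private getType() in DirectedEdgeRowKeySerializer
        static NodeType fromInt( final int value ) {
            return value == 0 ? NodeType.SOURCE : NodeType.TARGET;
        }

        public static void main( final String[] args ) {
            for ( NodeType type : NodeType.values() ) {
                //both directions must survive the composite round trip
                assert fromInt( toInt( type ) ) == type;
            }
        }
    }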

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index 5cece93..c0a44a9 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -21,6 +21,8 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
 
 
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 
@@ -33,9 +35,8 @@ import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardCounterSerialization;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.ShardKey;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.common.base.Optional;
@@ -51,84 +52,92 @@ import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
+    private static final MinShardTimeComparator MIN_SHARD_TIME_COMPARATOR = new MinShardTimeComparator();
+
     private final EdgeShardSerialization edgeShardSerialization;
 //    private final NodeShardCounterSerialization edgeShardCounterSerialization;
     private final NodeShardApproximation nodeShardApproximation;
     private final TimeService timeService;
     private final GraphFig graphFig;
-
+    private final Keyspace keyspace;
 
     @Inject
     public NodeShardAllocationImpl( final EdgeShardSerialization edgeShardSerialization,
-                                    final NodeShardApproximation nodeShardApproximation,
-                                    final TimeService timeService, final GraphFig graphFig ) {
+                                    final NodeShardApproximation nodeShardApproximation, final TimeService timeService,
+                                    final GraphFig graphFig, final Keyspace keyspace ) {
         this.edgeShardSerialization = edgeShardSerialization;
         this.nodeShardApproximation = nodeShardApproximation;
         this.timeService = timeService;
         this.graphFig = graphFig;
+        this.keyspace = keyspace;
     }
 
 
     @Override
-    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, Optional<Shard> maxShardId, final String... edgeTypes ) {
+    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final Optional<Shard> maxShardId,
+                                            final String... edgeTypes ) {
 
         final Iterator<Shard> existingShards =
-                edgeShardSerialization.getEdgeMetaData( scope, nodeId, maxShardId, edgeTypes );
+                edgeShardSerialization.getEdgeMetaData( scope, nodeId,nodeType, maxShardId, edgeTypes );
 
         final PushbackIterator<Shard> pushbackIterator = new PushbackIterator( existingShards );
-//
-//
-//        final long now = timeService.getCurrentTime();
-//
-//
-//        final List<Long> futures = new ArrayList<Long>();
-//
-//
-//        //loop through all shards, any shard > now+1 should be deleted
-//        while ( pushbackIterator.hasNext() ) {
-//
-//            final Long value = pushbackIterator.next();
-//
-//            //we're done, our current time uuid is greater than the value stored
-//            if ( now >= value ) {
-//                //push it back into the iterator
-//                pushbackIterator.pushback( value );
-//                break;
-//            }
-//
-//            futures.add( value );
-//        }
-//
-//
-//        //we have more than 1 future value, we need to remove it
-//
-//        MutationBatch cleanup = keyspace.prepareMutationBatch();
-//
-//        //remove all futures except the last one, it is the only value we shouldn't lazy remove
-//        for ( int i = 0; i < futures.size() -1; i++ ) {
-//            final long toRemove = futures.get( i );
-//
-//            final MutationBatch batch = edgeShardSerialization.removeEdgeMeta( scope, nodeId, toRemove, edgeTypes );
-//
-//            cleanup.mergeShallow( batch );
-//        }
-//
-//
-//        try {
-//            cleanup.execute();
-//        }
-//        catch ( ConnectionException e ) {
-//            throw new GraphRuntimeException( "Unable to remove future shards, mutation error", e );
-//        }
-//
-//
-//        final int futuresSize =  futures.size();
-//
-//        if ( futuresSize > 0 ) {
-//            pushbackIterator.pushback( futures.get( futuresSize - 1 ) );
-//        }
-//
-//
+
+
+        final long minConflictTime = getMinTime();
+
+
+        final List<Shard> futures = new ArrayList<>();
+
+
+        //loop through all shards; any shard created after the minimum conflict time is a future shard
+        while ( pushbackIterator.hasNext() ) {
+
+            final Shard shard = pushbackIterator.next();
+
+            //we're done, this shard was created before the minimum conflict time
+            if ( shard.getCreatedTime() < minConflictTime  ) {
+                //push it back into the iterator
+                pushbackIterator.pushback( shard );
+                break;
+            }
+
+            futures.add( shard );
+        }
+
+
+        //clean up our future
+        Collections.sort(futures, MIN_SHARD_TIME_COMPARATOR);
+
+
+        //we have more than 1 future value, we need to remove it
+
+        MutationBatch cleanup = keyspace.prepareMutationBatch();
+
+        //remove all future shards except the earliest one, it is the only value we shouldn't lazily remove
+        for ( int i = 1; i < futures.size() ; i++ ) {
+            final Shard toRemove = futures.get( i );
+
+            final MutationBatch batch = edgeShardSerialization.removeEdgeMeta( scope, nodeId, nodeType, toRemove.getShardIndex(), edgeTypes );
+
+            cleanup.mergeShallow( batch );
+        }
+
+
+        try {
+            cleanup.execute();
+        }
+        catch ( ConnectionException e ) {
+            throw new GraphRuntimeException( "Unable to remove future shards, mutation error", e );
+        }
+
+
+        final int futuresSize =  futures.size();
+
+        if ( futuresSize > 0 ) {
+            pushbackIterator.pushback( futures.get( 0 ) );
+        }
+
+
         /**
          * Nothing to iterate, return an iterator with 0.
          */
@@ -141,9 +150,9 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
     @Override
-    public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId, final String... edgeType ) {
+    public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId,final NodeType nodeType,  final String... edgeType ) {
 
-        final Iterator<Shard> maxShards = getShards( scope, nodeId, Optional.<Shard>absent(), edgeType );
+        final Iterator<Shard> maxShards = getShards( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
 
 
         //if the first shard has already been allocated, do nothing.
@@ -160,7 +169,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
          */
 
 
-        final long count = nodeShardApproximation.getCount( scope, nodeId, maxShard.getShardIndex(), edgeType );
+        final long count = nodeShardApproximation.getCount( scope, nodeId, nodeType,  maxShard.getShardIndex(), edgeType );
 
         if ( count < graphFig.getShardSize() ) {
             return false;
@@ -168,18 +177,42 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
         //try to get a lock here, and fail if one isn't present
 
-        final long newShardTime = timeService.getCurrentTime() + graphFig.getShardCacheTimeout() * 2;
+//        final long newShardTime = timeService.getCurrentTime() + graphFig.getShardCacheTimeout() * 2;
+//
+//
+//        try {
+//            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, newShardTime, edgeType ).execute();
+//        }
+//        catch ( ConnectionException e ) {
+//            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
+//        }
 
 
-        try {
-            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, newShardTime, edgeType ).execute();
-        }
-        catch ( ConnectionException e ) {
-            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
-        }
+        return true;
+    }
 
 
-        return true;
+    @Override
+    public long getMinTime() {
+        return timeService.getCurrentTime() - (2 * graphFig.getShardCacheTimeout());
     }
 
+
+    /**
+     * Sorts by minimum time first.  If 2 times are equal, the min shard value is taken
+     */
+    private static final class MinShardTimeComparator implements Comparator<Shard> {
+
+            @Override
+            public int compare( final Shard s1, final Shard s2 ) {
+                int result =  Long.compare( s1.getCreatedTime(), s2.getCreatedTime() );
+
+                if(result == 0){
+                    result = Long.compare( s1.getShardIndex(), s2.getShardIndex() );
+                }
+
+                return result;
+            }
+        }
+
 }
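
To make the future-shard cleanup above concrete: after sorting with MinShardTimeComparator (earliest created time first, ties broken by the smaller shard index), only futures.get(0) is pushed back for use and every later entry is removed through removeEdgeMeta. The values in the sketch below are made up purely for illustration.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;

    public class FutureShardCleanupExample {

        public static void main( final String[] args ) {

            //three shards allocated close together by competing writers: (shardIndex, createdTime)
            final List<Shard> futures = new ArrayList<>();
            futures.add( new Shard( 3000l, 1050l ) );
            futures.add( new Shard( 2000l, 1000l ) );
            futures.add( new Shard( 2500l, 1000l ) );

            //same ordering as MinShardTimeComparator
            Collections.sort( futures, new Comparator<Shard>() {
                @Override
                public int compare( final Shard s1, final Shard s2 ) {
                    int result = Long.compare( s1.getCreatedTime(), s2.getCreatedTime() );
                    if ( result == 0 ) {
                        result = Long.compare( s1.getShardIndex(), s2.getShardIndex() );
                    }
                    return result;
                }
            } );

            //shard (2000, 1000) is kept and pushed back; (2500, 1000) and (3000, 1050)
            //would be removed through removeEdgeMeta
            System.out.println( "keep: " + futures.get( 0 ) );
        }
    }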

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
index eeefb3a..a0bb0ef 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
@@ -25,7 +25,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.TreeSet;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
@@ -34,6 +33,7 @@ import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.graph.serialization.util.IterableUtil;
@@ -95,11 +95,11 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final long timestamp,
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long timestamp,
                                final String... edgeType ) {
 
 
-        final CacheKey key = new CacheKey( scope, nodeId, edgeType );
+        final CacheKey key = new CacheKey( scope, nodeId, nodeType, edgeType );
         CacheEntry entry;
 
         try {
@@ -121,9 +121,9 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final long maxTimestamp,
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long maxTimestamp,
                                      final String... edgeType ) {
-        final CacheKey key = new CacheKey( scope, nodeId, edgeType );
+        final CacheKey key = new CacheKey( scope, nodeId, nodeType, edgeType );
               CacheEntry entry;
 
               try {
@@ -165,7 +165,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
                           final Iterator<Shard> edges = nodeShardAllocation
-                                  .getShards( key.scope, key.id, Optional.<Shard>absent(), key.types );
+                                  .getShards( key.scope, key.id, key.nodeType,  Optional.<Shard>absent(), key.types );
 
                           return new CacheEntry( edges );
                       }
@@ -179,12 +179,14 @@ public class NodeShardCacheImpl implements NodeShardCache {
     private static class CacheKey {
         private final ApplicationScope scope;
         private final Id id;
+        private final NodeType nodeType;
         private final String[] types;
 
 
-        private CacheKey( final ApplicationScope scope, final Id id, final String[] types ) {
+        private CacheKey( final ApplicationScope scope, final Id id, final NodeType nodeType, final String[] types ) {
             this.scope = scope;
             this.id = id;
+            this.nodeType = nodeType;
             this.types = types;
         }
 
@@ -203,6 +205,12 @@ public class NodeShardCacheImpl implements NodeShardCache {
             if ( !id.equals( cacheKey.id ) ) {
                 return false;
             }
+            if ( nodeType != cacheKey.nodeType ) {
+                return false;
+            }
+            if ( !scope.equals( cacheKey.scope ) ) {
+                return false;
+            }
             if ( !Arrays.equals( types, cacheKey.types ) ) {
                 return false;
             }
@@ -213,7 +221,9 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
         @Override
         public int hashCode() {
-            int result = id.hashCode();
+            int result = scope.hashCode();
+            result = 31 * result + id.hashCode();
+            result = 31 * result + nodeType.hashCode();
             result = 31 * result + Arrays.hashCode( types );
             return result;
         }
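
For readers skimming the hunk above: the cache key now carries the node type, and both equals() and hashCode() fold in every field that distinguishes a lookup (scope, id, node type, edge types), so the cache can never hand back entries for the wrong node type. A minimal, self-contained sketch of that pattern, using plain String/enum stand-ins rather than the real ApplicationScope and Id types (and SOURCE/TARGET values assumed purely for illustration), might look like this:

    import java.util.Arrays;

    public class CacheKeySketch {

        enum NodeType { SOURCE, TARGET }

        static final class Key {
            final String scope;      // stand-in for ApplicationScope
            final String id;         // stand-in for Id
            final NodeType nodeType;
            final String[] types;

            Key( final String scope, final String id, final NodeType nodeType, final String... types ) {
                this.scope = scope;
                this.id = id;
                this.nodeType = nodeType;
                this.types = types;
            }

            @Override
            public boolean equals( final Object o ) {
                if ( this == o ) {
                    return true;
                }
                if ( !( o instanceof Key ) ) {
                    return false;
                }
                final Key other = ( Key ) o;
                return scope.equals( other.scope ) && id.equals( other.id )
                        && nodeType == other.nodeType && Arrays.equals( types, other.types );
            }

            @Override
            public int hashCode() {
                // same 31-multiplier combination as the patch above
                int result = scope.hashCode();
                result = 31 * result + id.hashCode();
                result = 31 * result + nodeType.hashCode();
                result = 31 * result + Arrays.hashCode( types );
                return result;
            }
        }

        public static void main( final String[] args ) {
            final Key source = new Key( "app1", "node1", NodeType.SOURCE, "follows" );
            final Key target = new Key( "app1", "node1", NodeType.TARGET, "follows" );
            // distinct node types must never collapse into a single cache entry
            System.out.println( source.equals( target ) );  // prints false
        }
    }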

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
index 17791cb..7a55c53 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
@@ -26,6 +26,7 @@ import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -53,23 +54,23 @@ public class SizebasedEdgeShardStrategy implements EdgeShardStrategy {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final long timestamp,
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId,  final NodeType nodeType, final long timestamp,
                                 final String... types ) {
-        return shardCache.getWriteShards( scope, rowKeyId, timestamp, types );
+        return shardCache.getWriteShards( scope, rowKeyId, nodeType, timestamp, types );
     }
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId, final long maxTimestamp,
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId,   final NodeType nodeType,final long maxTimestamp,
                                          final String... types ) {
-        return shardCache.getReadShards( scope, rowKeyId, maxTimestamp, types );
+        return shardCache.getReadShards( scope, rowKeyId, nodeType, maxTimestamp, types );
     }
 
 
     @Override
-    public void increment( final ApplicationScope scope, final Id rowKeyId, final long shardId, final long count,
+    public void increment( final ApplicationScope scope, final Id rowKeyId,  final NodeType nodeType, final long shardId, final long count,
                            final String... types ) {
-        shardApproximation.increment( scope, rowKeyId, shardId, count, types );
+        shardApproximation.increment( scope, rowKeyId, nodeType, shardId, count, types );
     }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
deleted file mode 100644
index 14a67fa..0000000
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/util/EdgeHasher.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.usergrid.persistence.graph.serialization.util;
-
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-
-import org.apache.cassandra.utils.MurmurHash;
-
-import org.apache.usergrid.persistence.model.entity.Id;
-
-
-/**
- *
- *
- */
-public class EdgeHasher {
-
-    private static final String UTF_8 = "UTF-8";
-    private static final Charset CHARSET = Charset.forName( UTF_8 );
-
-
-    /**
-     * Create a hash based on the edge type and the type of the id that will be inserted into the column
-     *
-     *
-     * @param edgeType The name of the edge type
-     * @param idForColumn The id of the value that will be in the column
-     *
-     * @return A hash that represents a consistent one way hash of the fields
-     */
-    public static long[] createEdgeHash( final String edgeType, final Id idForColumn ) {
-
-        return createEdgeHash( edgeType, idForColumn.getType() );
-    }
-
-
-    /**
-     * Create the edge hash from the edge type and id type
-     * @param edgeTypes
-     * @return
-     */
-    public static long[] createEdgeHash(final String... edgeTypes){
-       final StringBuilder hashString =  new StringBuilder();
-
-        for(String edge: edgeTypes){
-            hashString.append(edge);
-        }
-
-        return createEdgeHash( hashString.toString() );
-    }
-
-
-    /**
-     * Create a ash based on the edge type and the type of the id that will be inserted into the column
-     *
-     * @return A hash that represents a consistent one way hash of the fields
-     */
-    public static long[] createEdgeHash( final String edgeType ) {
-
-
-        ByteBuffer key = ByteBuffer.wrap( edgeType.getBytes( CHARSET ) );
-
-        return  MurmurHash.hash3_x64_128( key, key.position(), key.remaining(), 0 );
-
-    }
-
-}
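
The deleted EdgeHasher above concatenated the edge-type strings and ran them through Cassandra's 128-bit MurmurHash, returning the result as two longs. A standalone sketch of the same idea using Guava's murmur3_128 (Guava here is only an assumption to keep the example self-contained; the removed class called org.apache.cassandra.utils.MurmurHash directly) could look like:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import com.google.common.hash.Hashing;

    public class EdgeHashSketch {

        /** Concatenate the edge-type strings and return a 128-bit hash as two longs. */
        public static long[] hashEdgeTypes( final String... edgeTypes ) {
            final StringBuilder sb = new StringBuilder();
            for ( final String type : edgeTypes ) {
                sb.append( type );
            }
            final byte[] bytes = Hashing.murmur3_128()
                    .hashString( sb, StandardCharsets.UTF_8 )
                    .asBytes();
            final ByteBuffer buf = ByteBuffer.wrap( bytes );
            // two longs = 128 bits, mirroring the long[] the old API returned
            return new long[] { buf.getLong(), buf.getLong() };
        }

        public static void main( final String[] args ) {
            final long[] hash = hashEdgeTypes( "edgeType", "subType" );
            System.out.println( hash[0] + ", " + hash[1] );
        }
    }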

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
index 89d89b8..6135a2d 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
@@ -92,33 +92,55 @@ public class EdgeShardSerializationTest {
 
         final long slice3 = slice2 * 2;
 
+        final long timestamp = 10000l;
+
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, types );
+        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, timestamp, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, timestamp, types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, timestamp, types ) );
 
         batch.execute();
 
 
         Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().getShardIndex() );
+        Shard next = results.next();
 
-        assertEquals( slice2, results.next().getShardIndex() );
+        assertEquals( slice3, next.getShardIndex() );
 
-        assertEquals( slice1, results.next().getShardIndex() );
+        assertEquals( timestamp, next.getCreatedTime());
+
+        next = results.next();
+
+
+        assertEquals( slice2, next.getShardIndex() );
+
+        assertEquals( timestamp, next.getCreatedTime());
+
+        next = results.next();
+
+
+        assertEquals( slice1, next.getShardIndex() );
+
+        assertEquals( timestamp, next.getCreatedTime());
 
         assertFalse( results.hasNext() );
 
         //test paging and size
         results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.of( new Shard( slice2, 0l ) ), types );
 
-        assertEquals( slice2, results.next().getShardIndex() );
+        next = results.next();
 
-        assertEquals( slice1, results.next().getShardIndex() );
+        assertEquals( slice2, next.getShardIndex() );
+        assertEquals( timestamp, next.getCreatedTime());
+
+        next = results.next();
+
+        assertEquals( slice1, next.getShardIndex() );
+        assertEquals( timestamp, next.getCreatedTime());
 
 
         assertFalse( results.hasNext() );
@@ -136,13 +158,15 @@ public class EdgeShardSerializationTest {
 
         final long slice3 = slice2 * 2;
 
+        final long timestamp = 10000l;
+
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, types );
+        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, slice1, timestamp, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice2, timestamp,types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, slice3, timestamp, types ) );
 
         batch.execute();
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 6824e74..7546531 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -77,6 +77,37 @@ public class NodeShardAllocationTest {
     }
 
 
+
+    @Test
+    public void minTime() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final NodeShardApproximation nodeShardCounterSerialization =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
+                        graphFig, keyspace );
+
+
+        final long timeservicetime = System.currentTimeMillis();
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
+
+        final long returned = approximation.getMinTime();
+
+        assertEquals("Correct time was returned", expected, returned);
+    }
+
+
     @Test
     public void noShards() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
@@ -95,7 +126,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -115,7 +146,7 @@ public class NodeShardAllocationTest {
 
 
     @Test
-    public void existingFutureShard() {
+    public void existingFutureShardSameTime() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
         final NodeShardApproximation nodeShardCounterSerialization =
@@ -134,7 +165,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -145,7 +176,7 @@ public class NodeShardAllocationTest {
 
         when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
 
-        final Shard futureShard =  new Shard(timeservicetime + graphFig.getShardCacheTimeout() * 2, timeservicetime) ;
+        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
 
         /**
          * Mock up returning a min shard, and a future shard
@@ -179,7 +210,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -203,8 +234,7 @@ public class NodeShardAllocationTest {
 
         final long count = graphFig.getShardSize() - 1;
 
-        when( nodeShardApproximation.getCount(scope, nodeId, 0l, type, subType ))
-                                           .thenReturn( count );
+        when( nodeShardApproximation.getCount(scope, nodeId, 0l, type, subType )).thenReturn( count );
 
         final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
 
@@ -231,7 +261,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -258,12 +288,13 @@ public class NodeShardAllocationTest {
                 .getCount(   scope , nodeId, 0l,type , subType  ))
                 .thenReturn( shardCount );
 
-        ArgumentCaptor<Long> newUUIDValue = ArgumentCaptor.forClass( Long.class );
+        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
+        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
 
 
         //mock up our mutation
         when( edgeShardSerialization
-                .writeEdgeMeta( same( scope ), same( nodeId ), newUUIDValue.capture(), same( type ), same( subType ) ) )
+                .writeEdgeMeta( same( scope ), same( nodeId ), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
                 .thenReturn( mock( MutationBatch.class ) );
 
 
@@ -273,13 +304,16 @@ public class NodeShardAllocationTest {
 
         //check our new allocated UUID
 
-        final long expectedTime = timeservicetime + 2 * graphFig.getShardCacheTimeout();
 
-        final long savedTimestamp = newUUIDValue.getValue();
+        final long savedTimestamp = timestampValue.getValue();
+
+
 
 
 
-        assertEquals( "Expected at 2x timeout generated", expectedTime, savedTimestamp );
+        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
+
+        //now check our max value was set
     }
 
 
@@ -304,7 +338,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -333,14 +367,13 @@ public class NodeShardAllocationTest {
         /**
          * Simulate slow node
          */
-        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime);
-
-        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
 
-        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime);
+        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shards 1 and 3 should be removed
+        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
 
+        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
 
-        final int pageSize = 100;
+        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
 
         /**
          * Mock up returning a min shard
@@ -363,12 +396,12 @@ public class NodeShardAllocationTest {
 
 
         final Iterator<Shard>
-                result = approximation.getShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
+                result = approximation.getSourceShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
 
 
         assertTrue( "Shards present", result.hasNext() );
 
-        assertEquals("Only single next shard returned", futureShard1,  result.next());
+        assertEquals("Only single next shard returned", futureShard2,  result.next());
 
         assertTrue("Shards present", result.hasNext());
 
@@ -384,8 +417,8 @@ public class NodeShardAllocationTest {
 
         assertEquals("2 values removed", 2,  values.size());
 
-        assertEquals("Deleted Max Future", futureShard3, values.get( 0 ).longValue());
-        assertEquals("Deleted Next Future", futureShard2, values.get( 1 ).longValue());
+        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
+        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
 
     }
 
@@ -410,7 +443,7 @@ public class NodeShardAllocationTest {
 
         NodeShardAllocation approximation =
                 new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig );
+                        graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -423,7 +456,8 @@ public class NodeShardAllocationTest {
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
                         same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
-        final Iterator<Shard> result = approximation.getShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
+        final Iterator<Shard> result = approximation.getSourceShards( scope, nodeId, Optional.<Shard>absent(), type,
+                subType );
 
         assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
 

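The new minTime test above pins down a small but important formula: the earliest shard creation time worth auditing is the current time minus twice the shard cache timeout, presumably so every cached view of the shard layout has had time to expire before the shard is judged. A trivial, self-contained sketch of that expectation (illustrative names, not the real GraphFig/TimeService API):

    public class MinTimeSketch {

        /** Earliest shard-created time the allocator should still consider, per the test above. */
        static long getMinTime( final long currentTimeMillis, final long shardCacheTimeoutMillis ) {
            return currentTimeMillis - 2 * shardCacheTimeoutMillis;
        }

        public static void main( final String[] args ) {
            final long now = System.currentTimeMillis();
            final long cacheTimeout = 30000L; // hypothetical 30 second cache timeout
            System.out.println( getMinTime( now, cacheTimeout ) );
        }
    }
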
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/72f0d7d8/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
index a8fdbc4..4934037 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
@@ -92,7 +92,7 @@ public class NodeShardCacheTest {
 //         * Simulate returning no shards at all.
 //         */
 //        when( allocation
-//                .getShards( same( scope ), same( id ), same( max), same( edgeType ),
+//                .getSourceShards( same( scope ), same( id ), same( max), same( edgeType ),
 //                        same( otherIdType ) ) )
 //                .thenReturn( Collections.singletonList( 0l ).iterator() );
 //
@@ -139,7 +139,7 @@ public class NodeShardCacheTest {
 //        /**
 //         * Simulate returning single shard
 //         */
-//        when( allocation.getShards( same( scope ), same( id ), same(max),
+//        when( allocation.getSourceShards( same( scope ), same( id ), same(max),
 //                same( edgeType ), same( otherIdType ) ) ).thenReturn( Collections.singletonList( min ).iterator() );
 //
 //
@@ -188,7 +188,7 @@ public class NodeShardCacheTest {
 //        /**
 //         * Simulate returning all shards
 //         */
-//        when( allocation.getShards( same( scope ), same( id ), any( Optional.class ),
+//        when( allocation.getSourceShards( same( scope ), same( id ), any( Optional.class ),
 //                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
 //
 //
@@ -284,14 +284,14 @@ public class NodeShardCacheTest {
 //        /**
 //         * Simulate returning all shards
 //         */
-//        when( allocation.getShards( same( scope ), same( id ),  any(Optional.class),
+//        when( allocation.getSourceShards( same( scope ), same( id ),  any(Optional.class),
 //                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
 //
 //
 //        //check getting equal to our min, mid and max
 //
 //        Iterator<Long> slice =
-//                cache.getShards( scope, id, max, edgeType, otherIdType );
+//                cache.getSourceShards( scope, id, max, edgeType, otherIdType );
 //
 //
 //        assertEquals( max, slice.next().longValue() );
@@ -299,13 +299,13 @@ public class NodeShardCacheTest {
 //        assertEquals( min, slice.next().longValue() );
 //
 //
-//        slice = cache.getShards( scope, id, mid, edgeType, otherIdType );
+//        slice = cache.getSourceShards( scope, id, mid, edgeType, otherIdType );
 //
 //        assertEquals( mid, slice.next().longValue() );
 //        assertEquals( min, slice.next().longValue() );
 //
 //
-//        slice = cache.getShards( scope, id, min, edgeType, otherIdType );
+//        slice = cache.getSourceShards( scope, id, min, edgeType, otherIdType );
 //
 //        assertEquals( min, slice.next().longValue() );
 //


[02/11] git commit: WIP overwrite

Posted by to...@apache.org.
WIP overwrite


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/b1434ddf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/b1434ddf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/b1434ddf

Branch: refs/heads/USERGRID-188
Commit: b1434ddfff6bbb9b7e65a007071b258c4d7234bb
Parents: 4cfc849
Author: Todd Nine <tn...@apigee.com>
Authored: Wed Jul 2 09:35:36 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Tue Jul 8 14:42:39 2014 -0600

----------------------------------------------------------------------
 .../impl/EdgeSerializationImpl.java             | 142 ++---
 .../impl/shard/EdgeShardSerialization.java      |   2 +-
 .../impl/shard/EdgeShardStrategy.java           |   5 +-
 .../impl/shard/NodeShardAllocation.java         |   2 +-
 .../impl/shard/NodeShardCache.java              |  10 +-
 .../graph/serialization/impl/shard/Shard.java   | 103 ++++
 .../serialization/impl/shard/ShardEntries.java  |  45 ++
 .../shard/impl/EdgeShardSerializationImpl.java  |  13 +-
 .../shard/impl/NodeShardAllocationImpl.java     |  15 +-
 .../impl/shard/impl/NodeShardCacheImpl.java     |  39 +-
 .../shard/impl/SizebasedEdgeShardStrategy.java  |  11 +-
 .../impl/shard/EdgeShardSerializationTest.java  |  30 +-
 .../impl/shard/NodeShardAllocationTest.java     |  35 +-
 .../impl/shard/NodeShardCacheTest.java          | 516 +++++++++----------
 14 files changed, 573 insertions(+), 395 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
index 10d1048..7c630c1 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
@@ -55,6 +55,8 @@ import org.apache.usergrid.persistence.graph.SearchByIdType;
 import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeUtils;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -65,7 +67,6 @@ import com.google.inject.Singleton;
 import com.netflix.astyanax.Keyspace;
 import com.netflix.astyanax.MutationBatch;
 import com.netflix.astyanax.Serializer;
-import com.netflix.astyanax.model.AbstractComposite;
 import com.netflix.astyanax.model.Column;
 import com.netflix.astyanax.model.CompositeBuilder;
 import com.netflix.astyanax.model.CompositeParser;
@@ -73,8 +74,6 @@ import com.netflix.astyanax.model.DynamicComposite;
 import com.netflix.astyanax.query.RowQuery;
 import com.netflix.astyanax.serializers.AbstractSerializer;
 import com.netflix.astyanax.serializers.LongSerializer;
-import com.netflix.astyanax.serializers.StringSerializer;
-import com.netflix.astyanax.serializers.UUIDSerializer;
 import com.netflix.astyanax.util.RangeBuilder;
 
 import static com.google.common.base.Preconditions.checkNotNull;
@@ -213,7 +212,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
             @Override
             public void countEdge( final Id rowId, final long shardId, final String... types ) {
-                if(!isDeleted){
+                if ( !isDeleted ) {
                     edgeShardStrategy.increment( scope, rowId, shardId, 1l, types );
                 }
             }
@@ -291,62 +290,82 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
          * Key in the serializers based on the edge
          */
 
-        final long sourceRowKeyShard = edgeShardStrategy.getWriteShard( scope, sourceNodeId, timestamp, type );
-        final RowKey sourceRowKey = new RowKey( sourceNodeId, type, sourceRowKeyShard);
 
+        /**
+         * write edges from source->target
+         */
 
 
-        final long sourceWithTypeRowKeyShard =  edgeShardStrategy.getWriteShard( scope, sourceNodeId, timestamp, type, targetNodeType );
+        final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
 
-        final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, sourceWithTypeRowKeyShard );
+        final ShardEntries sourceRowKeyShard = edgeShardStrategy.getWriteShards( scope, sourceNodeId, timestamp, type );
 
-        final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
 
+        for ( Shard shard : sourceRowKeyShard.getEntries() ) {
 
+            final long shardId = shard.getShardIndex();
+            final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
+            op.writeEdge( sourceNodeEdgesCf, sourceRowKey, sourceEdge );
+            op.countEdge( sourceNodeId, shardId, type );
+        }
 
-        final long targetRowKeyShard = edgeShardStrategy.getWriteShard( scope, targetNodeId, timestamp, type );
-        final RowKey targetRowKey = new RowKey( targetNodeId, type, targetRowKeyShard);
 
-        final long targetWithTypeRowKeyShard = edgeShardStrategy.getWriteShard( scope, targetNodeId, timestamp, type, souceNodeType );
-        final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, targetWithTypeRowKeyShard );
+        final ShardEntries sourceWithTypeRowKeyShard =
+                edgeShardStrategy.getWriteShards( scope, sourceNodeId, timestamp, type, targetNodeType );
 
-        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
+        for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
 
+            final long shardId = shard.getShardIndex();
+            final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
 
-        final EdgeRowKey edgeRowKey = new EdgeRowKey( sourceNodeId, type, targetNodeId, edgeShardStrategy
-                .getWriteShard( scope, sourceNodeId, timestamp, type, targetNodeId.getUuid().toString(),
-                        targetNodeId.getType() ) );
+            op.writeEdge( sourceNodeTargetTypeCf, sourceRowKeyType, sourceEdge );
+            op.countEdge( sourceNodeId, shardId, type, targetNodeType );
+        }
 
 
         /**
-         * write edges from source->target
+         * write edges from target<-source
          */
 
-        op.writeEdge( sourceNodeEdgesCf, sourceRowKey, sourceEdge );
-        op.countEdge( sourceNodeId, sourceRowKeyShard, type );
+        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
+
+
+        final ShardEntries targetRowKeyShard = edgeShardStrategy.getWriteShards( scope, targetNodeId, timestamp, type );
+
+        for ( Shard shard : targetRowKeyShard.getEntries() ) {
+            final long shardId = shard.getShardIndex();
+            final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
+
+            op.writeEdge( targetNodeEdgesCf, targetRowKey, targetEdge );
+            op.countEdge( targetNodeId, shardId, type );
+        }
+
+
+        final ShardEntries targetWithTypeRowKeyShard =
+                edgeShardStrategy.getWriteShards( scope, targetNodeId, timestamp, type, souceNodeType );
+
 
-        op.writeEdge( sourceNodeTargetTypeCf, sourceRowKeyType, sourceEdge );
-        op.countEdge( sourceNodeId, sourceWithTypeRowKeyShard, type, targetNodeType );
+        for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
 
+            final long shardId = shard.getShardIndex();
 
+            final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, shardId );
 
 
+            op.writeEdge( targetNodeSourceTypeCf, targetRowKeyType, targetEdge );
+            op.countEdge( targetNodeId, shardId, type, souceNodeType );
+        }
+
         /**
-         * write edges from target<-source
+         * Always use shard 0l; we're hard-limiting the same edge to ~2 billion timestamps
          */
-        op.writeEdge( targetNodeEdgesCf, targetRowKey, targetEdge );
-        op.countEdge( targetNodeId, targetRowKeyShard, type );
-
-        op.writeEdge( targetNodeSourceTypeCf, targetRowKeyType, targetEdge );
-        op.countEdge( targetNodeId, targetWithTypeRowKeyShard, type, souceNodeType );
+        final EdgeRowKey edgeRowKey = new EdgeRowKey( sourceNodeId, type, targetNodeId, 0l);
 
 
         /**
          * Write this in the timestamp log for this edge of source->target
          */
         op.writeVersion( graphEdgeVersionsCf, edgeRowKey, timestamp );
-
-
     }
 
 
@@ -622,10 +641,10 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
      */
     private static class EdgeSerializer extends AbstractSerializer<DirectedEdge> {
 
-                private static final IdColDynamicCompositeSerializer ID_COL_SERIALIZER =  IdColDynamicCompositeSerializer.get();
-//        private static final UUIDSerializer UUID_SERIALIZER = UUIDSerializer.get();
-//        private static final StringSerializer STRING_SERIALIZER = StringSerializer.get().getString(;
-//        )
+        private static final IdColDynamicCompositeSerializer ID_COL_SERIALIZER = IdColDynamicCompositeSerializer.get();
+        //        private static final UUIDSerializer UUID_SERIALIZER = UUIDSerializer.get();
+        //        private static final StringSerializer STRING_SERIALIZER = StringSerializer.get().getString(;
+        //        )
 
 
         @Override
@@ -633,20 +652,20 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
             DynamicComposite composite = new DynamicComposite();
 
-//            //add our edge
-//            composite.addComponent( edge.timestamp, LONG_SERIALIZER, LONG_TYPE_REVERSED,
-//                    AbstractComposite.ComponentEquality.EQUAL );
+            //            //add our edge
+            //            composite.addComponent( edge.timestamp, LONG_SERIALIZER, LONG_TYPE_REVERSED,
+            //                    AbstractComposite.ComponentEquality.EQUAL );
 
-//            //we do this explicity instead of re-using the id composite serializer b/c we want high order
-//            //time uuids first, not second. In this column family, there is no sort
-//            composite.addComponent( edge.id.getUuid(), UUID_SERIALIZER, UUID_TYPE_REVERSED,
-//                    AbstractComposite.ComponentEquality.EQUAL );
-//
-//            composite.addComponent( edge.id.getType(), STRING_SERIALIZER );
+            //            //we do this explicitly instead of re-using the id composite serializer b/c we want high order
+            //            //time uuids first, not second. In this column family, there is no sort
+            //            composite.addComponent( edge.id.getUuid(), UUID_SERIALIZER, UUID_TYPE_REVERSED,
+            //                    AbstractComposite.ComponentEquality.EQUAL );
+            //
+            //            composite.addComponent( edge.id.getType(), STRING_SERIALIZER );
 
             composite.addComponent( edge.timestamp, LONG_SERIALIZER );
 
-            ID_COL_SERIALIZER.toComposite( composite, edge.id);
+            ID_COL_SERIALIZER.toComposite( composite, edge.id );
 
             return composite.serialize();
         }
@@ -678,24 +697,24 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
     private static class RowKey {
         public final Id nodeId;
         public final long[] hash;
-        public final long shard;
+        public final long shardId;
 
 
         /**
          * Create a row key with the node and the edgeType
          */
-        public RowKey( Id nodeId, String edgeType, final long shard ) {
-            this( nodeId, EdgeHasher.createEdgeHash( edgeType ), shard );
+        public RowKey( Id nodeId, String edgeType, final long shardId ) {
+            this( nodeId, EdgeHasher.createEdgeHash( edgeType ), shardId );
         }
 
 
         /**
          * Create a new row key with the hash, should only be used in deserialization or internal callers.
          */
-        protected RowKey( Id nodeId, long[] hash, final long shard ) {
+        protected RowKey( Id nodeId, long[] hash, final long shardId ) {
             this.nodeId = nodeId;
             this.hash = hash;
-            this.shard = shard;
+            this.shardId = shardId;
         }
     }
 
@@ -712,24 +731,24 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
          * @param edgeType The type of the edge
          * @param typeId The type of the id
          */
-        public RowKeyType( final Id nodeId, final String edgeType, final Id typeId, final long shard ) {
-            this( nodeId, edgeType, typeId.getType(), shard );
+        public RowKeyType( final Id nodeId, final String edgeType, final Id typeId, final long shardId ) {
+            this( nodeId, edgeType, typeId.getType(), shardId );
         }
 
 
         /**
           * Create a row key with the node id in the row key, the edge type, and the target type from the id
          */
-        public RowKeyType( final Id nodeId, final String edgeType, final String targetType, final long shard ) {
-            super( nodeId, EdgeHasher.createEdgeHash( edgeType, targetType ), shard );
+        public RowKeyType( final Id nodeId, final String edgeType, final String targetType, final long shardId ) {
+            super( nodeId, EdgeHasher.createEdgeHash( edgeType, targetType ), shardId );
         }
 
 
         /**
          * Internal use in de-serializing.  Should only be used in this case or by internal callers
          */
-        private RowKeyType( final Id nodeId, final long[] hash, final long shard ) {
-            super( nodeId, hash, shard );
+        private RowKeyType( final Id nodeId, final long[] hash, final long shardId ) {
+            super( nodeId, hash, shardId );
         }
     }
 
@@ -767,11 +786,11 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         protected final Optional<Edge> last;
         protected final long maxTimestamp;
         protected final ApplicationScope scope;
-        protected final Iterator<Long> shards;
+        protected final Iterator<ShardEntries> shards;
 
 
         protected EdgeSearcher( final ApplicationScope scope, final long maxTimestamp, final Optional<Edge> last,
-                                final Iterator<Long> shards ) {
+                                final Iterator<ShardEntries> shards ) {
             this.scope = scope;
             this.maxTimestamp = maxTimestamp;
             this.last = last;
@@ -787,7 +806,10 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
         @Override
         public ScopedRowKey<ApplicationScope, R> next() {
-            return ScopedRowKey.fromKey( scope, generateRowKey( shards.next() ) );
+            /**
+             * Todo, multi scan
+             */
+            return ScopedRowKey.fromKey( scope, generateRowKey( shards.next().getEntries().iterator().next().getShardIndex() ) );
         }
 
 
@@ -875,7 +897,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
             builder.addLong( key.hash[0] );
             builder.addLong( key.hash[1] );
-            builder.addLong( key.shard );
+            builder.addLong( key.shardId );
         }
 
 
@@ -908,7 +930,7 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
 
             builder.addLong( keyType.hash[0] );
             builder.addLong( keyType.hash[1] );
-            builder.addLong( keyType.shard );
+            builder.addLong( keyType.shardId );
         }
 
 

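The EdgeSerializationImpl change above is the heart of this commit: the write shard is no longer a single long, the strategy hands back a ShardEntries set, and the edge is written and counted once per shard in that set, while the per-edge version log is pinned to shard 0l. A self-contained sketch of that fan-out loop, with plain collections and println standing in for the Astyanax mutation batch and the real row-key types:

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class MultiShardWriteSketch {

        static final class Shard {
            final long shardIndex;
            final long createdTime;

            Shard( final long shardIndex, final long createdTime ) {
                this.shardIndex = shardIndex;
                this.createdTime = createdTime;
            }
        }

        public static void main( final String[] args ) {
            // the strategy may hand back more than one live shard at a time
            final Set<Shard> writeShards = new LinkedHashSet<Shard>();
            writeShards.add( new Shard( 0L, 0L ) );
            writeShards.add( new Shard( 10000L, System.currentTimeMillis() ) );

            final String sourceNodeId = "source-node";
            final String edgeType = "follows";

            // one write plus one count increment per shard, mirroring the loops
            // over getEntries() in the patch above
            for ( final Shard shard : writeShards ) {
                final long shardId = shard.shardIndex;
                System.out.println( "writeEdge rowKey(" + sourceNodeId + ", " + edgeType
                        + ", shard=" + shardId + ")" );
                System.out.println( "countEdge " + sourceNodeId + " shard=" + shardId );
            }

            // the per-edge timestamp log always goes to shard 0l, as in the patch
            System.out.println( "writeVersion edgeRowKey shard=0" );
        }
    }
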
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
index d49cfdf..480b71d 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
@@ -52,7 +52,7 @@ public interface EdgeShardSerialization extends Migration{
      * @param types The types to use
      * @return
      */
-    public Iterator<Long> getEdgeMetaData(ApplicationScope scope, Id nodeId, Optional<Long> start,  String... types);
+    public Iterator<Shard> getEdgeMetaData(ApplicationScope scope, Id nodeId, Optional<Shard> start,  String... types);
 
     /**
      * Remove the shard from the edge meta data from the types.

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
index 09436ac..22c9470 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
@@ -37,7 +37,8 @@ public interface EdgeShardStrategy {
      * @param timestamp The timestamp on the edge
      * @param types The types in the edge
      */
-    public long getWriteShard(final ApplicationScope scope, final Id rowKeyId, final  long timestamp, final String... types );
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final long timestamp,
+                                           final String... types );
 
 
     /**
@@ -48,7 +49,7 @@ public interface EdgeShardStrategy {
      * @param maxTimestamp The max timestamp to use
      * @param types the types in the edge
      */
-    public Iterator<Long> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final long maxTimestamp,final  String... types );
+    public Iterator<ShardEntries> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final long maxTimestamp,final  String... types );
 
     /**
      * Increment our count meta data by the passed value.  Can be a positive or a negative number.

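Pulling the signature changes above together: a write now resolves to a whole group of shards, and a read walks an iterator of such groups, instead of single longs in both cases. A simplified, illustrative stand-in for the reworked interface (String in place of ApplicationScope and Id, a plain Set<Long> in place of ShardEntries):

    import java.util.Iterator;
    import java.util.Set;

    /** Simplified stand-in for the reworked EdgeShardStrategy, for illustration only. */
    interface ShardStrategySketch {

        /** All shards an edge written at this timestamp must be written to. */
        Set<Long> getWriteShards( String scope, String rowKeyId, long timestamp, String... types );

        /** Groups of shards to scan for edges at or below maxTimestamp. */
        Iterator<Set<Long>> getReadShards( String scope, String rowKeyId, long maxTimestamp, String... types );

        /** Adjust the approximate edge count for one shard; the delta may be negative. */
        void increment( String scope, String rowKeyId, long shardId, long count, String... types );
    }
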
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
index 1097ced..6ecadbb 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
@@ -44,7 +44,7 @@ public interface NodeShardAllocation {
      * @param edgeTypes
      * @return A list of all shards <= the current shard.  This will always return 0l if no shards are allocated
      */
-    public Iterator<Long> getShards( final ApplicationScope scope, final Id nodeId, Optional<Long> maxShardId,
+    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, Optional<Shard> maxShardId,
                                      final String... edgeTypes );
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
index 667fdbf..3e1675a 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
@@ -35,21 +35,23 @@ public interface NodeShardCache {
 
 
     /**
-     * Get the time meta data for the given node
+     * Get the shard for the given timestamp
      * @param nodeId
      * @param timestamp The time to select the slice for.
      * @param edgeType
      */
-    public long getSlice(final ApplicationScope scope, final Id nodeId, final long timestamp, final String... edgeType);
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final long timestamp,
+                                final String... edgeType );
 
     /**
-     * Get an iterator of all versions <= the version
+     * Get an iterator of all versions <= the version for iterating shard entry sets
      * @param scope
      * @param nodeId
      * @param maxTimestamp The highest timestamp
      * @param edgeType
      * @return
      */
-    public Iterator<Long> getVersions(final ApplicationScope scope, final Id nodeId, final long  maxTimestamp, final String... edgeType);
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final long maxTimestamp,
+                                     final String... edgeType );
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
new file mode 100644
index 0000000..d0c3e01
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+public class Shard implements Comparable<Shard> {
+
+    private final long shardIndex;
+    private final long createdTime;
+
+
+    public Shard( final long shardIndex, final long createdTime ) {
+        this.shardIndex = shardIndex;
+        this.createdTime = createdTime;
+    }
+
+
+    /**
+     * Get the long shard index
+     */
+    public long getShardIndex() {
+        return shardIndex;
+    }
+
+
+    /**
+     * Get the timestamp in epoch millis this shard was created
+     */
+    public long getCreatedTime() {
+        return createdTime;
+    }
+
+
+    @Override
+    public int compareTo( final Shard o ) {
+        if ( o == null ) {
+            return 1;
+        }
+
+        if ( shardIndex > o.shardIndex ) {
+            return 1;
+        }
+
+        else if ( shardIndex == o.shardIndex ) {
+            if ( createdTime > o.createdTime ) {
+                return 1;
+            }
+            else if ( createdTime < o.createdTime ) {
+                return -1;
+            }
+
+            return 0;
+        }
+
+        return -1;
+    }
+
+
+    @Override
+    public boolean equals( final Object o ) {
+        if ( this == o ) {
+            return true;
+        }
+        if ( !( o instanceof Shard ) ) {
+            return false;
+        }
+
+        final Shard shard = ( Shard ) o;
+
+        if ( createdTime != shard.createdTime ) {
+            return false;
+        }
+        if ( shardIndex != shard.shardIndex ) {
+            return false;
+        }
+
+        return true;
+    }
+
+
+    @Override
+    public int hashCode() {
+        int result = ( int ) ( shardIndex ^ ( shardIndex >>> 32 ) );
+        result = 31 * result + ( int ) ( createdTime ^ ( createdTime >>> 32 ) );
+        return result;
+    }
+}
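
Assuming the new Shard class above is on the classpath, its natural ordering (shard index first, created time as the tie-breaker) can be checked with a few lines:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;

    public class ShardOrderingCheck {

        public static void main( final String[] args ) {
            final List<Shard> shards = new ArrayList<Shard>();
            shards.add( new Shard( 20000L, 5L ) );
            shards.add( new Shard( 10000L, 9L ) );
            shards.add( new Shard( 10000L, 3L ) );

            Collections.sort( shards );

            for ( final Shard s : shards ) {
                // prints 10000/3, 10000/9, 20000/5: ascending index, ties broken by created time
                System.out.println( s.getShardIndex() + "/" + s.getCreatedTime() );
            }
        }
    }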

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
new file mode 100644
index 0000000..6ec5e20
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import java.util.Set;
+
+
+/**
+ * There are cases where we need to read or write to more than 1 shard.
+ */
+public class ShardEntries {
+
+
+    private Set<Shard> shards;
+
+
+    public ShardEntries( Set<Shard> shards ) {
+        this.shards = shards;
+    }
+
+
+    public Set<Shard> getEntries() {
+        return shards;
+    }
+
+
+
+}
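
ShardEntries above is deliberately thin: a wrapper over a Set<Shard> for the cases where a write or read has to touch more than one shard at once (presumably while a shard transition is in flight). A quick usage sketch, assuming the two new classes above are available:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
    import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;

    public class ShardEntriesSketch {

        public static void main( final String[] args ) {
            final Set<Shard> shards = new HashSet<Shard>();
            shards.add( new Shard( 0L, 0L ) );
            shards.add( new Shard( 20000L, System.currentTimeMillis() ) );

            final ShardEntries entries = new ShardEntries( shards );

            // callers iterate the group and apply the same write to every shard in it
            for ( final Shard shard : entries.getEntries() ) {
                System.out.println( "would write to shard " + shard.getShardIndex() );
            }
        }
    }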

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
index a08ec3b..63075b3 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
@@ -38,6 +38,7 @@ import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySer
 import org.apache.usergrid.persistence.core.astyanax.ColumnNameIterator;
 import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.common.base.Optional;
@@ -65,7 +66,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
     private static final byte HOLDER = 0x00;
 
-    private static final LongColumnParser COLUMN_PARSER = new LongColumnParser();
+    private static final ShardColumnParser COLUMN_PARSER = new ShardColumnParser();
 
 
     protected final Keyspace keyspace;
@@ -105,7 +106,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
 
     @Override
-    public Iterator<Long> getEdgeMetaData( final ApplicationScope scope, final Id nodeId, final Optional<Long> start,
+    public Iterator<Shard> getEdgeMetaData( final ApplicationScope scope, final Id nodeId, final Optional<Shard> start,
                                            final String... types ) {
         /**
          * If the edge is present, we need to begin seeking from this
@@ -114,7 +115,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
         final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );
 
         if ( start.isPresent() ) {
-            rangeBuilder.setStart( start.get() );
+            rangeBuilder.setStart( start.get().getShardIndex() );
         }
 
         final EdgeRowKey key = new EdgeRowKey( nodeId, types );
@@ -163,11 +164,11 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
     }
 
 
-    private static class LongColumnParser implements ColumnParser<Long, Long> {
+    private static class ShardColumnParser implements ColumnParser<Long,Shard> {
 
         @Override
-        public Long parseColumn( final Column<Long> column ) {
-            return column.getName();
+        public Shard parseColumn( final Column<Long> column ) {
+            return new Shard(column.getName(), column.getTimestamp());
         }
     }
 }
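
The net effect of the ShardColumnParser change, sketched outside of Astyanax: the column name carries the shard index and the column timestamp carries the create time. The literal values below are made up for illustration, and package imports for Shard are omitted:

    public class ShardColumnParserSketch {

        public static void main( final String[] args ) {
            // Stand-ins for Column#getName() and Column#getTimestamp() on a real column.
            final long columnName = 20000l;
            final long columnTimestamp = 1404252805000l;

            // The parser now yields a Shard instead of the bare long index.
            final Shard parsed = new Shard( columnName, columnTimestamp );

            System.out.println( "shard " + parsed.getShardIndex() + " created at " + columnTimestamp );
        }
    }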

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index cf70669..5cece93 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -33,6 +33,7 @@ import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardCounterSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.ShardKey;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -69,12 +70,12 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
     @Override
-    public Iterator<Long> getShards( final ApplicationScope scope, final Id nodeId, Optional<Long> maxShardId, final String... edgeTypes ) {
+    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, Optional<Shard> maxShardId, final String... edgeTypes ) {
 
-        final Iterator<Long> existingShards =
+        final Iterator<Shard> existingShards =
                 edgeShardSerialization.getEdgeMetaData( scope, nodeId, maxShardId, edgeTypes );
 
-        final PushbackIterator<Long> pushbackIterator = new PushbackIterator( existingShards );
+        final PushbackIterator<Shard> pushbackIterator = new PushbackIterator( existingShards );
 //
 //
 //        final long now = timeService.getCurrentTime();
@@ -132,7 +133,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
          * Nothing to iterate, return an iterator with 0.
          */
         if(!pushbackIterator.hasNext()){
-            pushbackIterator.pushback( 0l );
+            pushbackIterator.pushback( new Shard(0l, 0l) );
         }
 
         return pushbackIterator;
@@ -142,7 +143,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
     @Override
     public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId, final String... edgeType ) {
 
-        final Iterator<Long> maxShards = getShards( scope, nodeId, Optional.<Long>absent(), edgeType );
+        final Iterator<Shard> maxShards = getShards( scope, nodeId, Optional.<Shard>absent(), edgeType );
 
 
         //if the first shard has already been allocated, do nothing.
@@ -152,14 +153,14 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
             return false;
         }
 
-        final long maxShard = maxShards.next();
+        final Shard maxShard = maxShards.next();
 
         /**
          * Check whether we have a count for our shard allocation
          */
 
 
-        final long count = nodeShardApproximation.getCount( scope, nodeId, maxShard, edgeType );
+        final long count = nodeShardApproximation.getCount( scope, nodeId, maxShard.getShardIndex(), edgeType );
 
         if ( count < graphFig.getShardSize() ) {
             return false;
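
The audit decision above reduces to a size check against the approximate counter; a condensed sketch with the dependencies stubbed out as plain values (the 10000 shard size is an illustrative stand-in for GraphFig.getShardSize()):

    public class AuditSketch {

        // Mirrors the check in auditMaxShard: no new shard until the current max
        // shard's approximate count reaches the configured shard size.
        static boolean needsNewShard( final long approximateCount, final long shardSize ) {
            return approximateCount >= shardSize;
        }

        public static void main( final String[] args ) {
            final long shardSize = 10000l;

            System.out.println( needsNewShard( 9999l, shardSize ) );  // false, shard still has room
            System.out.println( needsNewShard( 10000l, shardSize ) ); // true, time to allocate
        }
    }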

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
index 3b78898..eeefb3a 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
@@ -34,6 +34,8 @@ import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.graph.serialization.util.IterableUtil;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -93,7 +95,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public long getSlice( final ApplicationScope scope, final Id nodeId, final long timestamp, final String... edgeType ) {
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final long timestamp,
+                               final String... edgeType ) {
 
 
         final CacheKey key = new CacheKey( scope, nodeId, edgeType );
@@ -106,7 +109,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
             throw new GraphRuntimeException( "Unable to load shard key for graph", e );
         }
 
-        final Long shardId = entry.getShardId( timestamp );
+        final ShardEntries shardId = entry.getShardId( timestamp );
 
         if ( shardId != null ) {
             return shardId;
@@ -118,8 +121,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public Iterator<Long> getVersions( final ApplicationScope scope, final Id nodeId, final long maxTimestamp,
-                                       final String... edgeType ) {
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final long maxTimestamp,
+                                     final String... edgeType ) {
         final CacheKey key = new CacheKey( scope, nodeId, edgeType );
               CacheEntry entry;
 
@@ -130,10 +133,10 @@ public class NodeShardCacheImpl implements NodeShardCache {
                   throw new GraphRuntimeException( "Unable to load shard key for graph", e );
               }
 
-        Iterator<Long> iterator = entry.getShards( maxTimestamp );
+        Iterator<ShardEntries> iterator = entry.getShards( maxTimestamp );
 
         if(iterator == null){
-            return Collections.<Long>emptyList().iterator();
+            return Collections.<ShardEntries>emptyList().iterator();
         }
 
         return iterator;
@@ -161,8 +164,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
 //                          //TODO, we need to put some sort of upper bounds on this, it could possibly get too large
 
 
-                          final Iterator<Long> edges = nodeShardAllocation
-                                  .getShards( key.scope, key.id, Optional.<Long>absent(), key.types );
+                          final Iterator<Shard> edges = nodeShardAllocation
+                                  .getShards( key.scope, key.id, Optional.<Shard>absent(), key.types );
 
                           return new CacheEntry( edges );
                       }
@@ -224,13 +227,13 @@ public class NodeShardCacheImpl implements NodeShardCache {
         /**
          * Get the list of all segments
          */
-        private TreeSet<Long> shards;
+        private TreeSet<Shard> shards;
 
 
-        private CacheEntry( final Iterator<Long> shards ) {
+        private CacheEntry( final Iterator<Shard> shards ) {
             this.shards = new TreeSet<>( );
 
-            for ( Long shard : IterableUtil.wrap( shards ) ) {
+            for ( Shard shard : IterableUtil.wrap( shards ) ) {
                 this.shards.add( shard );
             }
         }
@@ -239,8 +242,9 @@ public class NodeShardCacheImpl implements NodeShardCache {
         /**
          * Get the shard's UUID for the uuid we're attempting to seek from
          */
-        public Long getShardId( final Long seek ) {
-            return this.shards.floor( seek );
+        public ShardEntries getShardId( final Long seek ) {
+            return bootstrapEntry();
+//            return this.shards.floor( seek );
         }
 
 
@@ -248,8 +252,13 @@ public class NodeShardCacheImpl implements NodeShardCache {
          * Get all shards <= this one in descending order
          * @return
          */
-        public Iterator<Long> getShards( final Long maxShard ){
-            return this.shards.headSet(maxShard, true  ).descendingIterator();
+        public Iterator<ShardEntries> getShards( final Long maxShard ){
+            return  Collections.singleton(bootstrapEntry() ).iterator();
+//            return this.shards.headSet(maxShard, true  ).descendingIterator();
+        }
+
+        private ShardEntries bootstrapEntry(){
+            return new ShardEntries( Collections.singleton( new Shard(0l, 0l) ) );
         }
     }
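
With the lookups commented out above, every cache call currently resolves to the same bootstrap group, so reads and writes all target shard zero until the real shard walk is restored. A minimal sketch of what a caller sees (package imports omitted):

    import java.util.Collections;

    public class BootstrapCacheSketch {

        public static void main( final String[] args ) {
            // Equivalent of CacheEntry#bootstrapEntry(): a single group holding shard zero.
            final ShardEntries bootstrap =
                    new ShardEntries( Collections.singleton( new Shard( 0l, 0l ) ) );

            // getWriteShards(...) returns this group and getReadShards(...) yields a
            // one-element iterator over it, so every edge lands in shard zero for now.
            System.out.println( bootstrap.getEntries().size() ); // 1
        }
    }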
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
index f246f23..17791cb 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
@@ -26,6 +26,7 @@ import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.inject.Inject;
@@ -52,16 +53,16 @@ public class SizebasedEdgeShardStrategy implements EdgeShardStrategy {
 
 
     @Override
-    public long getWriteShard( final ApplicationScope scope, final Id rowKeyId, final long timestamp,
-                               final String... types ) {
-        return shardCache.getSlice( scope, rowKeyId, timestamp, types );
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final long timestamp,
+                                final String... types ) {
+        return shardCache.getWriteShards( scope, rowKeyId, timestamp, types );
     }
 
 
     @Override
-    public Iterator<Long> getReadShards( final ApplicationScope scope, final Id rowKeyId, final long maxTimestamp,
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId, final long maxTimestamp,
                                          final String... types ) {
-        return shardCache.getVersions( scope, rowKeyId, maxTimestamp, types );
+        return shardCache.getReadShards( scope, rowKeyId, maxTimestamp, types );
     }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
index 479e1bf..89d89b8 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
@@ -103,22 +103,22 @@ public class EdgeShardSerializationTest {
         batch.execute();
 
 
-        Iterator<Long> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Long>absent(), types );
+        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().longValue() );
+        assertEquals( slice3, results.next().getShardIndex() );
 
-        assertEquals( slice2, results.next().longValue() );
+        assertEquals( slice2, results.next().getShardIndex() );
 
-        assertEquals( slice1, results.next().longValue() );
+        assertEquals( slice1, results.next().getShardIndex() );
 
         assertFalse( results.hasNext() );
 
         //test paging and size
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.of( slice2 ), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.of( new Shard( slice2, 0l ) ), types );
 
-        assertEquals( slice2, results.next().longValue() );
+        assertEquals( slice2, results.next().getShardIndex() );
 
-        assertEquals( slice1, results.next().longValue() );
+        assertEquals( slice1, results.next().getShardIndex() );
 
 
         assertFalse( results.hasNext() );
@@ -147,24 +147,24 @@ public class EdgeShardSerializationTest {
         batch.execute();
 
 
-        Iterator<Long> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Long>absent(), types );
+        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().longValue() );
+        assertEquals( slice3, results.next().getShardIndex() );
 
-        assertEquals( slice2, results.next().longValue() );
+        assertEquals( slice2, results.next().getShardIndex() );
 
-        assertEquals( slice1, results.next().longValue() );
+        assertEquals( slice1, results.next().getShardIndex() );
 
         assertFalse( results.hasNext() );
 
         //test paging and size
         edgeShardSerialization.removeEdgeMeta( scope, now, slice1, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now,Optional.<Long>absent(), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now,Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().longValue() );
+        assertEquals( slice3, results.next().getShardIndex() );
 
-        assertEquals( slice2, results.next().longValue() );
+        assertEquals( slice2, results.next().getShardIndex() );
 
         assertFalse( results.hasNext() );
 
@@ -173,7 +173,7 @@ public class EdgeShardSerializationTest {
 
         edgeShardSerialization.removeEdgeMeta( scope, now, slice3, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Long>absent(), types );
+        results = edgeShardSerialization.getEdgeMetaData( scope, now, Optional.<Shard>absent(), types );
 
 
         assertFalse( results.hasNext() );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 5c846f1..6824e74 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -31,8 +31,6 @@ import org.mockito.ArgumentCaptor;
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardCounterSerialization;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.ShardKey;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardAllocationImpl;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
@@ -46,7 +44,6 @@ import static org.apache.usergrid.persistence.graph.test.util.EdgeTestUtils.crea
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.same;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -109,7 +106,7 @@ public class NodeShardAllocationTest {
          */
         when( edgeShardSerialization
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Long>emptyList().iterator() );
+                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
         final boolean result = approximation.auditMaxShard( scope, nodeId, type, subType );
 
@@ -148,7 +145,7 @@ public class NodeShardAllocationTest {
 
         when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
 
-        final long futureShard =  timeservicetime + graphFig.getShardCacheTimeout() * 2 ;
+        final Shard futureShard =  new Shard(timeservicetime + graphFig.getShardCacheTimeout() * 2, timeservicetime) ;
 
         /**
          * Mock up returning a min shard, and a future shard
@@ -199,7 +196,7 @@ public class NodeShardAllocationTest {
          */
         when( edgeShardSerialization
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( 0l ).iterator() );
+                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
 
 
         //return a shard size < our max by 1
@@ -251,7 +248,7 @@ public class NodeShardAllocationTest {
          */
         when( edgeShardSerialization
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( 0l ).iterator() );
+                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
 
 
         final long shardCount = graphFig.getShardSize();
@@ -331,14 +328,16 @@ public class NodeShardAllocationTest {
         final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
 
 
+        final Shard minShard = new Shard(0l, 0l);
+
         /**
          * Simulate slow node
          */
-        final long futureShard1 = futureTime - 1;
+        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime);
 
-        final long futureShard2 = futureTime + 10000;
+        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
 
-        final long futureShard3 = futureShard2 + 10000;
+        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime);
 
 
         final int pageSize = 100;
@@ -348,7 +347,7 @@ public class NodeShardAllocationTest {
          */
         when( edgeShardSerialization
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ), same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList(futureShard3, futureShard2, futureShard1, 0l).iterator() );
+                        same( subType ) ) ).thenReturn( Arrays.asList(futureShard3, futureShard2, futureShard1, minShard).iterator() );
 
 
 
@@ -363,17 +362,17 @@ public class NodeShardAllocationTest {
                 .thenReturn( mock( MutationBatch.class ) );
 
 
-        final Iterator<Long>
-                result = approximation.getShards( scope, nodeId, Optional.<Long>absent(), type, subType );
+        final Iterator<Shard>
+                result = approximation.getShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
 
 
         assertTrue( "Shards present", result.hasNext() );
 
-        assertEquals("Only single next shard returned", futureShard1,  result.next().longValue());
+        assertEquals("Only single next shard returned", futureShard1,  result.next());
 
         assertTrue("Shards present", result.hasNext());
 
-        assertEquals("Previous shard present", 0l, result.next().longValue());
+        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
 
         assertFalse("No shards left", result.hasNext());
 
@@ -422,11 +421,11 @@ public class NodeShardAllocationTest {
          */
         when( edgeShardSerialization
                 .getEdgeMetaData( same( scope ), same( nodeId ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Long>emptyList().iterator() );
+                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
-        final Iterator<Long> result = approximation.getShards( scope, nodeId, Optional.<Long>absent(), type, subType );
+        final Iterator<Shard> result = approximation.getShards( scope, nodeId, Optional.<Shard>absent(), type, subType );
 
-        assertEquals("0 shard allocated", 0l, result.next().longValue());
+        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
 
         assertFalse( "No shard allocated", result.hasNext() );
     }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/b1434ddf/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
index 6c46c32..a8fdbc4 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCacheTest.java
@@ -22,7 +22,6 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
-import java.util.UUID;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -30,7 +29,6 @@ import org.junit.Test;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardCacheImpl;
-import org.apache.usergrid.persistence.graph.test.util.EdgeTestUtils;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
 
@@ -67,263 +65,259 @@ public class NodeShardCacheTest {
 
         when( scope.getApplication() ).thenReturn( orgId );
     }
-
-
-    @Test
-    public void testNoShards() throws ConnectionException {
-
-        final GraphFig graphFig = getFigMock();
-
-        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
-
-        final Id id = createId( "test" );
-
-        final String edgeType = "edge";
-
-        final String otherIdType = "type";
-
-
-        final long newTime = 10000l;
-
-
-        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
-
-
-        final Optional max = Optional.absent();
-        /**
-         * Simulate returning no shards at all.
-         */
-        when( allocation
-                .getShards( same( scope ), same( id ), same( max), same( edgeType ),
-                        same( otherIdType ) ) )
-                .thenReturn( Collections.singletonList( 0l ).iterator() );
-
-
-        long slice = cache.getSlice( scope, id, newTime, edgeType, otherIdType );
-
-
-        //we return the min UUID possible, all edges should start by writing to this edge
-        assertEquals(0l, slice );
-
-
-        /**
-         * Verify that we fired the audit
-         */
-        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
-    }
-
-
-    @Test
-    public void testSingleExistingShard() {
-
-        final GraphFig graphFig = getFigMock();
-
-        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
-
-
-        final Id id = createId( "test" );
-
-        final String edgeType = "edge";
-
-        final String otherIdType = "type";
-
-
-        final long newTime = 10000l;
-
-        final long min = 0;
-
-
-        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
-
-
-        final Optional max = Optional.absent();
-
-        /**
-         * Simulate returning single shard
-         */
-        when( allocation.getShards( same( scope ), same( id ), same(max),
-                same( edgeType ), same( otherIdType ) ) ).thenReturn( Collections.singletonList( min ).iterator() );
-
-
-        long slice = cache.getSlice( scope, id, newTime, edgeType, otherIdType );
-
-
-        //we return the min UUID possible, all edges should start by writing to this edge
-        assertEquals( min, slice );
-
-        /**
-         * Verify that we fired the audit
-         */
-        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
-    }
-
-
-    @Test
-    public void testRangeShard() {
-
-        final GraphFig graphFig = getFigMock();
-
-        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
-
-        final Id id = createId( "test" );
-
-        final String edgeType = "edge";
-
-        final String otherIdType = "type";
-
-
-        /**
-         * Set our min mid and max
-         */
-        final long min = 0;
-
-
-        final long mid = 10000;
-
-
-        final long max = 20000;
-
-
-        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
-
-
-        /**
-         * Simulate returning all shards
-         */
-        when( allocation.getShards( same( scope ), same( id ), any( Optional.class ),
-                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
-
-
-        //check getting equal to our min, mid and max
-
-        long slice = cache.getSlice( scope, id, min, edgeType, otherIdType );
-
-
-        //we return the min UUID possible, all edges should start by writing to this edge
-        assertEquals( min, slice );
-
-        slice = cache.getSlice( scope, id, mid,
-                edgeType, otherIdType );
-
-
-        //we return the mid UUID possible, all edges should start by writing to this edge
-        assertEquals( mid, slice );
-
-        slice = cache.getSlice( scope, id, max ,
-                edgeType, otherIdType );
-
-
-        //we return the mid UUID possible, all edges should start by writing to this edge
-        assertEquals( max, slice );
-
-        //now test in between
-        slice = cache.getSlice( scope, id, min+1, edgeType, otherIdType );
-
-
-        //we return the min UUID possible, all edges should start by writing to this edge
-        assertEquals( min, slice );
-
-        slice = cache.getSlice( scope, id,   mid-1, edgeType, otherIdType );
-
-
-        //we return the min UUID possible, all edges should start by writing to this edge
-        assertEquals( min, slice );
-
-
-        slice = cache.getSlice( scope, id,   mid+1, edgeType, otherIdType );
-
-
-        //we return the mid UUID possible, all edges should start by writing to this edge
-        assertEquals( mid, slice );
-
-        slice = cache.getSlice( scope, id,  max-1, edgeType, otherIdType );
-
-
-        //we return the mid UUID possible, all edges should start by writing to this edge
-        assertEquals( mid, slice );
-
-
-        slice = cache.getSlice( scope, id,   max, edgeType, otherIdType );
-
-
-        //we return the mid UUID possible, all edges should start by writing to this edge
-        assertEquals( max, slice );
-
-        /**
-         * Verify that we fired the audit
-         */
-        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
-    }
-
-
-    @Test
-    public void testRangeShardIterator() {
-
-        final GraphFig graphFig = getFigMock();
-
-        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
-
-        final Id id = createId( "test" );
-
-        final String edgeType = "edge";
-
-        final String otherIdType = "type";
-
-
-        /**
-         * Set our min mid and max
-         */
-        final long min = 1;
-
-
-        final long mid = 100;
-
-
-        final long max = 200;
-
-
-        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
-
-
-        /**
-         * Simulate returning all shards
-         */
-        when( allocation.getShards( same( scope ), same( id ),  any(Optional.class),
-                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
-
-
-        //check getting equal to our min, mid and max
-
-        Iterator<Long> slice =
-                cache.getVersions( scope, id,   max, edgeType, otherIdType );
-
-
-        assertEquals( max, slice.next().longValue() );
-        assertEquals( mid, slice.next().longValue() );
-        assertEquals( min, slice.next().longValue() );
-
-
-        slice = cache.getVersions( scope, id,   mid,
-                edgeType, otherIdType );
-
-        assertEquals( mid, slice.next().longValue() );
-        assertEquals( min, slice.next().longValue() );
-
-
-        slice = cache.getVersions( scope, id,   min,
-                edgeType, otherIdType );
-
-        assertEquals( min, slice.next().longValue() );
-
-
-    }
-
-
-    private GraphFig getFigMock() {
-        final GraphFig graphFig = mock( GraphFig.class );
-        when( graphFig.getShardCacheSize() ).thenReturn( 1000l );
-        when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l );
-
-        return graphFig;
-    }
+//
+//
+//    @Test
+//    public void testNoShards() throws ConnectionException {
+//
+//        final GraphFig graphFig = getFigMock();
+//
+//        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
+//
+//        final Id id = createId( "test" );
+//
+//        final String edgeType = "edge";
+//
+//        final String otherIdType = "type";
+//
+//
+//        final long newTime = 10000l;
+//
+//
+//        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
+//
+//
+//        final Optional max = Optional.absent();
+//        /**
+//         * Simulate returning no shards at all.
+//         */
+//        when( allocation
+//                .getShards( same( scope ), same( id ), same( max), same( edgeType ),
+//                        same( otherIdType ) ) )
+//                .thenReturn( Collections.singletonList( 0l ).iterator() );
+//
+//
+//        long slice = cache.getSliceShard( scope, id, newTime, edgeType, otherIdType );
+//
+//
+//        //we return the min UUID possible, all edges should start by writing to this edge
+//        assertEquals(0l, slice );
+//
+//
+//        /**
+//         * Verify that we fired the audit
+//         */
+//        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
+//    }
+//
+//
+//    @Test
+//    public void testSingleExistingShard() {
+//
+//        final GraphFig graphFig = getFigMock();
+//
+//        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
+//
+//
+//        final Id id = createId( "test" );
+//
+//        final String edgeType = "edge";
+//
+//        final String otherIdType = "type";
+//
+//
+//        final long newTime = 10000l;
+//
+//        final long min = 0;
+//
+//
+//        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
+//
+//
+//        final Optional max = Optional.absent();
+//
+//        /**
+//         * Simulate returning single shard
+//         */
+//        when( allocation.getShards( same( scope ), same( id ), same(max),
+//                same( edgeType ), same( otherIdType ) ) ).thenReturn( Collections.singletonList( min ).iterator() );
+//
+//
+//        long slice = cache.getSliceShard( scope, id, newTime, edgeType, otherIdType );
+//
+//
+//        //we return the min UUID possible, all edges should start by writing to this edge
+//        assertEquals( min, slice );
+//
+//        /**
+//         * Verify that we fired the audit
+//         */
+//        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
+//    }
+//
+//
+//    @Test
+//    public void testRangeShard() {
+//
+//        final GraphFig graphFig = getFigMock();
+//
+//        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
+//
+//        final Id id = createId( "test" );
+//
+//        final String edgeType = "edge";
+//
+//        final String otherIdType = "type";
+//
+//
+//        /**
+//         * Set our min mid and max
+//         */
+//        final long min = 0;
+//
+//
+//        final long mid = 10000;
+//
+//
+//        final long max = 20000;
+//
+//
+//        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
+//
+//
+//        /**
+//         * Simulate returning all shards
+//         */
+//        when( allocation.getShards( same( scope ), same( id ), any( Optional.class ),
+//                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
+//
+//
+//        //check getting equal to our min, mid and max
+//
+//        long slice = cache.getSliceShard( scope, id, min, edgeType, otherIdType );
+//
+//
+//        //we return the min UUID possible, all edges should start by writing to this edge
+//        assertEquals( min, slice );
+//
+//        slice = cache.getSliceShard( scope, id, mid, edgeType, otherIdType );
+//
+//
+//        //we return the mid UUID possible, all edges should start by writing to this edge
+//        assertEquals( mid, slice );
+//
+//        slice = cache.getSliceShard( scope, id, max, edgeType, otherIdType );
+//
+//
+//        //we return the mid UUID possible, all edges should start by writing to this edge
+//        assertEquals( max, slice );
+//
+//        //now test in between
+//        slice = cache.getSliceShard( scope, id, min + 1, edgeType, otherIdType );
+//
+//
+//        //we return the min UUID possible, all edges should start by writing to this edge
+//        assertEquals( min, slice );
+//
+//        slice = cache.getSliceShard( scope, id, mid - 1, edgeType, otherIdType );
+//
+//
+//        //we return the min UUID possible, all edges should start by writing to this edge
+//        assertEquals( min, slice );
+//
+//
+//        slice = cache.getSliceShard( scope, id, mid + 1, edgeType, otherIdType );
+//
+//
+//        //we return the mid UUID possible, all edges should start by writing to this edge
+//        assertEquals( mid, slice );
+//
+//        slice = cache.getSliceShard( scope, id, max - 1, edgeType, otherIdType );
+//
+//
+//        //we return the mid UUID possible, all edges should start by writing to this edge
+//        assertEquals( mid, slice );
+//
+//
+//        slice = cache.getSliceShard( scope, id, max, edgeType, otherIdType );
+//
+//
+//        //we return the mid UUID possible, all edges should start by writing to this edge
+//        assertEquals( max, slice );
+//
+//        /**
+//         * Verify that we fired the audit
+//         */
+//        verify( allocation ).auditMaxShard( scope, id, edgeType, otherIdType );
+//    }
+//
+//
+//    @Test
+//    public void testRangeShardIterator() {
+//
+//        final GraphFig graphFig = getFigMock();
+//
+//        final NodeShardAllocation allocation = mock( NodeShardAllocation.class );
+//
+//        final Id id = createId( "test" );
+//
+//        final String edgeType = "edge";
+//
+//        final String otherIdType = "type";
+//
+//
+//        /**
+//         * Set our min mid and max
+//         */
+//        final long min = 1;
+//
+//
+//        final long mid = 100;
+//
+//
+//        final long max = 200;
+//
+//
+//        NodeShardCache cache = new NodeShardCacheImpl( allocation, graphFig );
+//
+//
+//        /**
+//         * Simulate returning all shards
+//         */
+//        when( allocation.getShards( same( scope ), same( id ),  any(Optional.class),
+//                same( edgeType ), same( otherIdType ) ) ).thenReturn( Arrays.asList( min, mid, max ).iterator() );
+//
+//
+//        //check getting equal to our min, mid and max
+//
+//        Iterator<Long> slice =
+//                cache.getShards( scope, id, max, edgeType, otherIdType );
+//
+//
+//        assertEquals( max, slice.next().longValue() );
+//        assertEquals( mid, slice.next().longValue() );
+//        assertEquals( min, slice.next().longValue() );
+//
+//
+//        slice = cache.getShards( scope, id, mid, edgeType, otherIdType );
+//
+//        assertEquals( mid, slice.next().longValue() );
+//        assertEquals( min, slice.next().longValue() );
+//
+//
+//        slice = cache.getShards( scope, id, min, edgeType, otherIdType );
+//
+//        assertEquals( min, slice.next().longValue() );
+//
+//
+//    }
+//
+//
+//    private GraphFig getFigMock() {
+//        final GraphFig graphFig = mock( GraphFig.class );
+//        when( graphFig.getShardCacheSize() ).thenReturn( 1000l );
+//        when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l );
+//
+//        return graphFig;
+//    }
 }


[11/11] git commit: Work in progress. Added testing

Posted by to...@apache.org.
Work in progress.  Added testing


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/cd043b14
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/cd043b14
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/cd043b14

Branch: refs/heads/USERGRID-188
Commit: cd043b143f5f467679befb4af0659448a5a17abb
Parents: 3ecd603
Author: Todd Nine <tn...@apigee.com>
Authored: Wed Jul 23 10:39:35 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Wed Jul 23 10:39:35 2014 -0600

----------------------------------------------------------------------
 .../impl/shard/EdgeShardSerialization.java      |   12 +-
 .../graph/serialization/impl/shard/Shard.java   |   27 +-
 .../impl/shard/ShardEntryGroup.java             |  199 ++--
 .../impl/shard/impl/EdgeSearcher.java           |    2 +-
 .../shard/impl/EdgeShardSerializationImpl.java  |   35 +-
 .../shard/impl/NodeShardAllocationImpl.java     |    8 +-
 .../impl/shard/impl/NodeShardCacheImpl.java     |    2 +-
 .../shard/impl/ShardEntryGroupIterator.java     |    2 -
 .../impl/shard/EdgeShardSerializationTest.java  |  119 +-
 .../impl/shard/NodeShardAllocationTest.java     | 1104 +++++++++---------
 .../impl/shard/ShardEntryGroupTest.java         |  129 +-
 11 files changed, 891 insertions(+), 748 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
index e91dda3..1f15107 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerialization.java
@@ -40,11 +40,11 @@ public interface EdgeShardSerialization extends Migration{
      * @param scope The scope to write
      * @param nodeId The id in the edge
      * @param nodeType Is the node a source or target node
-     * @param shard The next time to write
-     * @param timestamp the timestamp to set on the shard
+     * @param shard The shard to write
      * @param types The types to write to.  Can be edge type, or edgeType+id type
      */
-    public MutationBatch writeEdgeMeta(ApplicationScope scope, Id nodeId, NodeType nodeType, long shard, long timestamp,  String... types);
+    public MutationBatch writeShardMeta( ApplicationScope scope, Id nodeId, NodeType nodeType, Shard shard,
+                                         String... types );
 
     /**
      * Get an iterator of all meta data and types.  Returns a range from High to low
@@ -55,7 +55,8 @@ public interface EdgeShardSerialization extends Migration{
      * @param types The types to use
      * @return
      */
-    public Iterator<Shard> getEdgeMetaData(ApplicationScope scope, Id nodeId, NodeType nodeType, Optional<Shard> start,  String... types);
+    public Iterator<Shard> getShardMetaData( ApplicationScope scope, Id nodeId, NodeType nodeType,
+                                             Optional<Shard> start, String... types );
 
     /**
      * Remove the shard from the edge meta data for the given types.
@@ -67,6 +68,7 @@ public interface EdgeShardSerialization extends Migration{
      * @param types
      * @return
      */
-    public MutationBatch removeEdgeMeta(ApplicationScope scope, Id nodeId, NodeType nodeType, long shard, String... types);
+    public MutationBatch removeShardMeta( ApplicationScope scope, Id nodeId, NodeType nodeType, Shard shard,
+                                          String... types );
 
 }
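
To make the interface rework concrete, a hedged usage sketch. NodeType.SOURCE is assumed to be one of the enum's values; the "edgeType" string, shard index, and created time are illustrative only, the scope and node id come from the caller, and package imports are omitted:

    import java.util.Iterator;

    import com.google.common.base.Optional;

    public class EdgeShardSerializationUsageSketch {

        static void roundTrip( final EdgeShardSerialization serialization, final ApplicationScope scope,
                               final Id nodeId ) throws Exception {

            final Shard shard = new Shard( 10000l, System.currentTimeMillis(), false );

            // Shards are now written and removed as whole Shard objects, not bare longs.
            serialization.writeShardMeta( scope, nodeId, NodeType.SOURCE, shard, "edgeType" ).execute();

            final Iterator<Shard> shards =
                    serialization.getShardMetaData( scope, nodeId, NodeType.SOURCE, Optional.<Shard>absent(), "edgeType" );

            while ( shards.hasNext() ) {
                System.out.println( shards.next().getShardIndex() );
            }

            serialization.removeShardMeta( scope, nodeId, NodeType.SOURCE, shard, "edgeType" ).execute();
        }
    }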

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
index f95699b..4b58224 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
@@ -23,11 +23,13 @@ public class Shard implements Comparable<Shard> {
 
     private final long shardIndex;
     private final long createdTime;
+    private final boolean compacted;
 
 
-    public Shard( final long shardIndex, final long createdTime ) {
+    public Shard( final long shardIndex, final long createdTime, final boolean compacted ) {
         this.shardIndex = shardIndex;
         this.createdTime = createdTime;
+        this.compacted = compacted;
     }
 
 
@@ -48,9 +50,15 @@ public class Shard implements Comparable<Shard> {
 
 
     /**
+     * Return true if this shard has been compacted
+     */
+    public boolean isCompacted() {
+        return compacted;
+    }
+
+
+    /**
      * Compare the shards based on the timestamp first, then the created time second
-     * @param o
-     * @return
      */
     @Override
     public int compareTo( final Shard o ) {
@@ -70,6 +78,19 @@ public class Shard implements Comparable<Shard> {
                 return -1;
             }
 
+            else {
+
+                //kind of arbitrary compacted takes precedence
+                if ( compacted && !o.compacted ) {
+                    return 1;
+                }
+
+                else if ( !compacted && o.compacted ){
+                    return -1;
+                }
+
+
+            }
             return 0;
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
index 6bd1421..ebb037e 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
@@ -21,192 +21,173 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
-import java.util.TreeMap;
+import java.util.TreeSet;
 
 
 /**
- * There are cases where we need to read or write to more than 1 shard.  This object encapsulates
- * a set of shards that should be written to and read from.  All reads should combine the data sets from
- * all shards in the group, and writes should be written to each shard.  Once the shard can safely be compacted
- * a background process should be triggered to remove additional shards and make seeks faster.  This multiread/write
- * should only occur during the time period of the delta (in milliseconds), after which the next read will asynchronously compact the
- * shards into a single shard.
+ * There are cases where we need to read or write to more than 1 shard.  This object encapsulates a set of shards that
+ * should be written to and read from.  All reads should combine the data sets from all shards in the group, and writes
+ * should be written to each shard.  Once the shard can safely be compacted a background process should be triggered to
+ * remove additional shards and make seeks faster.  This multiread/write should only occur during the time period of the
+ * delta (in milliseconds), after which the next read will asynchronously compact the shards into a single shard.
  */
 public class ShardEntryGroup {
 
 
-    private TreeMap<Long, Shard> shards;
-
-    private Shard minShardByIndex;
+    private TreeSet<Shard> shards;
 
     private final long delta;
 
-    private Shard neighbor;
+    private Shard compactionTarget;
+
+
+    private long maxCreatedTime;
 
 
     /**
      * The max delta we accept in milliseconds for create time to be considered a member of this group
-     * @param delta
      */
     public ShardEntryGroup( final long delta ) {
         this.delta = delta;
-        this.shards = new TreeMap<>( ShardTimeComparator.INSTANCE );
+        this.shards = new TreeSet<>();
+        this.maxCreatedTime = 0;
     }
 
 
     /**
-     * Only add a shard if the created timestamp is within the delta of one of the entries
-     * @param shard
-     * @return
+     * Only add a shard if it satisfies the rules required to join this group.  The rules are outlined below.
+     *
+     * Case 1)  First shard in the group, always added
+     *
+     * Case 2) Shard is unmerged, it should be included with its peers since other nodes may not have it yet
+     *
+     * Case 3) The list contains only non-compacted shards, and this one is the last and is merged.  It is considered a lower
+     * bound
      */
-    public boolean addShard(final Shard shard){
-
-        //compare the time and see if it falls withing any of the elements based on their timestamp
-        final long shardCreateTime = shard.getCreatedTime();
-
-        final Long lessThanKey = shards.floorKey( shardCreateTime );
-
-        final Long greaterThanKey = shards.ceilingKey( shardCreateTime );
-
+    public boolean addShard( final Shard shard ) {
 
-        final long lessThanDelta = shardCreateTime - lessThanKey;
+        //shards can be ar
 
-        final long greaterThanDelta = greaterThanKey - shardCreateTime;
-
-        if(lessThanDelta < delta || greaterThanDelta < delta ){
-            this.shards.put( shardCreateTime, shard );
+        //compare the time and see if it falls within any of the elements based on their timestamp
+        //        final long shardCreateTime = shard.getCreatedTime();
+
+        //        final Long lessThanKey = shards.floorKey( shardCreateTime );
+        //
+        //        final Long greaterThanKey = shards.ceilingKey( shardCreateTime );
+        //
+        //        //first into the set
+        //        if ( lessThanKey == null && greaterThanKey == null ) {
+        //            addShardInternal( shard );
+        //            return true;
+        //        }
+        //
+        //        if ( lessThanKey != null && shardCreateTime - lessThanKey < delta ) {
+        //            addShardInternal( shard );
+        //            return true;
+        //        }
+        //
+        //
+        //        if ( greaterThanKey != null && greaterThanKey - shardCreateTime < delta ) {
+        //            addShardInternal( shard );
+        //
+        //            return true;
+        //        }
+
+        if ( shards.size() == 0 ) {
+            addShardInternal( shard );
+            return true;
+        }
 
-            if(shard.compareTo( minShardByIndex ) < 0){
-                minShardByIndex = shard;
-            }
 
+        //shard is not compacted, or it's predecessor isn't, we should include it in this group
+        if ( !shard.isCompacted() || !shards.last().isCompacted() ) {
+            addShardInternal( shard );
             return true;
         }
 
+
         return false;
     }
 
 
     /**
-     * Add the n-1 shard to the set.  This is required, because nodes that have not yet updated their
-     * shard caches can be writing reading to the n-1 node only
-     *
-     * @param shard The shard to possibly add as a neighbor
-     * @return True if this shard as added as a neighbor, false otherwise
+     * Add the shard and set the min created time
      */
-    public boolean setNeighbor( final Shard shard ){
+    private void addShardInternal( final Shard shard ) {
+        shards.add( shard );
 
-        //not in the transition state don't set the neighbor, it will slow seeks down
-        if(!isRolling()){
-            return false;
-        }
+        maxCreatedTime = Math.max( maxCreatedTime, shard.getCreatedTime() );
 
+        //it's not a compacted shard, so it's a candidate to be the compaction target
+        if ( !shard.isCompacted() && ( compactionTarget == null || shard.compareTo( compactionTarget ) < 0 ) ) {
+            compactionTarget = shard;
+        }
+    }
 
-        neighbor = shard;
-        this.shards.put( shard.getCreatedTime(), shard );
-        return true;
 
+    /**
+     * Get the entries that we should read from.
+     */
+    public Collection<Shard> getReadShards() {
+        return shards;
     }
 
 
     /**
-     * Get the entries that we should read from.
-     *
-     * @return
+     * Get the entries, with the max shard time being first. We write to all shards until they're migrated
      */
-    public Collection<Shard> getReadShards(final long currentTime) {
+    public Collection<Shard> getWriteShards( long currentTime ) {
 
         /**
-         * The shards are still rolling (I.E can't be compacted)
+         * The shards in this set can be combined, we should only write to the compaction target to avoid
+         * adding data to other shards
          */
-        if(needsCompaction( currentTime )){
-            return shards.values();
+        if ( shouldCompact( currentTime ) ) {
+            return Collections.singleton( compactionTarget );
         }
 
-        return Collections.singleton(minShardByIndex);
-    }
-
 
-    /**
-     * Get the entries, with the max shard time being first. We write to all shards until they're migrated
-     *
-     * @return
-     */
-    public Collection<Shard> getWriteShards() {
-        return shards.values();
+        return shards;
     }
 
 
     /**
      * Get the shard all compactions should write to
-     * @return
      */
-    public Shard getMergeTarget(){
-        return minShardByIndex;
+    public Shard getCompactionTarget() {
+        return compactionTarget;
     }
 
 
     /**
      * Returns true if the newest created shard is past the currentTime - delta
+     *
      * @param currentTime The current system time in milliseconds
+     *
      * @return True if these shards can safely be combined into a single shard, false otherwise
      */
-    public boolean needsCompaction(final long currentTime){
+    public boolean shouldCompact( final long currentTime ) {
 
         /**
          * We don't have enough shards to compact, ignore
          */
-        if(shards.size() < 2){
+        if ( shards.size() < 2 ) {
             return false;
         }
 
-
-        final long maxTimestamp = shards.lastKey();
-
-
-        return currentTime - delta > maxTimestamp;
-    }
-
-
-    /**
-     * Return true if the shard is rolling.  If this is the case, we want to include the n-1 entry, since everyone
-     * may not yet have it until compaction is safe to perform
-     * @return
-     */
-    private boolean isRolling(){
-       return shards.size() > 1;
+        return currentTime - delta > maxCreatedTime;
     }
 
 
     /**
      * Return true if this shard can be deleted AFTER all of the data in it has been moved
-     * @param shard
-     * @return
      */
-    public boolean canBeDeleted(final Shard shard){
+    public boolean canBeDeleted( final Shard shard ) {
         //if we're a neighbor shard (n-1) or the target compaction shard, we can't be deleted
-        //we purposefully use .equals here, since 2 shards might have the same index with different timestamps (unlikely but could happen)
-        if(shard == neighbor ||  getMergeTarget().equals( shard )){
-            return false;
-        }
-
-        return true;
-    }
-
-    /**
-     * Compares 2 shards based on create time.  Does not handle nulls intentionally
-     */
-    private static final class ShardTimeComparator implements Comparator<Long> {
-
-        public static final ShardTimeComparator INSTANCE = new ShardTimeComparator();
-
-
-        @Override
-        public int compare( final Long o1, final Long o2 ) {
-            return o1.compareTo( o2 );
-        }
+        //we purposefully use shard index comparison over .equals here, since 2 shards might have the
+        //same index with different timestamps (unlikely but could happen)
+        return !shard.isCompacted() && ( compactionTarget != null && compactionTarget.getShardIndex() != shard
+                .getShardIndex() );
     }
-
-
 }
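
The ShardEntryGroup changes above boil down to three rules: write to every shard in the group until the group is old enough to compact, then write only to the compaction target; compaction itself requires at least two shards whose newest member is older than (currentTime - delta); and a shard may only be deleted if it is neither already compacted nor the compaction target. A minimal, self-contained sketch of those rules follows; ShardGroupSketch and ShardStub are illustrative stand-ins (not the real ShardEntryGroup/Shard classes), and delta/maxCreatedTime are assumed to mirror the fields used by shouldCompact().

import java.util.Collection;
import java.util.Collections;

// Sketch only -- ShardGroupSketch and ShardStub are stand-ins, not the real classes.
public class ShardGroupSketch {

    // Stand-in for Shard: index, creation time and compacted flag.
    public static final class ShardStub {
        final long shardIndex;
        final long createdTime;
        final boolean compacted;

        ShardStub( long shardIndex, long createdTime, boolean compacted ) {
            this.shardIndex = shardIndex;
            this.createdTime = createdTime;
            this.compacted = compacted;
        }
    }

    private final Collection<ShardStub> shards;   // every shard in the group
    private final ShardStub compactionTarget;     // shard all compactions write into
    private final long delta;                     // minimum age before compaction may run
    private final long maxCreatedTime;            // creation time of the newest shard

    public ShardGroupSketch( Collection<ShardStub> shards, ShardStub compactionTarget,
                             long delta, long maxCreatedTime ) {
        this.shards = shards;
        this.compactionTarget = compactionTarget;
        this.delta = delta;
        this.maxCreatedTime = maxCreatedTime;
    }

    // Compaction needs at least two shards, and the newest shard must be older than
    // (currentTime - delta) so that every writer has had a chance to see it.
    public boolean shouldCompact( long currentTime ) {
        return shards.size() >= 2 && currentTime - delta > maxCreatedTime;
    }

    // While the group is compactable, funnel writes into the compaction target only;
    // otherwise write to every shard in the group.
    public Collection<ShardStub> getWriteShards( long currentTime ) {
        if ( shouldCompact( currentTime ) ) {
            return Collections.singleton( compactionTarget );
        }
        return shards;
    }

    // A shard may be deleted only after its data has been moved: it must not already be
    // compacted and it must not be the compaction target itself.
    public boolean canBeDeleted( ShardStub shard ) {
        return !shard.compacted
                && compactionTarget != null
                && compactionTarget.shardIndex != shard.shardIndex;
    }
}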

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
index d0e642c..8b39115 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
@@ -52,7 +52,7 @@ public abstract class EdgeSearcher<R, C, T> implements ColumnParser<C, T>, Itera
          * TODO Shard fix this
          */
         return ScopedRowKey
-                .fromKey( scope, generateRowKey( shards.next().getMergeTarget().getShardIndex() ) );
+                .fromKey( scope, generateRowKey( shards.next().getCompactionTarget().getShardIndex() ) );
     }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
index 745e02a..b6f65f9 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
@@ -66,12 +66,9 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
      */
     private static final MultiTennantColumnFamily<ApplicationScope, DirectedRowKey, Long> EDGE_SHARDS =
             new MultiTennantColumnFamily<>( "Edge_Shards",
-                    new OrganizationScopedRowKeySerializer<>( new DirectedEdgeRowKeySerializer() ),
-                    LongSerializer.get() );
+                    new OrganizationScopedRowKeySerializer<>( new EdgeShardRowKeySerializer() ), LongSerializer.get() );
 
 
-    private static final byte HOLDER = 0x00;
-
     private static final ShardColumnParser COLUMN_PARSER = new ShardColumnParser();
 
 
@@ -90,13 +87,14 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
 
     @Override
-    public MutationBatch writeEdgeMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
-                                        final long shard, final long timestamp, final String... types ) {
-
+    public MutationBatch writeShardMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                         final Shard shard, final String... types ) {
 
         ValidationUtils.validateApplicationScope( scope );
         ValidationUtils.verifyIdentity( nodeId );
-        Preconditions.checkArgument( shard > -1, "shardId must be greater than -1" );
+        Preconditions.checkNotNull( shard );
+        Preconditions.checkArgument( shard.getShardIndex() > -1, "shardid must be greater than -1" );
+        Preconditions.checkArgument( shard.getCreatedTime() > -1, "createdTime must be greater than -1" );
         Preconditions.checkNotNull( types );
 
         final DirectedRowKey key = new DirectedRowKey( nodeId, nodeType, types );
@@ -105,15 +103,16 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
         final MutationBatch batch = keyspace.prepareMutationBatch();
 
-        batch.withTimestamp( timestamp ).withRow( EDGE_SHARDS, rowKey ).putColumn( shard, HOLDER );
+        batch.withTimestamp( shard.getCreatedTime() ).withRow( EDGE_SHARDS, rowKey )
+             .putColumn( shard.getShardIndex(), shard.isCompacted() );
 
         return batch;
     }
 
 
     @Override
-    public Iterator<Shard> getEdgeMetaData( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
-                                            final Optional<Shard> start, final String... types ) {
+    public Iterator<Shard> getShardMetaData( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                             final Optional<Shard> start, final String... types ) {
         /**
          * If the edge is present, we need to begin seeking from this
          */
@@ -139,12 +138,14 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
 
     @Override
-    public MutationBatch removeEdgeMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
-                                         final long shard, final String... types ) {
+    public MutationBatch removeShardMeta( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                          final Shard shard, final String... types ) {
 
         ValidationUtils.validateApplicationScope( scope );
         ValidationUtils.verifyIdentity( nodeId );
-        Preconditions.checkArgument( shard > -1, "shard must be greater than -1" );
+        Preconditions.checkNotNull( shard );
+        Preconditions.checkArgument( shard.getShardIndex() > -1, "shardid must be greater than -1" );
+        Preconditions.checkArgument( shard.getCreatedTime() > -1, "createdTime must be greater than -1" );
         Preconditions.checkNotNull( types );
 
         final DirectedRowKey key = new DirectedRowKey( nodeId, nodeType, types );
@@ -153,7 +154,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
         final MutationBatch batch = keyspace.prepareMutationBatch();
 
-        batch.withRow( EDGE_SHARDS, rowKey ).deleteColumn( shard );
+        batch.withRow( EDGE_SHARDS, rowKey ).deleteColumn( shard.getShardIndex() );
 
         return batch;
     }
@@ -186,7 +187,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
     }
 
 
-    private static class DirectedEdgeRowKeySerializer implements CompositeFieldSerializer<DirectedRowKey> {
+    private static class EdgeShardRowKeySerializer implements CompositeFieldSerializer<DirectedRowKey> {
 
         private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
 
@@ -247,7 +248,7 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
         @Override
         public Shard parseColumn( final Column<Long> column ) {
-            return new Shard( column.getName(), column.getTimestamp() );
+            return new Shard( column.getName(), column.getTimestamp(), column.getBooleanValue() );
         }
     }
 }
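
A note on the new EDGE_SHARDS layout used by writeShardMeta and ShardColumnParser above: each shard is persisted as a single column whose name is the shard index, whose write timestamp is the shard's createdTime, and whose boolean value is the compacted flag. The sketch below only illustrates that mapping; ShardColumnSketch and ShardStub are hypothetical stand-ins, not Astyanax types or the real Shard class.

// Illustrative only: how a Shard maps onto a single EDGE_SHARDS column.
public class ShardColumnSketch {

    // Stand-in for the real Shard.
    public static final class ShardStub {
        final long shardIndex;    // becomes the column name
        final long createdTime;   // becomes the column (write) timestamp
        final boolean compacted;  // becomes the boolean column value

        ShardStub( long shardIndex, long createdTime, boolean compacted ) {
            this.shardIndex = shardIndex;
            this.createdTime = createdTime;
            this.compacted = compacted;
        }
    }

    // Mirrors ShardColumnParser.parseColumn: rebuild the shard from the column's
    // name, timestamp and boolean value.
    public static ShardStub fromColumn( long columnName, long columnTimestamp, boolean columnValue ) {
        return new ShardStub( columnName, columnTimestamp, columnValue );
    }

    public static void main( String[] args ) {
        // A compacted shard at index 2000 created at time 10000, as it would round-trip.
        ShardStub shard = fromColumn( 2000L, 10000L, true );
        System.out.println( shard.shardIndex + " " + shard.createdTime + " " + shard.compacted );
    }
}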

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index dbf8382..6accf91 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -88,7 +88,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
                                                 final Optional<Shard> maxShardId, final String... edgeTypes ) {
 
         final Iterator<Shard> existingShards =
-                edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, maxShardId, edgeTypes );
+                edgeShardSerialization.getShardMetaData( scope, nodeId, nodeType, maxShardId, edgeTypes );
 
         return new ShardEntryGroupIterator( existingShards, graphFig.getShardMinDelta() );
     }
@@ -104,7 +104,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
          * latest
          */
         final Iterator<Shard> maxShards =
-                edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
+                edgeShardSerialization.getShardMetaData( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
 
 
         //if the first shard has already been allocated, do nothing.
@@ -204,10 +204,12 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
         final long createTimestamp = timeService.getCurrentTime();
 
+        final Shard shard = new Shard(marked.getTimestamp(), createTimestamp, false);
+
 
         try {
             this.edgeShardSerialization
-                    .writeEdgeMeta( scope, nodeId, nodeType, marked.getTimestamp(), createTimestamp, edgeType )
+                    .writeShardMeta( scope, nodeId, nodeType, shard, edgeType )
                     .execute();
         }
         catch ( ConnectionException e ) {

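For context on the NodeShardAllocationImpl change above: the newly allocated shard's index is the timestamp of the pivot (marked) edge that triggered the audit, its createdTime comes from the time service, and it always starts out uncompacted. A tiny illustrative sketch, with made-up literal values rather than the real Shard class:

public class ShardAllocationSketch {
    public static void main( String[] args ) {
        // The new shard's pivot is the timestamp of the marked edge that triggered the audit
        // (marked.getTimestamp() in the diff above); the creation time comes from the time service.
        long pivotEdgeTimestamp = 10005L;                // hypothetical edge timestamp
        long createdTime = System.currentTimeMillis();   // timeService.getCurrentTime() in the diff
        boolean compacted = false;                       // new shards always start uncompacted

        System.out.println( "new Shard( " + pivotEdgeTimestamp + ", " + createdTime + ", " + compacted + " )" );
    }
}
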
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
index ad30dc6..6ac83cf 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
@@ -249,7 +249,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
             this.shards = new TreeMap<>(ShardEntriesComparator.INSTANCE);
 
             for ( ShardEntryGroup shard : IterableUtil.wrap( shards ) ) {
-                this.shards.put(shard.getMergeTarget().getShardIndex() , shard );
+                this.shards.put(shard.getCompactionTarget().getShardIndex() , shard );
             }
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
index 39e198e..8e69be4 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
@@ -91,8 +91,6 @@ public class ShardEntryGroupIterator implements Iterator<ShardEntryGroup> {
             //we can't add this shard to the current group.  Add the group and return.
             next = shardEntryGroup;
 
-            shardEntryGroup.setNeighbor( shard );
-
             sourceIterator.pushback( shard );
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
index 937a51a..00e0164 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardSerializationTest.java
@@ -29,10 +29,10 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
+import org.apache.usergrid.persistence.collection.guice.MigrationManagerRule;
+import org.apache.usergrid.persistence.core.cassandra.CassandraRule;
 import org.apache.usergrid.persistence.core.cassandra.ITRunner;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
-import org.apache.usergrid.persistence.core.cassandra.CassandraRule;
-import org.apache.usergrid.persistence.collection.guice.MigrationManagerRule;
 import org.apache.usergrid.persistence.graph.guice.TestGraphModule;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
@@ -49,8 +49,8 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 
-@RunWith( ITRunner.class )
-@UseModules( { TestGraphModule.class } )
+@RunWith(ITRunner.class)
+@UseModules({ TestGraphModule.class })
 public class EdgeShardSerializationTest {
 
     @ClassRule
@@ -86,67 +86,87 @@ public class EdgeShardSerializationTest {
 
         final Id now = createId( "test" );
 
-        final long slice1 = 1000l;
+        final long timestamp = 10000l;
 
-        final long slice2 = slice1 * 2;
+        final Shard shard1 = new Shard( 1000l, timestamp, false );
 
-        final long slice3 = slice2 * 2;
+        final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );
+
+        final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );
 
-        final long timestamp = 10000l;
 
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice1, timestamp, types );
+        MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard1, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice2, timestamp, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard2, types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice3, timestamp, types ) );
+        batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard3, types ) );
 
         batch.execute();
 
 
-        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
+        Iterator<Shard> results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
         Shard next = results.next();
 
-        assertEquals( slice3, next.getShardIndex() );
+        assertEquals( shard3.getShardIndex(), next.getShardIndex() );
+
+        assertEquals( shard3.getCreatedTime(), next.getCreatedTime() );
+
+        assertEquals( shard3.isCompacted(), next.isCompacted() );
 
-        assertEquals( timestamp, next.getCreatedTime());
 
         next = results.next();
 
 
-        assertEquals( slice2, next.getShardIndex() );
+        assertEquals( shard2.getShardIndex(), next.getShardIndex() );
+
+        assertEquals( shard2.getCreatedTime(), next.getCreatedTime() );
+
+        assertEquals( shard2.isCompacted(), next.isCompacted() );
 
-        assertEquals( timestamp, next.getCreatedTime());
 
         next = results.next();
 
 
-        assertEquals( slice1, next.getShardIndex() );
+        assertEquals( shard1.getShardIndex(), next.getShardIndex() );
+
+        assertEquals( shard1.getCreatedTime(), next.getCreatedTime() );
 
-        assertEquals( timestamp, next.getCreatedTime());
+        assertEquals( shard1.isCompacted(), next.isCompacted() );
 
         assertFalse( results.hasNext() );
 
         //test we get nothing with the other node type
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
+        results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
 
-        assertFalse(results.hasNext());
+        assertFalse( results.hasNext() );
 
 
         //test paging and size
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.of( new Shard( slice2, 0l ) ), types );
+        results = edgeShardSerialization.getShardMetaData( scope, now, NodeType.SOURCE, Optional.of( shard2 ), types );
 
         next = results.next();
 
-        assertEquals( slice2, next.getShardIndex() );
-        assertEquals( timestamp, next.getCreatedTime());
+
+        assertEquals( shard2.getShardIndex(), next.getShardIndex() );
+
+        assertEquals( shard2.getCreatedTime(), next.getCreatedTime() );
+
+        assertEquals( shard2.isCompacted(), next.isCompacted() );
+
 
         next = results.next();
 
-        assertEquals( slice1, next.getShardIndex() );
-        assertEquals( timestamp, next.getCreatedTime());
+
+        assertEquals( shard1.getShardIndex(), next.getShardIndex() );
+
+        assertEquals( shard1.getCreatedTime(), next.getCreatedTime() );
+
+        assertEquals( shard1.isCompacted(), next.isCompacted() );
 
 
         assertFalse( results.hasNext() );
@@ -158,57 +178,66 @@ public class EdgeShardSerializationTest {
 
         final Id now = createId( "test" );
 
-        final long slice1 = 1000l;
 
-        final long slice2 = slice1 * 2;
+        final long timestamp = 10000l;
 
-        final long slice3 = slice2 * 2;
+        final Shard shard1 = new Shard( 1000l, timestamp, false );
 
-        final long timestamp = 10000l;
+        final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );
+
+        final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );
 
         String[] types = { "edgeType", "subType" };
 
-        MutationBatch batch = edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice1, timestamp, types );
+        MutationBatch batch =
+                edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard1, types );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice2, timestamp,types ) );
+        batch.mergeShallow(
+                edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard2, types ) );
 
-        batch.mergeShallow( edgeShardSerialization.writeEdgeMeta( scope, now, NodeType.SOURCE, slice3, timestamp, types ) );
+        batch.mergeShallow(
+                edgeShardSerialization.writeShardMeta( scope, now, NodeType.SOURCE, shard3, types ) );
 
         batch.execute();
 
 
-        Iterator<Shard> results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
+        Iterator<Shard> results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().getShardIndex() );
+        assertEquals( shard3, results.next() );
 
-        assertEquals( slice2, results.next().getShardIndex() );
+        assertEquals( shard2, results.next() );
 
-        assertEquals( slice1, results.next().getShardIndex() );
+        assertEquals( shard1, results.next() );
 
         assertFalse( results.hasNext() );
 
         //test nothing with other type
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
+        results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.TARGET, Optional.<Shard>absent(), types );
+
+        assertFalse( results.hasNext() );
 
-        assertFalse(results.hasNext());
 
         //test paging and size
-        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice1, types ).execute();
+        edgeShardSerialization.removeShardMeta( scope, now, NodeType.SOURCE, shard1, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
+        results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
-        assertEquals( slice3, results.next().getShardIndex() );
+        assertEquals( shard3.getShardIndex(), results.next().getShardIndex() );
 
-        assertEquals( slice2, results.next().getShardIndex() );
+        assertEquals( shard2.getShardIndex(), results.next().getShardIndex() );
 
         assertFalse( results.hasNext() );
 
 
-        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice2, types ).execute();
+        edgeShardSerialization.removeShardMeta( scope, now, NodeType.SOURCE, shard2, types ).execute();
 
-        edgeShardSerialization.removeEdgeMeta( scope, now, NodeType.SOURCE, slice3, types ).execute();
+        edgeShardSerialization.removeShardMeta( scope, now, NodeType.SOURCE, shard3, types ).execute();
 
-        results = edgeShardSerialization.getEdgeMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
+        results =
+                edgeShardSerialization.getShardMetaData( scope, now, NodeType.SOURCE, Optional.<Shard>absent(), types );
 
 
         assertFalse( results.hasNext() );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 8c60e2c..0ac7f78 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -86,556 +86,556 @@ public class NodeShardAllocationTest {
     }
 
 
-    @Test
-    public void minTime() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
-
-        final long returned = approximation.getMinTime();
-
-        assertEquals( "Correct time was returned", expected, returned );
-    }
-
-
-    @Test
-    public void noShards() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-
-        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-        /**
-         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
-
-        assertFalse( "No shard allocated", result );
-    }
-
-
-    @Test
-    public void existingFutureShardSameTime() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        final Shard futureShard = new Shard( 10000l, timeservicetime );
-
-        /**
-         * Mock up returning a min shard, and a future shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
-
-        assertFalse( "No shard allocated", result );
-    }
-
-
-    @Test
-    public void lowCountFutureShard() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardApproximation, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-
-        /**
-         * Mock up returning a min shard, and a future shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
-
-
-        //return a shard size < our max by 1
-
-        final long count = graphFig.getShardSize() - 1;
-
-        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.TARGET, 0l, type, subType ) )
-                .thenReturn( count );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
-
-        assertFalse( "Shard allocated", result );
-    }
-
-
-    @Test
-    public void equalCountFutureShard() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardApproximation, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-
-        /**
-         * Mock up returning a min shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
-
-
-        final long shardCount = graphFig.getShardSize();
-
-        //return a shard size equal to our max
-        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.SOURCE, 0l, type, subType ) )
-                .thenReturn( shardCount );
-
-        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
-        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
-
-
-        //mock up our mutation
-        when( edgeShardSerialization
-                .writeEdgeMeta( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), shardValue.capture(),
-                        timestampValue.capture(), same( type ), same( subType ) ) )
-                .thenReturn( mock( MutationBatch.class ) );
-
-
-        final SimpleMarkedEdge returnedEdge =
-                new SimpleMarkedEdge( nodeId, type, createId( "subType" ), 10005l, false );
-        final Iterator<MarkedEdge> edgeIterator = Collections.singleton( ( MarkedEdge ) returnedEdge ).iterator();
-
-        //mock up returning the value
-        when( shardedEdgeSerialization
-                .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
-                        any( Iterator.class ) ) ).thenReturn( edgeIterator );
-
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
-
-        assertTrue( "Shard allocated", result );
-
-        //check our new allocated UUID
-
-
-        final long savedTimestamp = timestampValue.getValue();
-
-
-        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
-
-
-        //now check our max value was set
-
-        final long savedShardPivot = shardValue.getValue();
-
-        assertEquals( "Expected max value to be the same", returnedEdge.getTimestamp(), savedShardPivot );
-    }
-
-
-    @Test
-    public void futureCountShardCleanup() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardApproximation, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        /**
-         * Use the time service to generate timestamps
-         */
-        final long timeservicetime = 10000;
-
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        assertTrue( "Shard cache mocked", graphFig.getShardCacheTimeout() > 0 );
-
-
-        /**
-         * Simulates clock drift when 2 nodes create future shards near one another
-         */
-        final long minDelta = graphFig.getShardMinDelta();
-
-
-        final Shard minShard = new Shard( 0l, 0l );
-
-        //a shard that isn't our minimum, but exists after compaction
-        final Shard compactedShard = new Shard( 5000, 1000 );
-
-        /**
-         * Simulate different node time allocation
-         */
-
-        final long minTime = 10000;
-        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3
-        // should be removed
-
-        //this should get dropped, It's allocated after future shard2 even though the time is less
-        final Shard futureShard1 = new Shard( 10000, minTime + minDelta );
-
-        //should get kept.
-        final Shard futureShard2 = new Shard( 10005, minTime );
-
-        //should be removed
-        final Shard futureShard3 = new Shard( 10010, minTime + minDelta / 2 );
-
-        /**
-         * Mock up returning a min shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn(
-                Arrays.asList( futureShard3, futureShard2, futureShard1, compactedShard, minShard ).iterator() );
-
-
-        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
-
-
-        //mock up our mutation
-        when( edgeShardSerialization
-                .removeEdgeMeta( same( scope ), same( nodeId ), eq( NodeType.TARGET ), newLongValue.capture(),
-                        same( type ), same( subType ) ) ).thenReturn( mock( MutationBatch.class ) );
-
-
-        final Iterator<ShardEntryGroup> result =
-                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
-
-
-        assertTrue( "Shards present", result.hasNext() );
-
-
-        ShardEntryGroup shardEntryGroup = result.next();
-
-        assertEquals( "Future shard returned", futureShard1, shardEntryGroup.getMergeTarget() );
-
-
-        //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1_ may be the only shard other
-        //nodes see while we're rolling our state.  This means it should be read and merged from as well
-
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
-
-        assertEquals( "Shard size as expected", 4, writeShards.size() );
-
-        assertTrue( writeShards.contains( futureShard1 ) );
-        assertTrue( writeShards.contains( futureShard2 ) );
-        assertTrue( writeShards.contains( futureShard3 ) );
-        assertTrue( writeShards.contains( compactedShard ) );
-
-
-        Collection<Shard> readShards = shardEntryGroup.getReadShards( minTime + minDelta );
-
-        assertEquals( "Shard size as expected", 4, readShards.size() );
-
-        assertTrue( readShards.contains( futureShard1 ) );
-        assertTrue( readShards.contains( futureShard2 ) );
-        assertTrue( readShards.contains( futureShard3 ) );
-        assertTrue( readShards.contains( compactedShard ) );
-
-
-        assertTrue( "Shards present", result.hasNext() );
-
-        shardEntryGroup = result.next();
-
-        writeShards = shardEntryGroup.getWriteShards();
-
-
-        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
-
-
-        writeShards = shardEntryGroup.getReadShards( minTime + minDelta );
-
-
-        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
-
-
-        assertFalse( "No shards left", result.hasNext() );
-    }
-
-
-    @Test
-    public void noShardsReturns() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        when( timeService.getCurrentTime() ).thenReturn( 10000l );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardApproximation, timeService, graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-        /**
-         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
-                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-
-        final Iterator<ShardEntryGroup> result =
-                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
-
-
-        ShardEntryGroup shardEntryGroup = result.next();
-
-        final Shard expected = new Shard( 0, 0 );
-
-        assertEquals( "Future shard returned", expected, shardEntryGroup.getMergeTarget() );
-
-
-        //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1_ may be the only shard other
-        //nodes see while we're rolling our state.  This means it should be read and merged from as well
-
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
-
-        Collection<Shard> readShards = shardEntryGroup.getReadShards( 10000l );
-
-
-        assertTrue( "0 shard allocated", writeShards.contains( expected ) );
-
-        assertTrue( "0 shard allocated", readShards.contains( expected ) );
-
-
-        assertFalse( "No shard allocated", result.hasNext() );
-    }
-
-
-    @Test
-    public void invalidConfiguration() {
-
-        final GraphFig graphFig = mock( GraphFig.class );
-
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
-
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
-
-
-        /**
-         * Return 100000 milliseconds
-         */
-        final TimeService timeService = mock( TimeService.class );
-
-        final long time = 100000l;
-
-        when( timeService.getCurrentTime() ).thenReturn( time );
-
-
-        final long cacheTimeout = 30000l;
-
-        when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l );
-
-
-        final long tooSmallDelta = ( long ) ( ( cacheTimeout * 2 ) * .99 );
-
-        when( graphFig.getShardMinDelta() ).thenReturn( tooSmallDelta );
-
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
-                        nodeShardApproximation, timeService, graphFig, keyspace );
-
-
-        /**
-         * Should throw an exception
-         */
-        try {
-            approximation.getMinTime();
-            fail( "Should have thrown a GraphRuntimeException" );
-        }
-        catch ( GraphRuntimeException gre ) {
-            //swallow
-        }
-
-        //now test something that passes.
-
-        final long minDelta = cacheTimeout * 2;
-
-        when( graphFig.getShardMinDelta() ).thenReturn( minDelta );
-
-        long returned = approximation.getMinTime();
-
-        long expectedReturned = time - minDelta;
-
-        assertEquals( expectedReturned, returned );
-
-        final long delta = cacheTimeout * 4;
-
-        when( graphFig.getShardMinDelta() ).thenReturn( delta );
-
-        returned = approximation.getMinTime();
-
-        expectedReturned = time - delta;
-
-        assertEquals( expectedReturned, returned );
-    }
+//    @Test
+//    public void minTime() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
+//
+//        final long returned = approximation.getMinTime();
+//
+//        assertEquals( "Correct time was returned", expected, returned );
+//    }
+//
+//
+//    @Test
+//    public void noShards() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//
+//        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//        /**
+//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
+//
+//        assertFalse( "No shard allocated", result );
+//    }
+//
+//
+//    @Test
+//    public void existingFutureShardSameTime() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        final Shard futureShard = new Shard( 10000l, timeservicetime, compacted );
+//
+//        /**
+//         * Mock up returning a min shard, and a future shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
+//
+//        assertFalse( "No shard allocated", result );
+//    }
+//
+//
+//    @Test
+//    public void lowCountFutureShard() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardApproximation, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//
+//        /**
+//         * Mock up returning a min shard, and a future shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l, compacted ) ).iterator() );
+//
+//
+//        //return a shard size < our max by 1
+//
+//        final long count = graphFig.getShardSize() - 1;
+//
+//        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.TARGET, 0l, type, subType ) )
+//                .thenReturn( count );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
+//
+//        assertFalse( "Shard allocated", result );
+//    }
+//
+//
+//    @Test
+//    public void equalCountFutureShard() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardApproximation, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//
+//        /**
+//         * Mock up returning a min shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l, compacted ) ).iterator() );
+//
+//
+//        final long shardCount = graphFig.getShardSize();
+//
+//        //return a shard size equal to our max
+//        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.SOURCE, 0l, type, subType ) )
+//                .thenReturn( shardCount );
+//
+//        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
+//        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
+//
+//
+//        //mock up our mutation
+//        when( edgeShardSerialization
+//                .writeShardMeta( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), shardValue.capture(),
+//                        timestampValue.capture(), same( type ), same( subType ) ) )
+//                .thenReturn( mock( MutationBatch.class ) );
+//
+//
+//        final SimpleMarkedEdge returnedEdge =
+//                new SimpleMarkedEdge( nodeId, type, createId( "subType" ), 10005l, false );
+//        final Iterator<MarkedEdge> edgeIterator = Collections.singleton( ( MarkedEdge ) returnedEdge ).iterator();
+//
+//        //mock up returning the value
+//        when( shardedEdgeSerialization
+//                .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
+//                        any( Iterator.class ) ) ).thenReturn( edgeIterator );
+//
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
+//
+//        assertTrue( "Shard allocated", result );
+//
+//        //check our new allocated UUID
+//
+//
+//        final long savedTimestamp = timestampValue.getValue();
+//
+//
+//        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
+//
+//
+//        //now check our max value was set
+//
+//        final long savedShardPivot = shardValue.getValue();
+//
+//        assertEquals( "Expected max value to be the same", returnedEdge.getTimestamp(), savedShardPivot );
+//    }
+//
+//
+//    @Test
+//    public void futureCountShardCleanup() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardApproximation, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        /**
+//         * Use the time service to generate timestamps
+//         */
+//        final long timeservicetime = 10000;
+//
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        assertTrue( "Shard cache mocked", graphFig.getShardCacheTimeout() > 0 );
+//
+//
+//        /**
+//         * Simulates clock drift when 2 nodes create future shards near one another
+//         */
+//        final long minDelta = graphFig.getShardMinDelta();
+//
+//
+//        final Shard minShard = new Shard( 0l, 0l, compacted );
+//
+//        //a shard that isn't our minimum, but exists after compaction
+//        final Shard compactedShard = new Shard( 5000, 1000, compacted );
+//
+//        /**
+//         * Simulate different node time allocation
+//         */
+//
+//        final long minTime = 10000;
+//        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3
+//        // should be removed
+//
+//        //this should get dropped, It's allocated after future shard2 even though the time is less
+//        final Shard futureShard1 = new Shard( 10000, minTime + minDelta, compacted );
+//
+//        //should get kept.
+//        final Shard futureShard2 = new Shard( 10005, minTime, compacted );
+//
+//        //should be removed
+//        final Shard futureShard3 = new Shard( 10010, minTime + minDelta / 2, compacted );
+//
+//        /**
+//         * Mock up returning a min shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn(
+//                Arrays.asList( futureShard3, futureShard2, futureShard1, compactedShard, minShard ).iterator() );
+//
+//
+//        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
+//
+//
+//        //mock up our mutation
+//        when( edgeShardSerialization
+//                .removeShardMeta( same( scope ), same( nodeId ), eq( NodeType.TARGET ), newLongValue.capture(),
+//                        same( type ), same( subType ) ) ).thenReturn( mock( MutationBatch.class ) );
+//
+//
+//        final Iterator<ShardEntryGroup> result =
+//                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+//
+//
+//        assertTrue( "Shards present", result.hasNext() );
+//
+//
+//        ShardEntryGroup shardEntryGroup = result.next();
+//
+//        assertEquals( "Future shard returned", futureShard1, shardEntryGroup.getCompactionTarget() );
+//
+//
+//        //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1) may be the only shard other
+//        //nodes see while we're rolling our state.  This means it should be read and merged from as well
+//
+//        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
+//
+//        assertEquals( "Shard size as expected", 4, writeShards.size() );
+//
+//        assertTrue( writeShards.contains( futureShard1 ) );
+//        assertTrue( writeShards.contains( futureShard2 ) );
+//        assertTrue( writeShards.contains( futureShard3 ) );
+//        assertTrue( writeShards.contains( compactedShard ) );
+//
+//
+//        Collection<Shard> readShards = shardEntryGroup.getReadShards( minTime + minDelta );
+//
+//        assertEquals( "Shard size as expected", 4, readShards.size() );
+//
+//        assertTrue( readShards.contains( futureShard1 ) );
+//        assertTrue( readShards.contains( futureShard2 ) );
+//        assertTrue( readShards.contains( futureShard3 ) );
+//        assertTrue( readShards.contains( compactedShard ) );
+//
+//
+//        assertTrue( "Shards present", result.hasNext() );
+//
+//        shardEntryGroup = result.next();
+//
+//        writeShards = shardEntryGroup.getWriteShards();
+//
+//
+//        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
+//
+//
+//        writeShards = shardEntryGroup.getReadShards( minTime + minDelta );
+//
+//
+//        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
+//
+//
+//        assertFalse( "No shards left", result.hasNext() );
+//    }
+//
+//
+//    @Test
+//    public void noShardsReturns() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        when( timeService.getCurrentTime() ).thenReturn( 10000l );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardApproximation, timeService, graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//        /**
+//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+//         */
+//        when( edgeShardSerialization
+//                .getShardMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+//                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+//
+//        final Iterator<ShardEntryGroup> result =
+//                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+//
+//
+//        ShardEntryGroup shardEntryGroup = result.next();
+//
+//        final Shard expected = new Shard( 0, 0, compacted );
+//
+//        assertEquals( "Future shard returned", expected, shardEntryGroup.getCompactionTarget() );
+//
+//
+//        //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1) may be the only shard other
+//        //nodes see while we're rolling our state.  This means it should be read and merged from as well
+//
+//        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
+//
+//        Collection<Shard> readShards = shardEntryGroup.getReadShards( 10000l );
+//
+//
+//        assertTrue( "0 shard allocated", writeShards.contains( expected ) );
+//
+//        assertTrue( "0 shard allocated", readShards.contains( expected ) );
+//
+//
+//        assertFalse( "No shard allocated", result.hasNext() );
+//    }
+//
+//
+//    @Test
+//    public void invalidConfiguration() {
+//
+//        final GraphFig graphFig = mock( GraphFig.class );
+//
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+//
+//        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+//
+//
+//        /**
+//         * Return 100000 milliseconds
+//         */
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final long time = 100000l;
+//
+//        when( timeService.getCurrentTime() ).thenReturn( time );
+//
+//
+//        final long cacheTimeout = 30000l;
+//
+//        when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l );
+//
+//
+//        final long tooSmallDelta = ( long ) ( ( cacheTimeout * 2 ) * .99 );
+//
+//        when( graphFig.getShardMinDelta() ).thenReturn( tooSmallDelta );
+//
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+//                        nodeShardApproximation, timeService, graphFig, keyspace );
+//
+//
+//        /**
+//         * Should throw an exception
+//         */
+//        try {
+//            approximation.getMinTime();
+//            fail( "Should have thrown a GraphRuntimeException" );
+//        }
+//        catch ( GraphRuntimeException gre ) {
+//            //swallow
+//        }
+//
+//        //now test something that passes.
+//
+//        final long minDelta = cacheTimeout * 2;
+//
+//        when( graphFig.getShardMinDelta() ).thenReturn( minDelta );
+//
+//        long returned = approximation.getMinTime();
+//
+//        long expectedReturned = time - minDelta;
+//
+//        assertEquals( expectedReturned, returned );
+//
+//        final long delta = cacheTimeout * 4;
+//
+//        when( graphFig.getShardMinDelta() ).thenReturn( delta );
+//
+//        returned = approximation.getMinTime();
+//
+//        expectedReturned = time - delta;
+//
+//        assertEquals( expectedReturned, returned );
+//    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/cd043b14/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
index 4e23d83..4df84f5 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
@@ -21,8 +21,9 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 
@@ -36,7 +37,7 @@ public class ShardEntryGroupTest {
 
         final long delta = 10000;
 
-        Shard rootShard = new Shard( 0, 0 );
+        Shard rootShard = new Shard( 0, 0, false );
 
         ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
 
@@ -46,9 +47,9 @@ public class ShardEntryGroupTest {
 
         assertFalse( "Single shard cannot be deleted", shardEntryGroup.canBeDeleted( rootShard ) );
 
-        assertSame( "Same shard for merge target", rootShard, shardEntryGroup.getMergeTarget() );
+        assertNull( "No merge target found", shardEntryGroup.getCompactionTarget() );
 
-        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.needsCompaction( 0 ) );
+        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.shouldCompact( Long.MAX_VALUE ) );
     }
 
 
@@ -57,22 +58,130 @@ public class ShardEntryGroupTest {
 
         final long delta = 10000;
 
-        Shard firstShard = new Shard( 1000, 1000 );
+        Shard firstShard = new Shard( 1000, 1000, false );
 
-        Shard secondShard = new Shard( 1000, 1000 );
+        Shard secondShard = new Shard( 1000, 1001, false );
 
 
         ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
 
-        final boolean result = shardEntryGroup.addShard( rootShard );
+        boolean result = shardEntryGroup.addShard( firstShard );
 
         assertTrue( "Shard added", result );
 
-        assertFalse( "Single shard cannot be deleted", shardEntryGroup.canBeDeleted( rootShard ) );
+        result = shardEntryGroup.addShard( secondShard );
+
+        assertTrue( " Shard added", result );
+
+
+        assertFalse( "Root shard cannot be deleted", shardEntryGroup.canBeDeleted( firstShard ) );
+
+        assertFalse( "Root shard cannot be deleted", shardEntryGroup.canBeDeleted( secondShard ) );
+
+        assertFalse( "Duplicate shard id cannot be deleted", shardEntryGroup.canBeDeleted( secondShard ) );
+
+        assertEquals( "Same shard for merge target", firstShard, shardEntryGroup.getCompactionTarget() );
+
+        //shouldn't return true, since we haven't passed delta time in the second shard
+        assertFalse( "Merge cannot be run within min time",
+                shardEntryGroup.shouldCompact( firstShard.getCreatedTime() + delta ) );
+
+        //shouldn't return true, since we haven't passed delta time in the second shard
+        assertFalse( "Merge cannot be run within min time",
+                shardEntryGroup.shouldCompact( secondShard.getCreatedTime() + delta ) );
+
+        assertTrue( "Merge should be run with after min time",
+                shardEntryGroup.shouldCompact( secondShard.getCreatedTime() + delta + 1 ) );
+    }
+
+
+    @Test
+    public void multipleShardGroups() {
+
+        final long delta = 10000;
+
+        Shard firstShard = new Shard( 1000, 10000, false );
+
+        Shard secondShard = new Shard( 999, 9000, false );
+
+        Shard compactedShard1 = new Shard( 900, 8000, true );
+
+        Shard compactedShard2 = new Shard( 800, 7000, true );
+
+
+        ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
+
+        boolean result = shardEntryGroup.addShard( firstShard );
+
+        assertTrue( "Shard added", result );
+
+        result = shardEntryGroup.addShard( secondShard );
+
+        assertTrue( " Shard added", result );
+
+        result = shardEntryGroup.addShard( compactedShard1 );
+
+        assertTrue( "Shard added", result );
+
+        result = shardEntryGroup.addShard( compactedShard2 );
+
+        assertFalse( "Shouldn't add since it's compacted", result );
+
+        ShardEntryGroup secondGroup = new ShardEntryGroup( delta );
+
+        result = secondGroup.addShard( compactedShard2 );
+
+        assertTrue( "Added successfully", result );
+    }
+
+
+    @Test
+    public void boundShardGroup() {
+
+        final long delta = 10000;
+
+        Shard firstShard = new Shard( 1000, 10000, false );
+
+        Shard secondShard = new Shard( 999, 9000, false );
+
+        Shard compactedShard1 = new Shard( 900, 8000, true );
+
+
+        ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
+
+        boolean result = shardEntryGroup.addShard( firstShard );
+
+        assertTrue( "Shard added", result );
+
+        result = shardEntryGroup.addShard( secondShard );
+
+        assertTrue( " Shard added", result );
+
+        result = shardEntryGroup.addShard( compactedShard1 );
+
+        assertTrue( "Shard added", result );
+
+
+
+        assertFalse( "Shard cannot be deleted", shardEntryGroup.canBeDeleted( firstShard ) );
+
+        assertFalse( "Root shard cannot be deleted", shardEntryGroup.canBeDeleted( secondShard ) );
+
+        assertFalse( "Duplicate shard id cannot be deleted", shardEntryGroup.canBeDeleted( secondShard ) );
+
+        assertEquals( "Same shard for merge target", firstShard, shardEntryGroup.getCompactionTarget() );
+
+        //shouldn't return true, since we haven't passed delta time in the second shard
+        assertFalse( "Merge cannot be run within min time",
+                shardEntryGroup.shouldCompact( firstShard.getCreatedTime() + delta ) );
+
+        //shouldn't return true, since we haven't passed delta time in the second shard
+        assertFalse( "Merge cannot be run within min time",
+                shardEntryGroup.shouldCompact( secondShard.getCreatedTime() + delta ) );
 
-        assertSame( "Same shard for merge target", rootShard, shardEntryGroup.getMergeTarget() );
+        assertTrue( "Merge should be run with after min time",
+                shardEntryGroup.shouldCompact( secondShard.getCreatedTime() + delta + 1 ) );
 
-        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.needsCompaction( 0 ) );
     }
 }
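
The assertions above pin down the compaction-timing rule: a group becomes compactable only once the newest un-compacted shard has existed for longer than the configured delta. A minimal stand-alone sketch of that rule (hypothetical names, not part of this patch):

    // Hypothetical sketch of the timing rule exercised by shouldCompact(); these names do
    // not exist in the Usergrid code base.
    public class CompactionTimingSketch {

        // Compaction may run only strictly after createdTime + delta has elapsed.
        static boolean shouldCompactAt( final long now, final long newestCreatedTime, final long delta ) {
            return now > newestCreatedTime + delta;
        }

        public static void main( final String[] args ) {
            final long created = 1000L;
            final long delta = 10000L;

            System.out.println( shouldCompactAt( created + delta, created, delta ) );     // false, still inside the window
            System.out.println( shouldCompactAt( created + delta + 1, created, delta ) ); // true, window has passed
        }
    }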
 


[06/11] Finished refactor. Need to continue on shard allocation

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
deleted file mode 100644
index 12b1d7c..0000000
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKey.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
-
-
-import org.apache.usergrid.persistence.model.entity.Id;
-
-
-/**
- * Used to store row keys by sourceId, targetId and edgeType
- */
-public class EdgeRowKey {
-    public final Id nodeId;
-    public final String[] edgeTypes;
-
-
-    public EdgeRowKey( final Id nodeId, final String[] edgeTypes ) {
-        this.nodeId = nodeId;
-        this.edgeTypes = edgeTypes;
-    }
-
-
-}
-
-
-

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
index be97f95..90b264c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeRowKeySerializer.java
@@ -21,8 +21,8 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
 
 import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
 import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeRowKey;
 import org.apache.usergrid.persistence.model.entity.Id;
-import org.apache.usergrid.persistence.model.entity.SimpleId;
 
 import com.netflix.astyanax.model.CompositeBuilder;
 import com.netflix.astyanax.model.CompositeParser;
@@ -31,9 +31,8 @@ import com.netflix.astyanax.model.CompositeParser;
 /**
  * Class to perform serialization for row keys from edges
  */
-public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey> {
 
-    private static final EdgeRowKeySerializer INSTANCE = new EdgeRowKeySerializer();
+public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey> {
 
     private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
 
@@ -42,13 +41,10 @@ public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey
     public void toComposite( final CompositeBuilder builder, final EdgeRowKey key ) {
 
         //add the row id to the composite
-        ID_SER.toComposite( builder, key.nodeId );
-
-        builder.addInteger( key.edgeTypes.length );
-
-        for(String type: key.edgeTypes){
-            builder.addString( type );
-        }
+        ID_SER.toComposite( builder, key.sourceId );
+        builder.addString( key.edgeType );
+        ID_SER.toComposite( builder, key.targetId );
+        builder.addLong( key.shardId );
     }
 
 
@@ -56,26 +52,12 @@ public class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey
     public EdgeRowKey fromComposite( final CompositeParser composite ) {
 
         final Id sourceId = ID_SER.fromComposite( composite );
+        final String edgeType = composite.readString();
+        final Id targetId = ID_SER.fromComposite( composite );
+        final long shard = composite.readLong();
 
-
-        final int length = composite.readInteger();
-
-        String[] types = new String[length];
-
-        for(int i = 0; i < length; i++){
-            types[i] = composite.readString();
-        }
-
-        return new EdgeRowKey( sourceId, types );
-
+        return new EdgeRowKey( sourceId, edgeType, targetId, shard );
     }
 
 
-
-    /**
-     * Get the singleton serializer
-     */
-    public static EdgeRowKeySerializer get() {
-        return INSTANCE;
-    }
 }
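
The reworked serializer writes a four-part composite: source id, edge type, target id, then shard id. For reference, a plain value object with that shape could look like the sketch below; it is illustrative only, with a UUID standing in for the Usergrid Id type, and is not the EdgeRowKey class the patch actually uses:

    import java.util.Objects;
    import java.util.UUID;

    // Illustrative stand-in mirroring the field order the serializer writes; not the real class.
    final class EdgeRowKeySketch {
        final UUID sourceId;     // stand-in for org.apache.usergrid.persistence.model.entity.Id
        final String edgeType;
        final UUID targetId;
        final long shardId;

        EdgeRowKeySketch( final UUID sourceId, final String edgeType, final UUID targetId, final long shardId ) {
            this.sourceId = sourceId;
            this.edgeType = edgeType;
            this.targetId = targetId;
            this.shardId = shardId;
        }

        @Override
        public boolean equals( final Object o ) {
            if ( !( o instanceof EdgeRowKeySketch ) ) {
                return false;
            }
            final EdgeRowKeySketch that = ( EdgeRowKeySketch ) o;
            return shardId == that.shardId && sourceId.equals( that.sourceId )
                    && edgeType.equals( that.edgeType ) && targetId.equals( that.targetId );
        }

        @Override
        public int hashCode() {
            return Objects.hash( sourceId, edgeType, targetId, shardId );
        }
    }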

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
new file mode 100644
index 0000000..3644210
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
@@ -0,0 +1,124 @@
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.util.Iterator;
+
+import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
+import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.Edge;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+
+import com.google.common.base.Optional;
+import com.netflix.astyanax.Serializer;
+import com.netflix.astyanax.model.Column;
+import com.netflix.astyanax.util.RangeBuilder;
+
+
+/**
+ * Searcher to be used when performing the search.  Performs I/O transformation as well as parsing for the iterator. If
+ * there are more row keys available to seek, the iterator will return true
+ *
+ * @param <R> The row type
+ * @param <C> The column type
+ * @param <T> The parsed return type
+ */
+public abstract class EdgeSearcher<R, C, T> implements ColumnParser<C, T>, Iterator<ScopedRowKey<ApplicationScope, R>> {
+
+    protected final Optional<Edge> last;
+    protected final long maxTimestamp;
+    protected final ApplicationScope scope;
+    protected final Iterator<ShardEntries> shards;
+
+
+    protected EdgeSearcher( final ApplicationScope scope, final long maxTimestamp, final Optional<Edge> last,
+                            final Iterator<ShardEntries> shards ) {
+        this.scope = scope;
+        this.maxTimestamp = maxTimestamp;
+        this.last = last;
+        this.shards = shards;
+    }
+
+
+    @Override
+    public boolean hasNext() {
+        return shards.hasNext();
+    }
+
+
+    @Override
+    public ScopedRowKey<ApplicationScope, R> next() {
+        /**
+         * Todo, multi scan
+         */
+        return ScopedRowKey
+                .fromKey( scope, generateRowKey( shards.next().getEntries().iterator().next().getShardIndex() ) );
+    }
+
+
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException( "Remove is unsupported" );
+    }
+
+
+    /**
+     * Set the range on a search
+     */
+    public void setRange( final RangeBuilder builder ) {
+
+        //set our start range since it was supplied to us
+        if ( last.isPresent() ) {
+            C sourceEdge = getStartColumn( last.get() );
+
+
+            builder.setStart( sourceEdge, getSerializer() );
+        }
+        else {
+
+
+        }
+    }
+
+
+    public boolean hasPage() {
+        return last.isPresent();
+    }
+
+
+    @Override
+    public T parseColumn( final Column<C> column ) {
+        final C edge = column.getName();
+
+        return createEdge( edge, column.getBooleanValue() );
+    }
+
+
+    /**
+     * Get the column's serializer
+     */
+    protected abstract Serializer<C> getSerializer();
+
+
+    /**
+     * Create a row key for this search to use
+     *
+     * @param shard The shard to use in the row key
+     */
+    protected abstract R generateRowKey( final long shard );
+
+
+    /**
+     * Set the start column to begin searching from.  The last is provided
+     */
+    protected abstract C getStartColumn( final Edge last );
+
+
+    /**
+     * Create an edge to return to the user based on the directed edge provided
+     *
+     * @param column The column name
+     * @param marked The marked flag in the column value
+     */
+    protected abstract T createEdge( final C column, final boolean marked );
+}
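
EdgeSearcher splits its work with the template-method pattern: the abstract class walks the shard iterator and owns range handling, while subclasses decide how a shard index becomes a row key and how a column becomes an edge. A toy, dependency-free rendering of that split (generic names only, no Astyanax types):

    import java.util.Iterator;

    // Toy template mirroring EdgeSearcher's division of labour; names are illustrative only.
    abstract class ShardRowKeyTemplate<R, C, T> implements Iterator<R> {

        private final Iterator<Long> shards;  // one row key is produced per shard index

        protected ShardRowKeyTemplate( final Iterator<Long> shards ) {
            this.shards = shards;
        }

        @Override
        public boolean hasNext() {
            return shards.hasNext();
        }

        @Override
        public R next() {
            // the subclass decides how a shard index maps to a row key
            return generateRowKey( shards.next() );
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException( "Remove is unsupported" );
        }

        /** Turn a raw column name plus its marked flag into the value handed back to the caller. */
        T parse( final C columnName, final boolean marked ) {
            return createEdge( columnName, marked );
        }

        protected abstract R generateRowKey( long shard );

        protected abstract T createEdge( C columnName, boolean marked );
    }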

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSerializer.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSerializer.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSerializer.java
new file mode 100644
index 0000000..d93f679
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSerializer.java
@@ -0,0 +1,77 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.nio.ByteBuffer;
+
+import org.apache.usergrid.persistence.core.astyanax.IdColDynamicCompositeSerializer;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.DirectedEdge;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+import com.google.common.base.Preconditions;
+import com.netflix.astyanax.model.DynamicComposite;
+import com.netflix.astyanax.serializers.AbstractSerializer;
+import com.netflix.astyanax.serializers.LongSerializer;
+
+
+/**
+ * Serializes to a source->target edge.  Note that we cannot set the edge type on de-serialization; only the target
+ * Id and timestamp are recovered.
+ */
+public class EdgeSerializer extends AbstractSerializer<DirectedEdge> {
+
+    private static final IdColDynamicCompositeSerializer ID_COL_SERIALIZER = IdColDynamicCompositeSerializer.get();
+    private static final LongSerializer LONG_SERIALIZER = LongSerializer.get();
+
+
+    @Override
+    public ByteBuffer toByteBuffer( final DirectedEdge edge ) {
+
+        DynamicComposite composite = new DynamicComposite();
+
+        composite.addComponent( edge.timestamp, LONG_SERIALIZER );
+
+        ID_COL_SERIALIZER.toComposite( composite, edge.id );
+
+        return composite.serialize();
+    }
+
+
+    @Override
+    public DirectedEdge fromByteBuffer( final ByteBuffer byteBuffer ) {
+        DynamicComposite composite = DynamicComposite.fromByteBuffer( byteBuffer );
+
+        Preconditions.checkArgument( composite.size() == 3, "Composite should have 3 elements" );
+
+
+        //return the version
+        final long timestamp = composite.get( 0, LONG_SERIALIZER );
+
+
+        //parse our id
+        final Id id = ID_COL_SERIALIZER.fromComposite( composite, 1 );
+
+
+        return new DirectedEdge( id, timestamp );
+    }
+}
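
The column name built here places the timestamp component first, so edge columns sort by time, with the id components following. A stripped-down round trip using a raw ByteBuffer (UUID standing in for Id, no DynamicComposite) illustrates the ordering idea; it is a sketch of the concept, not the wire format this patch produces:

    import java.nio.ByteBuffer;
    import java.util.UUID;

    // Illustrative only: timestamp-first encoding of (timestamp, id) into a buffer and back.
    public class EdgeColumnSketch {

        static ByteBuffer write( final long timestamp, final UUID id ) {
            final ByteBuffer buffer = ByteBuffer.allocate( 8 * 3 );  // three longs
            buffer.putLong( timestamp );                             // leading component -> time ordering
            buffer.putLong( id.getMostSignificantBits() );
            buffer.putLong( id.getLeastSignificantBits() );
            buffer.flip();
            return buffer;
        }

        static long readTimestamp( final ByteBuffer buffer ) {
            return buffer.getLong( 0 );                              // timestamp is always read first
        }

        public static void main( final String[] args ) {
            final ByteBuffer column = write( 10005L, UUID.randomUUID() );
            System.out.println( readTimestamp( column ) );           // prints 10005
        }
    }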

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
index 5b7c901..745e02a 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeShardSerializationImpl.java
@@ -66,7 +66,8 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
      */
     private static final MultiTennantColumnFamily<ApplicationScope, DirectedRowKey, Long> EDGE_SHARDS =
             new MultiTennantColumnFamily<>( "Edge_Shards",
-                    new OrganizationScopedRowKeySerializer<>( new DirectedEdgeRowKeySerializer() ), LongSerializer.get() );
+                    new OrganizationScopedRowKeySerializer<>( new DirectedEdgeRowKeySerializer() ),
+                    LongSerializer.get() );
 
 
     private static final byte HOLDER = 0x00;
@@ -239,8 +240,6 @@ public class EdgeShardSerializationImpl implements EdgeShardSerialization {
 
             return NodeType.TARGET;
         }
-
-
     }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index c0a44a9..20cb921 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -32,6 +32,7 @@ import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
+import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
@@ -55,17 +56,21 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
     private static final MinShardTimeComparator MIN_SHARD_TIME_COMPARATOR = new MinShardTimeComparator();
 
     private final EdgeShardSerialization edgeShardSerialization;
-//    private final NodeShardCounterSerialization edgeShardCounterSerialization;
+    private final EdgeSerialization edgeSerialization;
+    //    private final NodeShardCounterSerialization edgeShardCounterSerialization;
     private final NodeShardApproximation nodeShardApproximation;
     private final TimeService timeService;
     private final GraphFig graphFig;
     private final Keyspace keyspace;
 
+
     @Inject
     public NodeShardAllocationImpl( final EdgeShardSerialization edgeShardSerialization,
+                                    final EdgeSerialization edgeSerialization,
                                     final NodeShardApproximation nodeShardApproximation, final TimeService timeService,
                                     final GraphFig graphFig, final Keyspace keyspace ) {
         this.edgeShardSerialization = edgeShardSerialization;
+        this.edgeSerialization = edgeSerialization;
         this.nodeShardApproximation = nodeShardApproximation;
         this.timeService = timeService;
         this.graphFig = graphFig;
@@ -74,11 +79,11 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
     @Override
-    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final Optional<Shard> maxShardId,
-                                            final String... edgeTypes ) {
+    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                      final Optional<Shard> maxShardId, final String... edgeTypes ) {
 
         final Iterator<Shard> existingShards =
-                edgeShardSerialization.getEdgeMetaData( scope, nodeId,nodeType, maxShardId, edgeTypes );
+                edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, maxShardId, edgeTypes );
 
         final PushbackIterator<Shard> pushbackIterator = new PushbackIterator( existingShards );
 
@@ -95,7 +100,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
             final Shard shard = pushbackIterator.next();
 
             //we're done, our current time uuid is greater than the value stored
-            if ( shard.getCreatedTime() < minConflictTime  ) {
+            if ( shard.getCreatedTime() < minConflictTime ) {
                 //push it back into the iterator
                 pushbackIterator.pushback( shard );
                 break;
@@ -106,7 +111,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
         //clean up our future
-        Collections.sort(futures, MIN_SHARD_TIME_COMPARATOR);
+        Collections.sort( futures, MIN_SHARD_TIME_COMPARATOR );
 
 
         //we have more than 1 future value, we need to remove it
@@ -114,10 +119,11 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
         MutationBatch cleanup = keyspace.prepareMutationBatch();
 
         //remove all futures except the last one, it is the only value we shouldn't lazy remove
-        for ( int i = 1; i < futures.size() ; i++ ) {
+        for ( int i = 1; i < futures.size(); i++ ) {
             final Shard toRemove = futures.get( i );
 
-            final MutationBatch batch = edgeShardSerialization.removeEdgeMeta( scope, nodeId, nodeType, toRemove.getShardIndex(), edgeTypes );
+            final MutationBatch batch = edgeShardSerialization
+                    .removeEdgeMeta( scope, nodeId, nodeType, toRemove.getShardIndex(), edgeTypes );
 
             cleanup.mergeShallow( batch );
         }
@@ -131,7 +137,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
         }
 
 
-        final int futuresSize =  futures.size();
+        final int futuresSize = futures.size();
 
         if ( futuresSize > 0 ) {
             pushbackIterator.pushback( futures.get( 0 ) );
@@ -141,8 +147,8 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
         /**
          * Nothing to iterate, return an iterator with 0.
          */
-        if(!pushbackIterator.hasNext()){
-            pushbackIterator.pushback( new Shard(0l, 0l) );
+        if ( !pushbackIterator.hasNext() ) {
+            pushbackIterator.pushback( new Shard( 0l, 0l ) );
         }
 
         return pushbackIterator;
@@ -150,7 +156,8 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
     @Override
-    public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId,final NodeType nodeType,  final String... edgeType ) {
+    public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                  final String... edgeType ) {
 
         final Iterator<Shard> maxShards = getShards( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
 
@@ -169,23 +176,32 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
          */
 
 
-        final long count = nodeShardApproximation.getCount( scope, nodeId, nodeType,  maxShard.getShardIndex(), edgeType );
+        final long count =
+                nodeShardApproximation.getCount( scope, nodeId, nodeType, maxShard.getShardIndex(), edgeType );
 
         if ( count < graphFig.getShardSize() ) {
             return false;
         }
 
+
+        /**
+         * TODO, use the EdgeShardStrategy and ShardEdgeSerialization to audit this shard
+         */
+
+        //get the max edge, in this shard, and write it.
+
+
         //try to get a lock here, and fail if one isn't present
 
-//        final long newShardTime = timeService.getCurrentTime() + graphFig.getShardCacheTimeout() * 2;
-//
-//
-//        try {
-//            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, newShardTime, edgeType ).execute();
-//        }
-//        catch ( ConnectionException e ) {
-//            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
-//        }
+        //        final long newShardTime = timeService.getCurrentTime() + graphFig.getShardCacheTimeout() * 2;
+        //
+        //
+        //        try {
+        //            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, newShardTime, edgeType ).execute();
+        //        }
+        //        catch ( ConnectionException e ) {
+        //            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
+        //        }
 
 
         return true;
@@ -194,7 +210,7 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
     @Override
     public long getMinTime() {
-        return timeService.getCurrentTime() - (2 * graphFig.getShardCacheTimeout());
+        return timeService.getCurrentTime() - ( 2 * graphFig.getShardCacheTimeout() );
     }
 
 
@@ -203,16 +219,15 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
      */
     private static final class MinShardTimeComparator implements Comparator<Shard> {
 
-            @Override
-            public int compare( final Shard s1, final Shard s2 ) {
-                int result =  Long.compare( s1.getCreatedTime(), s2.getCreatedTime() );
-
-                if(result == 0){
-                    result = Long.compare( s1.getShardIndex(), s2.getShardIndex() );
-                }
+        @Override
+        public int compare( final Shard s1, final Shard s2 ) {
+            int result = Long.compare( s1.getCreatedTime(), s2.getCreatedTime() );
 
-                return result;
+            if ( result == 0 ) {
+                result = Long.compare( s1.getShardIndex(), s2.getShardIndex() );
             }
-        }
 
+            return result;
+        }
+    }
 }
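
In the hunk above, getMinTime() subtracts twice the shard-cache timeout from the current clock value. A worked example with made-up numbers shows the window that defines:

    // Hypothetical numbers only, mirroring getMinTime() = now - 2 * shardCacheTimeout.
    public class MinTimeSketch {
        public static void main( final String[] args ) {
            final long now = 100000L;               // "current" time in millis
            final long shardCacheTimeout = 30000L;

            final long minTime = now - 2 * shardCacheTimeout;

            System.out.println( minTime );          // 40000: the window is two cache timeouts wide
        }
    }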

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
index a0bb0ef..73e8d4f 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
@@ -47,7 +47,6 @@ import com.google.common.cache.LoadingCache;
 import com.google.inject.Inject;
 
 
-
 /**
  * Simple implementation of the shard cache.  Uses a local Guava cache with a timeout.  If a value is not present in
  * the cache, it will need to be searched via cassandra.
@@ -67,8 +66,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
      */
     @Inject
     public NodeShardCacheImpl( final NodeShardAllocation nodeShardAllocation, final GraphFig graphFig ) {
-        Preconditions.checkNotNull(nodeShardAllocation, "nodeShardAllocation is required");
-        Preconditions.checkNotNull(graphFig, "consistencyFig is required");
+        Preconditions.checkNotNull( nodeShardAllocation, "nodeShardAllocation is required" );
+        Preconditions.checkNotNull( graphFig, "consistencyFig is required" );
 
         this.nodeShardAllocation = nodeShardAllocation;
         this.graphFig = graphFig;
@@ -81,7 +80,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
             public void propertyChange( final PropertyChangeEvent evt ) {
                 final String propertyName = evt.getPropertyName();
 
-                if ( propertyName.equals( GraphFig.SHARD_CACHE_SIZE ) || propertyName.equals( GraphFig.SHARD_CACHE_TIMEOUT ) ) {
+                if ( propertyName.equals( GraphFig.SHARD_CACHE_SIZE ) || propertyName
+                        .equals( GraphFig.SHARD_CACHE_TIMEOUT ) ) {
                     updateCache();
                 }
             }
@@ -95,8 +95,8 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long timestamp,
-                               final String... edgeType ) {
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                        final long timestamp, final String... edgeType ) {
 
 
         final CacheKey key = new CacheKey( scope, nodeId, nodeType, edgeType );
@@ -121,27 +121,28 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long maxTimestamp,
-                                     final String... edgeType ) {
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                                 final long maxTimestamp, final String... edgeType ) {
         final CacheKey key = new CacheKey( scope, nodeId, nodeType, edgeType );
-              CacheEntry entry;
+        CacheEntry entry;
 
-              try {
-                  entry = this.graphs.get( key );
-              }
-              catch ( ExecutionException e ) {
-                  throw new GraphRuntimeException( "Unable to load shard key for graph", e );
-              }
+        try {
+            entry = this.graphs.get( key );
+        }
+        catch ( ExecutionException e ) {
+            throw new GraphRuntimeException( "Unable to load shard key for graph", e );
+        }
 
         Iterator<ShardEntries> iterator = entry.getShards( maxTimestamp );
 
-        if(iterator == null){
+        if ( iterator == null ) {
             return Collections.<ShardEntries>emptyList().iterator();
         }
 
         return iterator;
     }
 
+
     /**
      * This is a race condition.  We could re-init the shard while another thread is reading it.  This is fine, the read
      * doesn't have to be precise.  The algorithm accounts for stale data.
@@ -149,27 +150,31 @@ public class NodeShardCacheImpl implements NodeShardCache {
     private void updateCache() {
 
         this.graphs = CacheBuilder.newBuilder().maximumSize( graphFig.getShardCacheSize() )
-                  .expireAfterWrite( graphFig.getShardCacheSize(), TimeUnit.MILLISECONDS )
-                  .build( new CacheLoader<CacheKey, CacheEntry>() {
+                                  .expireAfterWrite( graphFig.getShardCacheSize(), TimeUnit.MILLISECONDS )
+                                  .build( new CacheLoader<CacheKey, CacheEntry>() {
 
 
-                      @Override
-                      public CacheEntry load( final CacheKey key ) throws Exception {
+                                      @Override
+                                      public CacheEntry load( final CacheKey key ) throws Exception {
 
-//
-//                          /**
-//                           * Perform an audit in case we need to allocate a new shard
-//                           */
-//                          nodeShardAllocation.auditMaxShard( key.scope, key.id, key.types );
-//                          //TODO, we need to put some sort of upper bounds on this, it could possibly get too large
+                                          //
+                                          //                          /**
+                                          //                           * Perform an audit in case we need to allocate
+                                          // a new shard
+                                          //                           */
+                                          //                          nodeShardAllocation.auditMaxShard( key.scope,
+                                          // key.id, key.types );
+                                          //                          //TODO, we need to put some sort of upper
+                                          // bounds on this, it could possibly get too large
 
 
-                          final Iterator<Shard> edges = nodeShardAllocation
-                                  .getShards( key.scope, key.id, key.nodeType,  Optional.<Shard>absent(), key.types );
+                                          final Iterator<Shard> edges = nodeShardAllocation
+                                                  .getShards( key.scope, key.id, key.nodeType, Optional.<Shard>absent(),
+                                                          key.types );
 
-                          return new CacheEntry( edges );
-                      }
-                  } );
+                                          return new CacheEntry( edges );
+                                      }
+                                  } );
     }
 
 
@@ -241,7 +246,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
         private CacheEntry( final Iterator<Shard> shards ) {
-            this.shards = new TreeSet<>( );
+            this.shards = new TreeSet<>();
 
             for ( Shard shard : IterableUtil.wrap( shards ) ) {
                 this.shards.add( shard );
@@ -254,23 +259,21 @@ public class NodeShardCacheImpl implements NodeShardCache {
          */
         public ShardEntries getShardId( final Long seek ) {
             return bootstrapEntry();
-//            return this.shards.floor( seek );
+            //            return this.shards.floor( seek );
         }
 
 
         /**
          * Get all shards <= this one in descending order
-         * @return
          */
-        public Iterator<ShardEntries> getShards( final Long maxShard ){
-            return  Collections.singleton(bootstrapEntry() ).iterator();
-//            return this.shards.headSet(maxShard, true  ).descendingIterator();
+        public Iterator<ShardEntries> getShards( final Long maxShard ) {
+            return Collections.singleton( bootstrapEntry() ).iterator();
+            //            return this.shards.headSet(maxShard, true  ).descendingIterator();
         }
 
-        private ShardEntries bootstrapEntry(){
-            return new ShardEntries( Collections.singleton( new Shard(0l, 0l) ) );
+
+        private ShardEntries bootstrapEntry() {
+            return new ShardEntries( Collections.singleton( new Shard( 0l, 0l ) ) );
         }
     }
-
-
 }
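
The cache rebuild above follows the standard Guava LoadingCache recipe: a size bound, write expiry, and a CacheLoader that recomputes an entry on a miss (as shown, expireAfterWrite is fed getShardCacheSize(), which reads like it was meant to be the cache timeout, since that builder argument is a duration). A minimal, self-contained version of the recipe with string keys and arbitrary limits:

    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    // Minimal Guava LoadingCache sketch; the size and timeout here are arbitrary examples.
    public class ShardCacheSketch {

        private final LoadingCache<String, Integer> cache = CacheBuilder.newBuilder()
                .maximumSize( 1000 )                                 // bound the number of entries
                .expireAfterWrite( 30000, TimeUnit.MILLISECONDS )    // entries are reloaded after the timeout
                .build( new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load( final String key ) {
                        // on a miss the loader recomputes the value, analogous to getShards(...)
                        return key.length();
                    }
                } );

        public int get( final String key ) {
            return cache.getUnchecked( key );
        }
    }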

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowSerializer.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowSerializer.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowSerializer.java
new file mode 100644
index 0000000..f901699
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowSerializer.java
@@ -0,0 +1,64 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
+import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKey;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+import com.netflix.astyanax.model.CompositeBuilder;
+import com.netflix.astyanax.model.CompositeParser;
+
+
+/**
+ * Class to perform serialization for row keys from edges
+ */
+public class RowSerializer implements CompositeFieldSerializer<RowKey> {
+
+    private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
+
+
+    @Override
+    public void toComposite( final CompositeBuilder builder, final RowKey key ) {
+
+        //add the row id to the composite
+        ID_SER.toComposite( builder, key.nodeId );
+
+        builder.addLong( key.hash[0] );
+        builder.addLong( key.hash[1] );
+        builder.addLong( key.shardId );
+    }
+
+
+    @Override
+    public RowKey fromComposite( final CompositeParser composite ) {
+
+        final Id id = ID_SER.fromComposite( composite );
+        final long[] hash = new long[] { composite.readLong(), composite.readLong() };
+        final long shard = composite.readLong();
+
+
+        return new RowKey( id, hash, shard );
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowTypeSerializer.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowTypeSerializer.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowTypeSerializer.java
new file mode 100644
index 0000000..6591d72
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/RowTypeSerializer.java
@@ -0,0 +1,60 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
+import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKeyType;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+import com.netflix.astyanax.model.CompositeBuilder;
+import com.netflix.astyanax.model.CompositeParser;
+
+
+public class RowTypeSerializer implements CompositeFieldSerializer<RowKeyType> {
+
+    private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
+
+
+    @Override
+    public void toComposite( final CompositeBuilder builder, final RowKeyType keyType ) {
+
+        //add the row id to the composite
+        ID_SER.toComposite( builder, keyType.nodeId );
+
+        builder.addLong( keyType.hash[0] );
+        builder.addLong( keyType.hash[1] );
+        builder.addLong( keyType.shardId );
+    }
+
+
+    @Override
+    public RowKeyType fromComposite( final CompositeParser composite ) {
+
+        final Id id = ID_SER.fromComposite( composite );
+        final long[] hash = new long[] { composite.readLong(), composite.readLong() };
+        final long shard = composite.readLong();
+
+        return new RowKeyType( id, hash, shard );
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardRowIterator.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardRowIterator.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardRowIterator.java
new file mode 100644
index 0000000..1da85e1
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardRowIterator.java
@@ -0,0 +1,112 @@
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.usergrid.persistence.core.astyanax.ColumnNameIterator;
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
+import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+
+import com.netflix.astyanax.Keyspace;
+import com.netflix.astyanax.model.ConsistencyLevel;
+import com.netflix.astyanax.query.RowQuery;
+import com.netflix.astyanax.util.RangeBuilder;
+
+
+/**
+ * Internal iterator to iterate over multiple row keys
+ *
+ * @param <R> The row type
+ * @param <C> The column type
+ * @param <T> The parsed return type
+ */
+public class ShardRowIterator<R, C, T> implements Iterator<T> {
+
+    private final EdgeSearcher<R, C, T> searcher;
+
+    private final MultiTennantColumnFamily<ApplicationScope, R, C> cf;
+
+    private Iterator<T> currentColumnIterator;
+
+    private final Keyspace keyspace;
+
+    private final int pageSize;
+
+    private final ConsistencyLevel consistencyLevel;
+
+
+    public ShardRowIterator( final EdgeSearcher<R, C, T> searcher,
+                             final MultiTennantColumnFamily<ApplicationScope, R, C> cf, final Keyspace keyspace,
+                             final ConsistencyLevel consistencyLevel, final int pageSize ) {
+        this.searcher = searcher;
+        this.cf = cf;
+        this.keyspace = keyspace;
+        this.pageSize = pageSize;
+        this.consistencyLevel = consistencyLevel;
+    }
+
+
+    @Override
+    public boolean hasNext() {
+        //we have more columns to return
+        if ( currentColumnIterator != null && currentColumnIterator.hasNext() ) {
+            return true;
+        }
+
+        /**
+         * We have another row key, advance to it and re-check
+         */
+        if ( searcher.hasNext() ) {
+            advanceRow();
+            return hasNext();
+        }
+
+        //we have no more columns, and no more row keys, we're done
+        return false;
+    }
+
+
+    @Override
+    public T next() {
+        if ( !hasNext() ) {
+            throw new NoSuchElementException( "There are no more rows or columns left to advance" );
+        }
+
+        return currentColumnIterator.next();
+    }
+
+
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException( "Remove is unsupported" );
+    }
+
+
+    /**
+     * Advance our iterator to the next row (assumes the check for row keys is elsewhere)
+     */
+    private void advanceRow() {
+
+        /**
+         * If the edge is present, we need to begin seeking from it
+         */
+
+        final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( pageSize );
+
+
+        //set the range into the search
+        searcher.setRange( rangeBuilder );
+
+        final ScopedRowKey<ApplicationScope, R> rowKey = searcher.next();
+
+
+        RowQuery<ScopedRowKey<ApplicationScope, R>, C> query =
+                keyspace.prepareQuery( cf ).setConsistencyLevel( consistencyLevel ).getKey( rowKey )
+                        .autoPaginate( true ).withColumnRange( rangeBuilder.build() );
+
+
+        currentColumnIterator = new ColumnNameIterator<C, T>( query, searcher, searcher.hasPage() );
+    }
+}

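ShardRowIterator pages columns for one shard row at a time and only asks the EdgeSearcher for the next row key once the current column iterator is exhausted, so callers see a single stream of parsed values across all shards. A hedged usage sketch (the searcher, column family, and page size are placeholders):

    // Sketch: drain every parsed edge across all shard rows the searcher yields.
    Iterator<MarkedEdge> edges = new ShardRowIterator<>( searcher, columnFamily, keyspace,
            ConsistencyLevel.CL_QUORUM, 1000 /* page size */ );

    while ( edges.hasNext() ) {
        MarkedEdge edge = edges.next();
        // hasNext() transparently pages within a row and advances to the next shard row
    }
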
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
new file mode 100644
index 0000000..a0d1e6e
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
@@ -0,0 +1,568 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.util.Iterator;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
+import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.core.util.ValidationUtils;
+import org.apache.usergrid.persistence.graph.Edge;
+import org.apache.usergrid.persistence.graph.GraphFig;
+import org.apache.usergrid.persistence.graph.MarkedEdge;
+import org.apache.usergrid.persistence.graph.SearchByEdge;
+import org.apache.usergrid.persistence.graph.SearchByEdgeType;
+import org.apache.usergrid.persistence.graph.SearchByIdType;
+import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.DirectedEdge;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeRowKey;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKey;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKeyType;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
+import org.apache.usergrid.persistence.graph.serialization.util.EdgeUtils;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+import com.google.inject.Singleton;
+import com.netflix.astyanax.Keyspace;
+import com.netflix.astyanax.MutationBatch;
+import com.netflix.astyanax.Serializer;
+import com.netflix.astyanax.util.RangeBuilder;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+
+@Singleton
+public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
+
+    protected final Keyspace keyspace;
+    protected final CassandraConfig cassandraConfig;
+    protected final GraphFig graphFig;
+    protected final EdgeShardStrategy writeEdgeShardStrategy;
+
+
+    @Inject
+    public ShardedEdgeSerializationImpl( final Keyspace keyspace, final CassandraConfig cassandraConfig,
+                                         final GraphFig graphFig, final EdgeShardStrategy writeEdgeShardStrategy ) {
+
+        checkNotNull( keyspace, "keyspace required" );
+        checkNotNull( cassandraConfig, "cassandraConfig required" );
+        checkNotNull( graphFig, "graphFig required" );
+        checkNotNull( writeEdgeShardStrategy, "writeEdgeShardStrategy required" );
+
+
+        this.keyspace = keyspace;
+        this.cassandraConfig = cassandraConfig;
+        this.graphFig = graphFig;
+        this.writeEdgeShardStrategy = writeEdgeShardStrategy;
+    }
+
+
+    @Override
+    public MutationBatch writeEdge( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
+                                    final MarkedEdge markedEdge, final UUID timestamp ) {
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateEdge( markedEdge );
+        ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );
+
+
+        final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
+                                            .withTimestamp( timestamp.timestamp() );
+
+        final boolean isDeleted = markedEdge.isDeleted();
+
+
+        doWrite( columnFamilies, scope, markedEdge, new RowOp<RowKey>() {
+            @Override
+            public void writeEdge( final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily,
+                                   final RowKey rowKey, final DirectedEdge edge ) {
+                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).putColumn( edge, isDeleted );
+            }
+
+
+            @Override
+            public void countEdge( final Id rowId, final NodeType nodeType, final long shardId,
+                                   final String... types ) {
+                if ( !isDeleted ) {
+                    writeEdgeShardStrategy.increment( scope, rowId, nodeType, shardId, 1l, types );
+                }
+            }
+
+
+            @Override
+            public void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
+                                      final EdgeRowKey rowKey, final long timestamp ) {
+                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).putColumn( timestamp, isDeleted );
+            }
+        } );
+
+
+        return batch;
+    }
+
+
+    @Override
+    public MutationBatch deleteEdge( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
+                                     final MarkedEdge markedEdge, final UUID timestamp ) {
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateEdge( markedEdge );
+        ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );
+
+
+        final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
+                                            .withTimestamp( timestamp.timestamp() );
+
+
+        doWrite( columnFamilies, scope, markedEdge, new RowOp<RowKey>() {
+            @Override
+            public void writeEdge( final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily,
+                                   final RowKey rowKey, final DirectedEdge edge ) {
+                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).deleteColumn( edge );
+            }
+
+
+            @Override
+            public void countEdge( final Id rowId, final NodeType nodeType, final long shardId,
+                                   final String... types ) {
+                writeEdgeShardStrategy.increment( scope, rowId, nodeType, shardId, -1, types );
+            }
+
+
+            @Override
+            public void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
+                                      final EdgeRowKey rowKey, final long timestamp ) {
+                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).deleteColumn( timestamp );
+            }
+        } );
+
+
+        return batch;
+    }
+
+
+    /**
+     * Write the edge into every applicable shard and column family
+     *
+     * @param columnFamilies The column families to write into
+     * @param scope The scope to encapsulate
+     * @param edge The edge to write
+     * @param op The row operation to invoke
+     */
+    private void doWrite( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope, final MarkedEdge edge,
+                          final RowOp op ) {
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateEdge( edge );
+
+        final Id sourceNodeId = edge.getSourceNode();
+        final String sourceNodeType = sourceNodeId.getType();
+        final Id targetNodeId = edge.getTargetNode();
+        final String targetNodeType = targetNodeId.getType();
+        final long timestamp = edge.getTimestamp();
+        final String type = edge.getType();
+
+
+        /**
+         * Build the row keys for each column family based on the edge
+         */
+
+
+        /**
+         * write edges from source->target
+         */
+
+
+        final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
+
+        final ShardEntries sourceRowKeyShard =
+                writeEdgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type );
+
+        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceCf =
+                columnFamilies.getSourceNodeCfName();
+
+
+        for ( Shard shard : sourceRowKeyShard.getEntries() ) {
+
+            final long shardId = shard.getShardIndex();
+            final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
+            op.writeEdge( sourceCf, sourceRowKey, sourceEdge );
+            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type );
+        }
+
+
+        final ShardEntries sourceWithTypeRowKeyShard = writeEdgeShardStrategy
+                .getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type, targetNodeType );
+
+        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetCf =
+                columnFamilies.getSourceNodeTargetTypeCfName();
+
+        for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
+
+            final long shardId = shard.getShardIndex();
+            final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
+
+            op.writeEdge( targetCf, sourceRowKeyType, sourceEdge );
+            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type, targetNodeType );
+        }
+
+
+        /**
+         * write edges from target<-source
+         */
+
+        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
+
+
+        final ShardEntries targetRowKeyShard =
+                writeEdgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type );
+
+        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceByTargetCf =
+                columnFamilies.getTargetNodeCfName();
+
+        for ( Shard shard : targetRowKeyShard.getEntries() ) {
+            final long shardId = shard.getShardIndex();
+            final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
+
+            op.writeEdge( sourceByTargetCf, targetRowKey, targetEdge );
+            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type );
+        }
+
+
+        final ShardEntries targetWithTypeRowKeyShard = writeEdgeShardStrategy
+                .getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type, sourceNodeType );
+
+        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetBySourceCf =
+                columnFamilies.getTargetNodeSourceTypeCfName();
+
+
+        for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
+
+            final long shardId = shard.getShardIndex();
+
+            final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, shardId );
+
+
+            op.writeEdge( targetBySourceCf, targetRowKeyType, targetEdge );
+            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type, sourceNodeType );
+        }
+
+        /**
+         * Always use shard 0l; this hard-limits us to ~2 billion timestamps for the same edge (a single row)
+         */
+        final EdgeRowKey edgeRowKey = new EdgeRowKey( sourceNodeId, type, targetNodeId, 0l );
+
+
+        /**
+         * Write this timestamp into the version log for the source->target edge
+         */
+        op.writeVersion( columnFamilies.getGraphEdgeVersions(), edgeRowKey, timestamp );
+    }
+
+
+    @Override
+    public Iterator<MarkedEdge> getEdgeVersions( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
+                                                 final SearchByEdge search, final Iterator<ShardEntries> shards ) {
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateSearchByEdge( search );
+
+        final Id targetId = search.targetNode();
+        final Id sourceId = search.sourceNode();
+        final String type = search.getType();
+        final long maxTimestamp = search.getMaxTimestamp();
+        final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily =
+                columnFamilies.getGraphEdgeVersions();
+        final Serializer<Long> serializer = columnFamily.getColumnSerializer();
+
+        final EdgeSearcher<EdgeRowKey, Long, MarkedEdge> searcher =
+                new EdgeSearcher<EdgeRowKey, Long, MarkedEdge>( scope, maxTimestamp, search.last(), shards ) {
+
+                    @Override
+                    protected Serializer<Long> getSerializer() {
+                        return serializer;
+                    }
+
+
+                    @Override
+                    public void setRange( final RangeBuilder builder ) {
+
+
+                        if ( last.isPresent() ) {
+                            super.setRange( builder );
+                            return;
+                        }
+
+                        //start seeking at a value < our max version
+                        builder.setStart( maxTimestamp );
+                    }
+
+
+                    @Override
+                    protected EdgeRowKey generateRowKey( long shard ) {
+                        return new EdgeRowKey( sourceId, type, targetId, shard );
+                    }
+
+
+                    @Override
+                    protected Long getStartColumn( final Edge last ) {
+                        return last.getTimestamp();
+                    }
+
+
+                    @Override
+                    protected MarkedEdge createEdge( final Long column, final boolean marked ) {
+                        return new SimpleMarkedEdge( sourceId, type, targetId, column.longValue(), marked );
+                    }
+                };
+
+        return new ShardRowIterator<>( searcher, columnFamily, keyspace, cassandraConfig.getReadCL(),
+                graphFig.getScanPageSize() );
+    }
+
+
+    @Override
+    public Iterator<MarkedEdge> getEdgesFromSource( final EdgeColumnFamilies columnFamilies,
+                                                    final ApplicationScope scope, final SearchByEdgeType edgeType,
+                                                    final Iterator<ShardEntries> shards ) {
+
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateSearchByEdgeType( edgeType );
+
+        final Id sourceId = edgeType.getNode();
+        final String type = edgeType.getType();
+        final long maxTimestamp = edgeType.getMaxTimestamp();
+        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily =
+                columnFamilies.getSourceNodeCfName();
+        final Serializer<DirectedEdge> serializer = columnFamily.getColumnSerializer();
+
+
+        final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
+                new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(), shards ) {
+
+
+                    @Override
+                    protected Serializer<DirectedEdge> getSerializer() {
+                        return serializer;
+                    }
+
+
+                    @Override
+                    protected RowKey generateRowKey( long shard ) {
+                        return new RowKey( sourceId, type, shard );
+                    }
+
+
+                    @Override
+                    protected DirectedEdge getStartColumn( final Edge last ) {
+                        return new DirectedEdge( last.getTargetNode(), last.getTimestamp() );
+                    }
+
+
+                    @Override
+                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
+                        return new SimpleMarkedEdge( sourceId, type, edge.id, edge.timestamp, marked );
+                    }
+                };
+
+
+        return new ShardRowIterator<>( searcher, columnFamily, keyspace, cassandraConfig.getReadCL(),
+                graphFig.getScanPageSize() );
+    }
+
+
+    @Override
+    public Iterator<MarkedEdge> getEdgesFromSourceByTargetType( final EdgeColumnFamilies columnFamilies,
+                                                                final ApplicationScope scope,
+                                                                final SearchByIdType edgeType,
+                                                                final Iterator<ShardEntries> shards ) {
+
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateSearchByEdgeType( edgeType );
+
+        final Id sourceId = edgeType.getNode();
+        final String type = edgeType.getType();
+        final String targetType = edgeType.getIdType();
+        final long maxTimestamp = edgeType.getMaxTimestamp();
+        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> columnFamily =
+                columnFamilies.getSourceNodeTargetTypeCfName();
+        final Serializer<DirectedEdge> serializer = columnFamily.getColumnSerializer();
+
+
+        final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
+                new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(), shards ) {
+
+                    @Override
+                    protected Serializer<DirectedEdge> getSerializer() {
+                        return serializer;
+                    }
+
+
+                    @Override
+                    protected RowKeyType generateRowKey( long shard ) {
+                        return new RowKeyType( sourceId, type, targetType, shard );
+                    }
+
+
+                    @Override
+                    protected DirectedEdge getStartColumn( final Edge last ) {
+                        return new DirectedEdge( last.getTargetNode(), last.getTimestamp() );
+                    }
+
+
+                    @Override
+                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
+                        return new SimpleMarkedEdge( sourceId, type, edge.id, edge.timestamp, marked );
+                    }
+                };
+
+        return new ShardRowIterator<>( searcher, columnFamily, keyspace, cassandraConfig.getReadCL(),
+                graphFig.getScanPageSize() );
+    }
+
+
+    @Override
+    public Iterator<MarkedEdge> getEdgesToTarget( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
+                                                  final SearchByEdgeType edgeType,
+                                                  final Iterator<ShardEntries> shards ) {
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateSearchByEdgeType( edgeType );
+
+        final Id targetId = edgeType.getNode();
+        final String type = edgeType.getType();
+        final long maxTimestamp = edgeType.getMaxTimestamp();
+        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily =
+                columnFamilies.getTargetNodeCfName();
+        final Serializer<DirectedEdge> serializer = columnFamily.getColumnSerializer();
+
+        final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
+                new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(), shards ) {
+
+                    @Override
+                    protected Serializer<DirectedEdge> getSerializer() {
+                        return serializer;
+                    }
+
+
+                    @Override
+                    protected RowKey generateRowKey( long shard ) {
+                        return new RowKey( targetId, type, shard );
+                    }
+
+
+                    @Override
+                    protected DirectedEdge getStartColumn( final Edge last ) {
+                        return new DirectedEdge( last.getSourceNode(), last.getTimestamp() );
+                    }
+
+
+                    @Override
+                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
+                        return new SimpleMarkedEdge( edge.id, type, targetId, edge.timestamp, marked );
+                    }
+                };
+
+
+        return new ShardRowIterator<>( searcher, columnFamily, keyspace, cassandraConfig.getReadCL(),
+                graphFig.getScanPageSize() );
+    }
+
+
+    @Override
+    public Iterator<MarkedEdge> getEdgesToTargetBySourceType( final EdgeColumnFamilies columnFamilies,
+                                                              final ApplicationScope scope,
+                                                              final SearchByIdType edgeType,
+                                                              final Iterator<ShardEntries> shards ) {
+
+        ValidationUtils.validateApplicationScope( scope );
+        EdgeUtils.validateSearchByEdgeType( edgeType );
+
+        final Id targetId = edgeType.getNode();
+        final String sourceType = edgeType.getIdType();
+        final String type = edgeType.getType();
+        final long maxTimestamp = edgeType.getMaxTimestamp();
+        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> columnFamily =
+                columnFamilies.getTargetNodeSourceTypeCfName();
+        final Serializer<DirectedEdge> serializer = columnFamily.getColumnSerializer();
+
+
+        final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
+                new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(), shards ) {
+                    @Override
+                    protected Serializer<DirectedEdge> getSerializer() {
+                        return serializer;
+                    }
+
+
+                    @Override
+                    protected RowKeyType generateRowKey( final long shard ) {
+                        return new RowKeyType( targetId, type, sourceType, shard );
+                    }
+
+
+                    @Override
+                    protected DirectedEdge getStartColumn( final Edge last ) {
+                        return new DirectedEdge( last.getSourceNode(), last.getTimestamp() );
+                    }
+
+
+                    @Override
+                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
+                        return new SimpleMarkedEdge( edge.id, type, targetId, edge.timestamp, marked );
+                    }
+                };
+
+        return new ShardRowIterator<>( searcher, columnFamily, keyspace, cassandraConfig.getReadCL(),
+                graphFig.getScanPageSize() );
+    }
+
+
+    /**
+     * Simple callback to perform puts and deletes with common row setup code
+     *
+     * @param <R> The row key type
+     */
+    private static interface RowOp<R> {
+
+        /**
+         * Write the edge with the given data
+         */
+        void writeEdge( final MultiTennantColumnFamily<ApplicationScope, R, DirectedEdge> columnFamily, R rowKey,
+                        DirectedEdge edge );
+
+        /**
+         * Perform the count on the edge
+         */
+        void countEdge( final Id rowId, NodeType type, long shardId, String... types );
+
+        /**
+         * Write the edge into the version cf
+         */
+        void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
+                           EdgeRowKey rowKey, long timestamp );
+    }
+}

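writeEdge and deleteEdge both return an un-executed MutationBatch built over every shard in the write group, so a caller can combine them before a single round trip. A hedged usage sketch (edgeToWrite, edgeToRemove, and timeUuid are placeholders):

    // Sketch: build both mutations through the sharded serialization, merge, then execute once.
    MutationBatch write  = shardedEdgeSerialization.writeEdge( columnFamilies, scope, edgeToWrite, timeUuid );
    MutationBatch remove = shardedEdgeSerialization.deleteEdge( columnFamilies, scope, edgeToRemove, timeUuid );

    try {
        write.mergeShallow( remove );   // Astyanax merges the row mutations into one batch
        write.execute();                // single round trip to Cassandra
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to write edge mutations", e );
    }
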
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeColumnFamilies.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeColumnFamilies.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeColumnFamilies.java
new file mode 100644
index 0000000..9050b0a
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeColumnFamilies.java
@@ -0,0 +1,150 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.DynamicCompositeType;
+
+import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
+import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySerializer;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.DirectedEdge;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeRowKey;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKey;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKeyType;
+
+import com.netflix.astyanax.serializers.LongSerializer;
+
+import static org.apache.usergrid.persistence.core.astyanax.ColumnTypes.LONG_TYPE_REVERSED;
+import static org.apache.usergrid.persistence.core.astyanax.ColumnTypes.UUID_TYPE_REVERSED;
+
+
+/**
+ * Size-based implementation of the edge column families
+ */
+public class SizebasedEdgeColumnFamilies implements EdgeColumnFamilies {
+
+
+    //Row key with no type
+    private static final RowSerializer ROW_SERIALIZER = new RowSerializer();
+
+    //row key with target id type
+    private static final RowTypeSerializer ROW_TYPE_SERIALIZER = new RowTypeSerializer();
+
+    private static final EdgeRowKeySerializer EDGE_ROW_KEY_SERIALIZER = new EdgeRowKeySerializer();
+
+    //Edge serializers
+    private static final EdgeSerializer EDGE_SERIALIZER = new EdgeSerializer();
+
+    private static final LongSerializer LONG_SERIALIZER = LongSerializer.get();
+
+    private static final String EDGE_DYNAMIC_COMPOSITE_TYPE =
+            //we purposefully associate lower case "l" and "u" with reversed types.  This way we can use
+            //the default serialization in Astyanax, but get reverse order in Cassandra
+            DynamicCompositeType.class.getSimpleName() + "(s=>UTF8Type,l=>" + LONG_TYPE_REVERSED + ",u=>"
+                    + UUID_TYPE_REVERSED + ")";
+
+
+    //initialize the CF's from our implementation
+    private static final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> SOURCE_NODE_EDGES =
+            new MultiTennantColumnFamily<>( "Graph_Source_Node_Edges",
+                    new OrganizationScopedRowKeySerializer<>( ROW_SERIALIZER ), EDGE_SERIALIZER );
+
+
+    private static final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> TARGET_NODE_EDGES =
+            new MultiTennantColumnFamily<>( "Graph_Target_Node_Edges",
+                    new OrganizationScopedRowKeySerializer<>( ROW_SERIALIZER ), EDGE_SERIALIZER );
+
+
+    private static final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> SOURCE_NODE_TARGET_TYPE =
+            new MultiTennantColumnFamily<>( "Graph_Source_Node_Target_Type",
+                    new OrganizationScopedRowKeySerializer<>( ROW_TYPE_SERIALIZER ), EDGE_SERIALIZER );
+
+
+    /**
+     * Edges pointing to the target node, partitioned by the source node's type.  The target node is the row key
+     */
+    private static final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> TARGET_NODE_SOURCE_TYPE =
+            new MultiTennantColumnFamily<>( "Graph_Target_Node_Source_Type",
+                    new OrganizationScopedRowKeySerializer<>( ROW_TYPE_SERIALIZER ), EDGE_SERIALIZER );
+
+
+    private static final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> EDGE_VERSIONS =
+            new MultiTennantColumnFamily<>( "Graph_Edge_Versions",
+                    new OrganizationScopedRowKeySerializer<>( EDGE_ROW_KEY_SERIALIZER ), LONG_SERIALIZER );
+
+
+    @Override
+    public MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> getSourceNodeCfName() {
+        return SOURCE_NODE_EDGES;
+    }
+
+
+    @Override
+    public MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> getTargetNodeCfName() {
+        return TARGET_NODE_EDGES;
+    }
+
+
+    @Override
+    public MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> getSourceNodeTargetTypeCfName() {
+        return SOURCE_NODE_TARGET_TYPE;
+    }
+
+
+    @Override
+    public MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> getTargetNodeSourceTypeCfName() {
+        return TARGET_NODE_SOURCE_TYPE;
+    }
+
+
+    @Override
+    public MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> getGraphEdgeVersions() {
+        return EDGE_VERSIONS;
+    }
+
+
+    @Override
+    public Collection<MultiTennantColumnFamilyDefinition> getColumnFamilies() {
+        return Arrays
+                .asList( graphCf( SOURCE_NODE_EDGES ), graphCf( TARGET_NODE_EDGES ), graphCf( SOURCE_NODE_TARGET_TYPE ),
+                        graphCf( TARGET_NODE_SOURCE_TYPE ),
+                        new MultiTennantColumnFamilyDefinition( EDGE_VERSIONS, BytesType.class.getSimpleName(),
+                                ColumnTypes.LONG_TYPE_REVERSED, BytesType.class.getSimpleName(),
+                                MultiTennantColumnFamilyDefinition.CacheOption.KEYS ) );
+    }
+
+
+    /**
+     * Helper to generate an edge column family definition using the shared dynamic composite comparator
+     */
+    private MultiTennantColumnFamilyDefinition graphCf( MultiTennantColumnFamily cf ) {
+        return new MultiTennantColumnFamilyDefinition( cf, BytesType.class.getSimpleName(), EDGE_DYNAMIC_COMPOSITE_TYPE,
+                BytesType.class.getSimpleName(), MultiTennantColumnFamilyDefinition.CacheOption.KEYS );
+    }
+}

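getColumnFamilies() exists so that a migration step can create every edge CF with the dynamic composite comparator defined above. A hedged sketch of that step; the accessors shown on MultiTennantColumnFamilyDefinition are assumptions:

    // Sketch only: definition.getColumnFamily()/getOptions() are assumed accessor names.
    EdgeColumnFamilies families = new SizebasedEdgeColumnFamilies();

    try {
        for ( MultiTennantColumnFamilyDefinition definition : families.getColumnFamilies() ) {
            // each definition carries the comparator, key validator and caching option for the CF
            keyspace.createColumnFamily( definition.getColumnFamily(), definition.getOptions() );
        }
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to create graph column families", e );
    }
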
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
index 7a55c53..a71960b 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
@@ -54,52 +54,23 @@ public class SizebasedEdgeShardStrategy implements EdgeShardStrategy {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId,  final NodeType nodeType, final long timestamp,
-                                final String... types ) {
+    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final NodeType nodeType,
+                                        final long timestamp, final String... types ) {
         return shardCache.getWriteShards( scope, rowKeyId, nodeType, timestamp, types );
     }
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId,   final NodeType nodeType,final long maxTimestamp,
-                                         final String... types ) {
+    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId,
+                                                 final NodeType nodeType, final long maxTimestamp,
+                                                 final String... types ) {
         return shardCache.getReadShards( scope, rowKeyId, nodeType, maxTimestamp, types );
     }
 
 
     @Override
-    public void increment( final ApplicationScope scope, final Id rowKeyId,  final NodeType nodeType, final long shardId, final long count,
-                           final String... types ) {
+    public void increment( final ApplicationScope scope, final Id rowKeyId, final NodeType nodeType, final long shardId,
+                           final long count, final String... types ) {
         shardApproximation.increment( scope, rowKeyId, nodeType, shardId, count, types );
     }
-
-
-    @Override
-    public String getSourceNodeCfName() {
-        return "Graph_Source_Node_Edges";
-    }
-
-
-    @Override
-    public String getTargetNodeCfName() {
-        return "Graph_Target_Node_Edges";
-    }
-
-
-    @Override
-    public String getSourceNodeTargetTypeCfName() {
-        return "Graph_Source_Node_Target_Type";
-    }
-
-
-    @Override
-    public String getTargetNodeSourceTypeCfName() {
-        return "Graph_Target_Node_Source_Type";
-    }
-
-
-    @Override
-    public String getGraphEdgeVersions() {
-        return "Graph_Edge_Versions";
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SourceEdgeSearcher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SourceEdgeSearcher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SourceEdgeSearcher.java
new file mode 100644
index 0000000..b33fcaf
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SourceEdgeSearcher.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+public class SourceEdgeSearcher {}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
index adcb42e..501cb83 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/GraphManagerShardingIT.java
@@ -116,6 +116,8 @@ public class GraphManagerShardingIT {
         }
 
 
+
+
         long shardCount = nodeShardApproximation.getCount( scope, sourceId, NodeType.SOURCE,  0l, edgeType );
 
         assertEquals("Shard count for source node should be the same as write count", writeCount, shardCount);


[10/11] git commit: Checkpoint, still a WIP. Broken stuff.

Posted by to...@apache.org.
Checkpoint, still a WIP.  Broken stuff.


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/3ecd603f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/3ecd603f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/3ecd603f

Branch: refs/heads/USERGRID-188
Commit: 3ecd603f9f28a7b2308c14ff2c590e39c8424b8c
Parents: f3b7ead
Author: Todd Nine <tn...@apigee.com>
Authored: Tue Jul 22 18:22:12 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Tue Jul 22 18:22:12 2014 -0600

----------------------------------------------------------------------
 .../usergrid/persistence/graph/GraphFig.java    |  26 ++
 .../impl/EdgeSerializationImpl.java             |  35 +-
 .../impl/shard/EdgeShardStrategy.java           |   6 +-
 .../impl/shard/NodeShardAllocation.java         |   2 +-
 .../impl/shard/NodeShardCache.java              |   5 +-
 .../graph/serialization/impl/shard/Shard.java   |   5 +
 .../serialization/impl/shard/ShardEntries.java  |  46 ---
 .../impl/shard/ShardEntryGroup.java             | 212 +++++++++++
 .../impl/shard/ShardedEdgeSerialization.java    |  10 +-
 .../impl/shard/impl/EdgeSearcher.java           |  10 +-
 .../shard/impl/NodeShardAllocationImpl.java     | 169 ++++-----
 .../impl/shard/impl/NodeShardCacheImpl.java     |  88 +++--
 .../shard/impl/ShardEntryGroupIterator.java     |  99 ++++++
 .../impl/ShardedEdgeSerializationImpl.java      | 142 ++++----
 .../shard/impl/SizebasedEdgeShardStrategy.java  |   6 +-
 .../impl/shard/NodeShardAllocationTest.java     | 355 +++++++++++++------
 .../impl/shard/ShardEntryGroupTest.java         |  80 +++++
 .../shard/count/NodeShardApproximationTest.java |   6 +
 18 files changed, 882 insertions(+), 420 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
index f84fef4..ae1c66c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/GraphFig.java
@@ -39,8 +39,27 @@ public interface GraphFig extends GuicyFig {
 
     public static final String SHARD_CACHE_SIZE = "usergrid.graph.shard.cache.size";
 
+
+    /**
+     * The cache timeout.  A local cache entry will live for at most this amount of time (in millis).
+     */
     public static final String SHARD_CACHE_TIMEOUT = "usergrid.graph.shard.cache.timeout";
 
+    /**
+     * The minimum amount of time (in millis) that must elapse between shard allocations.  Must be at least 2x the cache timeout.
+     *
+     * Note that you should also pad this for node clock drift.  A good value would be 2x the shard cache timeout + 30 seconds,
+     * assuming you run NTP and allow a maximum drift of 30 seconds.
+     */
+    public static final String SHARD_MIN_DELTA = "usergrid.graph.shard.min.delta";
+
+
+    /**
+     * The maximum amount of clock drift allowed between client nodes in milliseconds
+     */
+    public static final String SHARD_MAX_DRIFT = "usergrid.graph.shard.max.drift";
+
+
     public static final String COUNTER_WRITE_FLUSH_COUNT = "usergrid.graph.shard.counter.beginFlush.count";
 
     public static final String COUNTER_WRITE_FLUSH_INTERVAL = "usergrid.graph.shard.counter.beginFlush.interval";
@@ -71,11 +90,18 @@ public interface GraphFig extends GuicyFig {
     @Key(SHARD_CACHE_TIMEOUT)
     long getShardCacheTimeout();
 
+    @Default("60000")
+    @Key( SHARD_MIN_DELTA )
+    long getShardMinDelta();
+
+
     @Default( "250000" )
     @Key( SHARD_CACHE_SIZE )
     long getShardCacheSize();
 
 
+
+
     @Default( "10000" )
     @Key( COUNTER_WRITE_FLUSH_COUNT )
     long getCounterFlushCount();

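The SHARD_MIN_DELTA rule above is simple arithmetic; a short sketch with hypothetical values (not the shipped defaults):

    // Editor's illustration of the javadoc rule: min delta >= 2 * cache timeout, padded for clock drift.
    final long shardCacheTimeout = 30000L;   // usergrid.graph.shard.cache.timeout (hypothetical)
    final long maxClockDrift     = 30000L;   // usergrid.graph.shard.max.drift (hypothetical)
    final long shardMinDelta     = 2 * shardCacheTimeout + maxClockDrift;   // 90000 ms for usergrid.graph.shard.min.delta
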
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
index 5a6e201..c586607 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
@@ -20,54 +20,31 @@
 package org.apache.usergrid.persistence.graph.serialization.impl;
 
 
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.UUID;
 
 import javax.inject.Inject;
 
-import org.apache.cassandra.db.marshal.BytesType;
-
 import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
-import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
-import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
-import org.apache.usergrid.persistence.core.astyanax.IdColDynamicCompositeSerializer;
-import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
-import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
-import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
-import org.apache.usergrid.persistence.core.migration.Migration;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.core.util.ValidationUtils;
-import org.apache.usergrid.persistence.graph.Edge;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.MarkedEdge;
 import org.apache.usergrid.persistence.graph.SearchByEdge;
 import org.apache.usergrid.persistence.graph.SearchByEdgeType;
 import org.apache.usergrid.persistence.graph.SearchByIdType;
-import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.EdgeSearcher;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.ShardRowIterator;
-import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeUtils;
 import org.apache.usergrid.persistence.model.entity.Id;
 
-import com.google.common.base.Preconditions;
 import com.google.inject.Singleton;
 import com.netflix.astyanax.Keyspace;
 import com.netflix.astyanax.MutationBatch;
-import com.netflix.astyanax.Serializer;
-import com.netflix.astyanax.model.CompositeBuilder;
-import com.netflix.astyanax.model.CompositeParser;
-import com.netflix.astyanax.model.DynamicComposite;
-import com.netflix.astyanax.serializers.AbstractSerializer;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
@@ -130,7 +107,7 @@ public class EdgeSerializationImpl implements EdgeSerialization  {
         final String type = search.getType();
         final long maxTimestamp = search.getMaxTimestamp();
 
-        final Iterator<ShardEntries> readShards =
+        final Iterator<ShardEntryGroup> readShards =
                 edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type );
 
         return shardedEdgeSerialization.getEdgeVersions( edgeColumnFamilies, scope, search, readShards );
@@ -149,7 +126,7 @@ public class EdgeSerializationImpl implements EdgeSerialization  {
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
 
-        final Iterator<ShardEntries> readShards =
+        final Iterator<ShardEntryGroup> readShards =
                 edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type );
 
         return shardedEdgeSerialization.getEdgesFromSource( edgeColumnFamilies, scope, edgeType, readShards );
@@ -169,7 +146,7 @@ public class EdgeSerializationImpl implements EdgeSerialization  {
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
 
-        final Iterator<ShardEntries> readShards =   edgeShardStrategy
+        final Iterator<ShardEntryGroup> readShards =   edgeShardStrategy
                                         .getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type, targetType );
 
         return shardedEdgeSerialization.getEdgesFromSourceByTargetType( edgeColumnFamilies, scope, edgeType, readShards );
@@ -187,7 +164,7 @@ public class EdgeSerializationImpl implements EdgeSerialization  {
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
 
-        final Iterator<ShardEntries> readShards =  edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type );
+        final Iterator<ShardEntryGroup> readShards =  edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type );
 
         return shardedEdgeSerialization.getEdgesToTarget( edgeColumnFamilies, scope, edgeType, readShards );
     }
@@ -206,7 +183,7 @@ public class EdgeSerializationImpl implements EdgeSerialization  {
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
 
-        Iterator<ShardEntries> readShards =   edgeShardStrategy
+        Iterator<ShardEntryGroup> readShards =   edgeShardStrategy
                                         .getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type, sourceType );
 
         return shardedEdgeSerialization.getEdgesToTargetBySourceType( edgeColumnFamilies, scope, edgeType, readShards );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
index be4153b..c6cf2aa 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
@@ -21,9 +21,7 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 
 
 import java.util.Iterator;
-import java.util.UUID;
 
-import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -39,7 +37,7 @@ public interface EdgeShardStrategy {
      * @param timestamp The timestamp on the edge
      * @param types The types in the edge
      */
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId,final  NodeType nodeType, final long timestamp,
+    public ShardEntryGroup getWriteShards( final ApplicationScope scope, final Id rowKeyId,final  NodeType nodeType, final long timestamp,
                                            final String... types );
 
 
@@ -52,7 +50,7 @@ public interface EdgeShardStrategy {
      * @param maxTimestamp The max timestamp to use
      * @param types the types in the edge
      */
-    public Iterator<ShardEntries> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final NodeType nodeType,final long maxTimestamp,final  String... types );
+    public Iterator<ShardEntryGroup> getReadShards(final ApplicationScope scope,final  Id rowKeyId, final NodeType nodeType,final long maxTimestamp,final  String... types );
 
     /**
      * Increment our count meta data by the passed value.  Can be a positive or a negative number.

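A hedged sketch of how a writer drives the strategy end to end: resolve the shard group for the write, mutate each shard in it, then bump the approximate counter so size-based allocation can split the row later. The accessor on ShardEntryGroup is an assumption, since that class is not shown in this diff:

    // Sketch only: group.getWriteShards() is an assumed accessor on ShardEntryGroup.
    final ShardEntryGroup group =
            edgeShardStrategy.getWriteShards( scope, sourceId, NodeType.SOURCE, timestamp, edgeType );

    for ( Shard shard : group.getWriteShards() ) {
        final long shardId = shard.getShardIndex();
        // ... write the edge column into the row for this shard ...
        edgeShardStrategy.increment( scope, sourceId, NodeType.SOURCE, shardId, 1l, edgeType );
    }
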
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
index f17ebd5..8deba66 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocation.java
@@ -45,7 +45,7 @@ public interface NodeShardAllocation {
      * @param edgeTypes
      * @return A list of all shards <= the current shard.  This will always return 0l if no shards are allocated
      */
-    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, Optional<Shard> maxShardId,
+    public Iterator<ShardEntryGroup> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, Optional<Shard> maxShardId,
                                             final String... edgeTypes );
 
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
index 7ce0b96..b39e7e5 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardCache.java
@@ -20,7 +20,6 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 
 
 import java.util.Iterator;
-import java.util.UUID;
 
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -41,7 +40,7 @@ public interface NodeShardCache {
      * @param timestamp The time to select the slice for.
      * @param edgeType
      */
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long timestamp,
+    public ShardEntryGroup getWriteShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long timestamp,
                                 final String... edgeType );
 
     /**
@@ -53,7 +52,7 @@ public interface NodeShardCache {
      * @param edgeType
      * @return
      */
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long maxTimestamp,
+    public Iterator<ShardEntryGroup> getReadShards( final ApplicationScope scope, final Id nodeId, NodeType nodeType, final long maxTimestamp,
                                      final String... edgeType );
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
index 80aea16..f95699b 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/Shard.java
@@ -47,6 +47,11 @@ public class Shard implements Comparable<Shard> {
     }
 
 
+    /**
+     * Compare the shards based on the timestamp first, then the created time second
+     * @param o
+     * @return
+     */
     @Override
     public int compareTo( final Shard o ) {
         if ( o == null ) {

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
deleted file mode 100644
index 570633c..0000000
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.usergrid.persistence.graph.serialization.impl.shard;
-
-
-import java.util.Set;
-
-
-/**
- * There are cases where we need to read or write to more than 1 shard.  This object encapsulates
- * a set of shards that should be written to.
- */
-public class ShardEntries {
-
-
-    private Set<Shard> shards;
-
-
-    public ShardEntries( Set<Shard> shards ) {
-        this.shards = shards;
-    }
-
-
-    public Set<Shard> getEntries() {
-        return shards;
-    }
-
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
new file mode 100644
index 0000000..6bd1421
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.TreeMap;
+
+
+/**
+ * There are cases where we need to read or write to more than 1 shard.  This object encapsulates
+ * a set of shards that should be written to and read from.  All reads should combine the data sets from
+ * all shards in the group, and writes should be written to each shard.  Once the group can safely be compacted,
+ * a background process should be triggered to remove additional shards and make seeks faster.  This multi-read/write
+ * should only occur during the time period of the delta (in milliseconds), after which the next read will asynchronously compact the
+ * shards into a single shard.
+ */
+public class ShardEntryGroup {
+
+
+    private TreeMap<Long, Shard> shards;
+
+    private Shard minShardByIndex;
+
+    private final long delta;
+
+    private Shard neighbor;
+
+
+    /**
+     * Create a new group.
+     * @param delta The max delta in milliseconds between shard create times for a shard to be considered a member of this group
+     */
+    public ShardEntryGroup( final long delta ) {
+        this.delta = delta;
+        this.shards = new TreeMap<>( ShardTimeComparator.INSTANCE );
+    }
+
+
+    /**
+     * Only add a shard if the created timestamp is within the delta of one of the entries
+     * @param shard
+     * @return
+     */
+    public boolean addShard(final Shard shard){
+
+        final long shardCreateTime = shard.getCreatedTime();
+
+        //the first shard always seeds the group
+        if( shards.isEmpty() ){
+            this.shards.put( shardCreateTime, shard );
+            minShardByIndex = shard;
+            return true;
+        }
+
+        //compare the create time and see if it falls within the delta of an existing entry
+        final Long lessThanKey = shards.floorKey( shardCreateTime );
+
+        final Long greaterThanKey = shards.ceilingKey( shardCreateTime );
+
+        //guard against falling off either end of the map
+        final long lessThanDelta = lessThanKey == null ? Long.MAX_VALUE : shardCreateTime - lessThanKey;
+
+        final long greaterThanDelta = greaterThanKey == null ? Long.MAX_VALUE : greaterThanKey - shardCreateTime;
+
+        if(lessThanDelta < delta || greaterThanDelta < delta ){
+            this.shards.put( shardCreateTime, shard );
+
+            if(shard.compareTo( minShardByIndex ) < 0){
+                minShardByIndex = shard;
+            }
+
+            return true;
+        }
+
+        return false;
+    }
+
+
+    /**
+     * Add the n-1 shard to the set.  This is required because nodes that have not yet updated their
+     * shard caches may still be writing to and reading from the n-1 shard only
+     *
+     * @param shard The shard to possibly add as a neighbor
+     * @return True if this shard was added as a neighbor, false otherwise
+     */
+    public boolean setNeighbor( final Shard shard ){
+
+        //not in the transition state don't set the neighbor, it will slow seeks down
+        if(!isRolling()){
+            return false;
+        }
+
+
+        neighbor = shard;
+        this.shards.put( shard.getCreatedTime(), shard );
+        return true;
+
+    }
+
+
+    /**
+     * Get the entries that we should read from.
+     *
+     * @return
+     */
+    public Collection<Shard> getReadShards(final long currentTime) {
+
+        /**
+         * The group is still rolling or still awaiting compaction (i.e. it holds more than 1 shard),
+         * so reads must combine every shard in the group
+         */
+        if(isRolling() || needsCompaction( currentTime )){
+            return shards.values();
+        }
+
+        return Collections.singleton(minShardByIndex);
+    }
+
+
+    /**
+     * Get all shard entries in the group.  We write to every shard until the group has been compacted
+     *
+     * @return
+     */
+    public Collection<Shard> getWriteShards() {
+        return shards.values();
+    }
+
+
+    /**
+     * Get the shard all compactions should write to
+     * @return
+     */
+    public Shard getMergeTarget(){
+        return minShardByIndex;
+    }
+
+
+    /**
+     * Returns true if the newest created shard is past currentTime - delta
+     * @param currentTime The current system time in milliseconds
+     * @return True if these shards can safely be combined into a single shard, false otherwise
+     */
+    public boolean needsCompaction(final long currentTime){
+
+        /**
+         * We don't have enough shards to compact, ignore
+         */
+        if(shards.size() < 2){
+            return false;
+        }
+
+
+        final long maxTimestamp = shards.lastKey();
+
+
+        return currentTime - delta > maxTimestamp;
+    }
+
+
+    /**
+     * Return true if the group is still rolling.  If this is the case, we want to include the n-1 entry, since not
+     * every node may have seen the newest shard yet and compaction is not safe to perform
+     * @return
+     */
+    private boolean isRolling(){
+       return shards.size() > 1;
+    }
+
+
+    /**
+     * Return true if this shard can be deleted AFTER all of the data in it has been moved
+     * @param shard
+     * @return
+     */
+    public boolean canBeDeleted(final Shard shard){
+        //if we're a neighbor shard (n-1) or the target compaction shard, we can't be deleted
+        //we purposefully use .equals here, since 2 shards might have the same index with different timestamps (unlikely but could happen)
+        if(shard == neighbor ||  getMergeTarget().equals( shard )){
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Compares 2 shard create times.  Intentionally does not handle nulls.
+     */
+    private static final class ShardTimeComparator implements Comparator<Long> {
+
+        public static final ShardTimeComparator INSTANCE = new ShardTimeComparator();
+
+
+        @Override
+        public int compare( final Long o1, final Long o2 ) {
+            return o1.compareTo( o2 );
+        }
+    }
+
+
+}

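For readers following the refactor, here is a minimal sketch of how a caller would use a ShardEntryGroup during the delta window. It is illustrative only: the strategy, scope, node id, edge, timestamp and writeToShard callback are assumed names, not code from this commit. The point is that writes fan out to every shard in the group, while reads combine all of the group's shards until it has been compacted into its merge target.

    // sketch, not part of the commit: "strategy" is an EdgeShardStrategy, "writeToShard" a hypothetical callback
    ShardEntryGroup group =
            strategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, edgeType );

    // during the delta window every shard in the group receives the write
    for ( Shard shard : group.getWriteShards() ) {
        writeToShard( shard.getShardIndex(), edge );
    }

    // reads must merge every shard in the group until compaction removes the extra shards
    Collection<Shard> readFrom = group.getReadShards( timeService.getCurrentTime() );
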
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
index 814700d..2a19579 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
@@ -67,7 +67,7 @@ public interface ShardedEdgeSerialization {
      * @param shards The shards to iterate when searching
      */
     Iterator<MarkedEdge> getEdgeVersions( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
-                                          SearchByEdge search, Iterator<ShardEntries> shards );
+                                          SearchByEdge search, Iterator<ShardEntryGroup> shards );
 
     /**
      * Get an iterator of all edges by edge type originating from source node
@@ -78,7 +78,7 @@ public interface ShardedEdgeSerialization {
      * @param shards The shards to iterate when searching
      */
     Iterator<MarkedEdge> getEdgesFromSource( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
-                                             SearchByEdgeType search, Iterator<ShardEntries> shards );
+                                             SearchByEdgeType search, Iterator<ShardEntryGroup> shards );
 
 
     /**
@@ -90,7 +90,7 @@ public interface ShardedEdgeSerialization {
      * @param shards The shards to iterate when searching
      */
     Iterator<MarkedEdge> getEdgesFromSourceByTargetType( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
-                                                         SearchByIdType search, Iterator<ShardEntries> shards );
+                                                         SearchByIdType search, Iterator<ShardEntryGroup> shards );
 
     /**
      * Get an iterator of all edges by edge type pointing to the target node.  Returns all versions
@@ -101,7 +101,7 @@ public interface ShardedEdgeSerialization {
      * @param shards The shards to iterate when searching
      */
     Iterator<MarkedEdge> getEdgesToTarget( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
-                                           SearchByEdgeType search, Iterator<ShardEntries> shards );
+                                           SearchByEdgeType search, Iterator<ShardEntryGroup> shards );
 
 
     /**
@@ -114,5 +114,5 @@ public interface ShardedEdgeSerialization {
      * @param shards The shards to iterate when searching
      */
     Iterator<MarkedEdge> getEdgesToTargetBySourceType( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
-                                                       SearchByIdType search, Iterator<ShardEntries> shards );
+                                                       SearchByIdType search, Iterator<ShardEntryGroup> shards );
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
index 3644210..d0e642c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/EdgeSearcher.java
@@ -7,7 +7,7 @@ import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
 import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.Edge;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 
 import com.google.common.base.Optional;
 import com.netflix.astyanax.Serializer;
@@ -28,11 +28,11 @@ public abstract class EdgeSearcher<R, C, T> implements ColumnParser<C, T>, Itera
     protected final Optional<Edge> last;
     protected final long maxTimestamp;
     protected final ApplicationScope scope;
-    protected final Iterator<ShardEntries> shards;
+    protected final Iterator<ShardEntryGroup> shards;
 
 
     protected EdgeSearcher( final ApplicationScope scope, final long maxTimestamp, final Optional<Edge> last,
-                            final Iterator<ShardEntries> shards ) {
+                            final Iterator<ShardEntryGroup> shards ) {
         this.scope = scope;
         this.maxTimestamp = maxTimestamp;
         this.last = last;
@@ -49,10 +49,10 @@ public abstract class EdgeSearcher<R, C, T> implements ColumnParser<C, T>, Itera
     @Override
     public ScopedRowKey<ApplicationScope, R> next() {
         /**
-         * Todo, multi scan
+         * TODO Shard fix this
          */
         return ScopedRowKey
-                .fromKey( scope, generateRowKey( shards.next().getEntries().iterator().next().getShardIndex() ) );
+                .fromKey( scope, generateRowKey( shards.next().getMergeTarget().getShardIndex() ) );
     }
 
 

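The "TODO Shard fix this" comment above still consumes only the merge target of each group, so a seek touches a single row. One possible shape for the multi-row scan that TODO points at is sketched below; rowKeysForGroup is a hypothetical helper and currentTime an assumed parameter, neither is part of this commit.

    // hypothetical helper: build one row key per shard in the group instead of only the merge target
    private List<ScopedRowKey<ApplicationScope, R>> rowKeysForGroup( final ShardEntryGroup group,
                                                                     final long currentTime ) {
        final List<ScopedRowKey<ApplicationScope, R>> rowKeys = new ArrayList<>();

        for ( Shard shard : group.getReadShards( currentTime ) ) {
            rowKeys.add( ScopedRowKey.fromKey( scope, generateRowKey( shard.getShardIndex() ) ) );
        }

        return rowKeys;
    }
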
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index b808f2f..dbf8382 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -20,40 +20,33 @@
 package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
 
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Iterator;
-import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.commons.collections4.iterators.PushbackIterator;
-
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
-import org.apache.usergrid.persistence.graph.Edge;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.MarkedEdge;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.impl.SimpleSearchByEdgeType;
 import org.apache.usergrid.persistence.graph.impl.SimpleSearchByIdType;
-import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.common.base.Optional;
 import com.google.inject.Inject;
 import com.netflix.astyanax.Keyspace;
-import com.netflix.astyanax.MutationBatch;
 import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 
 
@@ -63,9 +56,7 @@ import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
-    private static final Logger LOG = LoggerFactory.getLogger(NodeShardAllocationImpl.class);
-
-    private static final MinShardTimeComparator MIN_SHARD_TIME_COMPARATOR = new MinShardTimeComparator();
+    private static final Logger LOG = LoggerFactory.getLogger( NodeShardAllocationImpl.class );
 
     private final EdgeShardSerialization edgeShardSerialization;
     private final EdgeColumnFamilies edgeColumnFamilies;
@@ -93,79 +84,13 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
     @Override
-    public Iterator<Shard> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
-                                      final Optional<Shard> maxShardId, final String... edgeTypes ) {
+    public Iterator<ShardEntryGroup> getShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+                                                final Optional<Shard> maxShardId, final String... edgeTypes ) {
 
         final Iterator<Shard> existingShards =
                 edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, maxShardId, edgeTypes );
 
-        final PushbackIterator<Shard> pushbackIterator = new PushbackIterator( existingShards );
-
-
-        final long minConflictTime = getMinTime();
-
-
-        final List<Shard> futures = new ArrayList<>();
-
-
-        //loop through all shards, any shard > now+1 should be deleted
-        while ( pushbackIterator.hasNext() ) {
-
-            final Shard shard = pushbackIterator.next();
-
-            //we're done, our current time uuid is greater than the value stored
-            if ( shard.getCreatedTime() < minConflictTime ) {
-                //push it back into the iterator
-                pushbackIterator.pushback( shard );
-                break;
-            }
-
-            futures.add( shard );
-        }
-
-
-        //clean up our future
-        Collections.sort( futures, MIN_SHARD_TIME_COMPARATOR );
-
-
-        //we have more than 1 future value, we need to remove it
-
-        MutationBatch cleanup = keyspace.prepareMutationBatch();
-
-        //remove all futures except the last one, it is the only value we shouldn't lazy remove
-        for ( int i = 1; i < futures.size(); i++ ) {
-            final Shard toRemove = futures.get( i );
-
-            final MutationBatch batch = edgeShardSerialization
-                    .removeEdgeMeta( scope, nodeId, nodeType, toRemove.getShardIndex(), edgeTypes );
-
-            cleanup.mergeShallow( batch );
-        }
-
-
-        try {
-            cleanup.execute();
-        }
-        catch ( ConnectionException e ) {
-            throw new GraphRuntimeException( "Unable to remove future shards, mutation error", e );
-        }
-
-
-        final int futuresSize = futures.size();
-
-        if ( futuresSize > 0 ) {
-            pushbackIterator.pushback( futures.get( 0 ) );
-        }
-
-
-        /**
-         * Nothing to iterate, return an iterator with 0.
-         */
-        if ( !pushbackIterator.hasNext() ) {
-            pushbackIterator.pushback( new Shard( 0l, 0l ) );
-        }
-
-        return pushbackIterator;
+        return new ShardEntryGroupIterator( existingShards, graphFig.getShardMinDelta() );
     }
 
 
@@ -173,8 +98,13 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
     public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
                                   final String... edgeType ) {
 
-        final Iterator<Shard> maxShards =  edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
-
+        /**
+         * TODO, we should change this to seek the shard based on a value. This way we can always split any shard,
+         * not just the
+         * latest
+         */
+        final Iterator<Shard> maxShards =
+                edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
 
 
         //if the first shard has already been allocated, do nothing.
@@ -187,22 +117,22 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
         final Shard maxShard = maxShards.next();
 
 
-
         /**
          * Nothing to do, it's been created very recently, we don't create a new one
          */
-        if(maxShard.getCreatedTime() >= getMinTime()){
+        if ( maxShard.getCreatedTime() >= getMinTime() ) {
             return false;
         }
 
 
         /**
-        * Check out if we have a count for our shard allocation
-        */
+         * Check if we have a count for our shard allocation
+         */
 
         final long count =
                 nodeShardApproximation.getCount( scope, nodeId, nodeType, maxShard.getShardIndex(), edgeType );
 
+
         if ( count < graphFig.getShardSize() ) {
             return false;
         }
@@ -214,41 +144,57 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
         Iterator<MarkedEdge> edges;
 
+        final long delta = graphFig.getShardMinDelta();
+
+        final ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
+        shardEntryGroup.addShard( maxShard );
+
+        final Iterator<ShardEntryGroup> shardEntryGroupIterator = Collections.singleton( shardEntryGroup ).iterator();
+
         /**
          * This is fugly, I think our allocation interface needs to get more declarative
          */
-        if(nodeType == NodeType.SOURCE){
+        if ( nodeType == NodeType.SOURCE ) {
 
-            if(edgeType.length == 1){
-                edges = shardedEdgeSerialization.getEdgesFromSource(edgeColumnFamilies, scope, new SimpleSearchByEdgeType( nodeId,  edgeType[0], Long.MAX_VALUE, null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator()) ;
+            if ( edgeType.length == 1 ) {
+                edges = shardedEdgeSerialization.getEdgesFromSource( edgeColumnFamilies, scope,
+                        new SimpleSearchByEdgeType( nodeId, edgeType[0], Long.MAX_VALUE, null ),
+                        shardEntryGroupIterator );
             }
 
-            else if(edgeType.length == 2){
-                edges = shardedEdgeSerialization.getEdgesFromSourceByTargetType(edgeColumnFamilies, scope, new SimpleSearchByIdType( nodeId,  edgeType[0], Long.MAX_VALUE,  edgeType[1], null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator());
+            else if ( edgeType.length == 2 ) {
+                edges = shardedEdgeSerialization.getEdgesFromSourceByTargetType( edgeColumnFamilies, scope,
+                        new SimpleSearchByIdType( nodeId, edgeType[0], Long.MAX_VALUE, edgeType[1], null ),
+                        shardEntryGroupIterator );
             }
 
-           else{
+            else {
                 throw new UnsupportedOperationException( "More than 2 edge types aren't supported" );
             }
+        }
+        else {
 
-        }    else{
-
-            if(edgeType.length == 1){
-                edges = shardedEdgeSerialization.getEdgesToTarget(edgeColumnFamilies, scope, new SimpleSearchByEdgeType( nodeId,  edgeType[0], Long.MAX_VALUE, null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator()) ;
+            if ( edgeType.length == 1 ) {
+                edges = shardedEdgeSerialization.getEdgesToTarget( edgeColumnFamilies, scope,
+                        new SimpleSearchByEdgeType( nodeId, edgeType[0], Long.MAX_VALUE, null ),
+                        shardEntryGroupIterator );
             }
 
-            else if(edgeType.length == 2){
-                edges = shardedEdgeSerialization.getEdgesToTargetBySourceType(edgeColumnFamilies, scope, new SimpleSearchByIdType( nodeId,  edgeType[0], Long.MAX_VALUE,  edgeType[1], null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator());
+            else if ( edgeType.length == 2 ) {
+                edges = shardedEdgeSerialization.getEdgesToTargetBySourceType( edgeColumnFamilies, scope,
+                        new SimpleSearchByIdType( nodeId, edgeType[0], Long.MAX_VALUE, edgeType[1], null ),
+                        shardEntryGroupIterator );
             }
 
-           else{
+            else {
                 throw new UnsupportedOperationException( "More than 2 edge types aren't supported" );
             }
         }
 
 
-        if(!edges.hasNext()){
-            LOG.warn( "Tried to allocate a new shard for node id {} with edge types {}, but no max value could be found in that row", nodeId, edgeType );
+        if ( !edges.hasNext() ) {
+            LOG.warn( "Tried to allocate a new shard for node id {} with edge types {}, "
+                    + "but no max value could be found in that row", nodeId, edgeType );
             return false;
         }
 
@@ -260,21 +206,34 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
         try {
-            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, nodeType,  marked.getTimestamp(), createTimestamp, edgeType ).execute();
+            this.edgeShardSerialization
+                    .writeEdgeMeta( scope, nodeId, nodeType, marked.getTimestamp(), createTimestamp, edgeType )
+                    .execute();
         }
         catch ( ConnectionException e ) {
             throw new GraphRuntimeException( "Unable to write the new edge metadata" );
         }
 
 
-
         return true;
     }
 
 
     @Override
     public long getMinTime() {
-        return timeService.getCurrentTime() - ( 2 * graphFig.getShardCacheTimeout() );
+
+        final long minimumAllowed = 2 * graphFig.getShardCacheTimeout();
+
+        final long minDelta = graphFig.getShardMinDelta();
+
+
+        if ( minDelta < minimumAllowed ) {
+            throw new GraphRuntimeException( String.format(
+                    "You must configure the property %s to be >= 2 x %s.  Otherwise you risk losing data",
+                    GraphFig.SHARD_MIN_DELTA, GraphFig.SHARD_CACHE_TIMEOUT ) );
+        }
+
+        return timeService.getCurrentTime() - minDelta;
     }
 
 
@@ -294,6 +253,4 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
             return result;
         }
     }
-
-
 }

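The reworked getMinTime() ties the shard minimum delta to the shard cache timeout so that a shard created inside the cache window can never be treated as settled. A small worked example with illustrative numbers (these are not the GraphFig defaults):

    // illustrative values only
    long shardCacheTimeout = 30000;                   // caches may serve stale shard metadata for up to 30s
    long shardMinDelta     = 2 * shardCacheTimeout;   // smallest legal SHARD_MIN_DELTA: 60s
    long minTime           = timeService.getCurrentTime() - shardMinDelta;
    // any shard with createdTime >= minTime is still "in flight", so auditMaxShard()
    // declines to allocate a new shard while the current max shard is newer than minTime
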
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
index 73e8d4f..ad30dc6 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardCacheImpl.java
@@ -23,8 +23,9 @@ import java.beans.PropertyChangeEvent;
 import java.beans.PropertyChangeListener;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.Iterator;
-import java.util.TreeSet;
+import java.util.TreeMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
@@ -35,7 +36,7 @@ import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardA
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 import org.apache.usergrid.persistence.graph.serialization.util.IterableUtil;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -95,7 +96,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+    public ShardEntryGroup getWriteShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
                                         final long timestamp, final String... edgeType ) {
 
 
@@ -109,7 +110,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
             throw new GraphRuntimeException( "Unable to load shard key for graph", e );
         }
 
-        final ShardEntries shardId = entry.getShardId( timestamp );
+        final ShardEntryGroup shardId = entry.getShardId( timestamp );
 
         if ( shardId != null ) {
             return shardId;
@@ -121,7 +122,7 @@ public class NodeShardCacheImpl implements NodeShardCache {
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
+    public Iterator<ShardEntryGroup> getReadShards( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
                                                  final long maxTimestamp, final String... edgeType ) {
         final CacheKey key = new CacheKey( scope, nodeId, nodeType, edgeType );
         CacheEntry entry;
@@ -133,10 +134,10 @@ public class NodeShardCacheImpl implements NodeShardCache {
             throw new GraphRuntimeException( "Unable to load shard key for graph", e );
         }
 
-        Iterator<ShardEntries> iterator = entry.getShards( maxTimestamp );
+        Iterator<ShardEntryGroup> iterator = entry.getShards( maxTimestamp );
 
         if ( iterator == null ) {
-            return Collections.<ShardEntries>emptyList().iterator();
+            return Collections.<ShardEntryGroup>emptyList().iterator();
         }
 
         return iterator;
@@ -157,18 +158,17 @@ public class NodeShardCacheImpl implements NodeShardCache {
                                       @Override
                                       public CacheEntry load( final CacheKey key ) throws Exception {
 
-                                          //
-                                          //                          /**
-                                          //                           * Perform an audit in case we need to allocate
-                                          // a new shard
-                                          //                           */
-                                          //                          nodeShardAllocation.auditMaxShard( key.scope,
-                                          // key.id, key.types );
-                                          //                          //TODO, we need to put some sort of upper
-                                          // bounds on this, it could possibly get too large
 
+//                                                                    /**
+//                                                                     * Perform an audit in case we need to allocate a new shard
+//                                                                     */
+//                                                                    nodeShardAllocation.auditMaxShard( key.scope,
+//                                          // key.id, key.types );
+//                                          //                          //TODO, we need to put some sort of upper
+//                                          // bounds on this, it could possibly get too large
 
-                                          final Iterator<Shard> edges = nodeShardAllocation
+
+                                          final Iterator<ShardEntryGroup> edges = nodeShardAllocation
                                                   .getShards( key.scope, key.id, key.nodeType, Optional.<Shard>absent(),
                                                           key.types );
 
@@ -242,38 +242,64 @@ public class NodeShardCacheImpl implements NodeShardCache {
         /**
          * Get the list of all segments
          */
-        private TreeSet<Shard> shards;
+        private TreeMap<Long, ShardEntryGroup> shards;
 
 
-        private CacheEntry( final Iterator<Shard> shards ) {
-            this.shards = new TreeSet<>();
+        private CacheEntry( final Iterator<ShardEntryGroup> shards ) {
+            this.shards = new TreeMap<>(ShardEntriesComparator.INSTANCE);
 
-            for ( Shard shard : IterableUtil.wrap( shards ) ) {
-                this.shards.add( shard );
+            for ( ShardEntryGroup shard : IterableUtil.wrap( shards ) ) {
+                this.shards.put(shard.getMergeTarget().getShardIndex() , shard );
             }
         }
 
 
         /**
-         * Get the shard's UUID for the uuid we're attempting to seek from
+         * Get the shard entry group that should hold the given timestamp
          */
-        public ShardEntries getShardId( final Long seek ) {
-            return bootstrapEntry();
-            //            return this.shards.floor( seek );
+        public ShardEntryGroup getShardId( final Long seek ) {
+            final Long entry = getShardEntriesForValue( seek );
+
+
+            return shards.get( entry );
         }
 
 
         /**
          * Get all shards <= this one in decending order
          */
-        public Iterator<ShardEntries> getShards( final Long maxShard ) {
-            return Collections.singleton( bootstrapEntry() ).iterator();
-            //            return this.shards.headSet(maxShard, true  ).descendingIterator();
+        public Iterator<ShardEntryGroup> getShards( final Long maxShard ) {
+           final Long entry = getShardEntriesForValue( maxShard );
+
+
+            return shards.tailMap( entry ).values().iterator();
+
+        }
+
+
+        /**
+         * Get the shard entry that should hold this value
+         * @param value
+         * @return
+         */
+        private long getShardEntriesForValue(final Long value){
+              return shards.lowerKey( value );
         }
 
 
-        private ShardEntries bootstrapEntry() {
-            return new ShardEntries( Collections.singleton( new Shard( 0l, 0l ) ) );
+
+
+
+        private static class ShardEntriesComparator implements Comparator<Long> {
+
+            private static final ShardEntriesComparator INSTANCE = new ShardEntriesComparator();
+
+
+
+            @Override
+            public int compare( final Long o1, final Long o2 ) {
+                return Long.compare( o1, o2 ) * -1;
+            }
         }
     }
 }

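The reworked CacheEntry keys each ShardEntryGroup by the shard index of its merge target and seeks the group for a given timestamp. A minimal sketch of the intended routing follows; it assumes (as the surrounding sharding code suggests, but this commit does not state outright) that a shard whose index is i owns edges with timestamps >= i, and it uses a natural-order map with floorEntry purely for clarity, whereas the diff uses a descending comparator to drive the tail-map read path.

    import java.util.Map;
    import java.util.TreeMap;

    // sketch only: route an edge timestamp to the group whose merge-target index is the
    // greatest index <= that timestamp; null means the caller must fall back to the 0L bootstrap shard
    TreeMap<Long, ShardEntryGroup> groupsByIndex = new TreeMap<>();

    ShardEntryGroup groupFor( final long edgeTimestamp ) {
        final Map.Entry<Long, ShardEntryGroup> entry = groupsByIndex.floorEntry( edgeTimestamp );
        return entry == null ? null : entry.getValue();
    }
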
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
new file mode 100644
index 0000000..39e198e
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIterator.java
@@ -0,0 +1,99 @@
+package org.apache.usergrid.persistence.graph.serialization.impl.shard.impl;
+
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.commons.collections4.iterators.PushbackIterator;
+
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
+
+
+/**
+ * Utility class that will take an iterator of all shards, and combine them into an iterator
+ * of ShardEntryGroups.  These groups can then be used in a distributed system to handle concurrent reads and writes
+ */
+public class ShardEntryGroupIterator implements Iterator<ShardEntryGroup> {
+
+
+    private ShardEntryGroup next;
+    private final PushbackIterator<Shard> sourceIterator;
+    private final long minDelta;
+
+
+    /**
+     * Create a shard iterator
+     * @param sourceIterator The iterator of all shards.  Order is expected to be by the  shard index from Long.MAX to Long.MIN
+     * @param minDelta The minimum delta we allow to consider shards the same group
+     */
+    public ShardEntryGroupIterator( final Iterator<Shard> sourceIterator, final long minDelta ) {
+        this.sourceIterator = new PushbackIterator( sourceIterator );
+        this.minDelta = minDelta;
+    }
+
+
+    @Override
+    public boolean hasNext() {
+        if ( next == null ) {
+            advance();
+        }
+
+        return next != null;
+    }
+
+
+    @Override
+    public ShardEntryGroup next() {
+        if ( !hasNext() ) {
+            throw new NoSuchElementException( "No more elements exist in iterator" );
+        }
+
+
+        final ShardEntryGroup toReturn = next;
+
+        next = null;
+
+        return toReturn;
+    }
+
+
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException( "Remove is not supported" );
+    }
+
+
+    /**
+     * Advance to the next element
+     */
+    private void advance() {
+
+
+        final ShardEntryGroup shardEntryGroup = new ShardEntryGroup( minDelta );
+
+
+        /**
+         * We loop through until we've exhausted our source, or we hit a shard whose create time is more
+         * than the min delta away from the current group
+         */
+        while ( sourceIterator.hasNext() ) {
+
+            final Shard shard = sourceIterator.next();
+
+            //the shard fits within the delta of the current group, keep accumulating
+            if ( shardEntryGroup.addShard( shard ) ) {
+                continue;
+            }
+
+            //we can't add this shard to the current group.  Track it as the n-1 neighbor, push it back
+            //so it seeds the next group, and stop
+            shardEntryGroup.setNeighbor( shard );
+
+            sourceIterator.pushback( shard );
+
+            break;
+        }
+
+        //only emit the group if it actually collected shards
+        if ( !shardEntryGroup.getWriteShards().isEmpty() ) {
+            next = shardEntryGroup;
+        }
+    }
+}

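A quick usage sketch of the iterator above, wired the same way NodeShardAllocationImpl.getShards() now does it; the scope, nodeId and edgeTypes variables are assumed for illustration.

    // group the raw, descending shard metadata into ShardEntryGroups using the configured minimum delta
    Iterator<Shard> allShards = edgeShardSerialization
            .getEdgeMetaData( scope, nodeId, NodeType.SOURCE, Optional.<Shard>absent(), edgeTypes );

    Iterator<ShardEntryGroup> groups =
            new ShardEntryGroupIterator( allShards, graphFig.getShardMinDelta() );

    while ( groups.hasNext() ) {
        final ShardEntryGroup group = groups.next();
        // each group is read from and written to as a unit, and compacted once needsCompaction() returns true
    }
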
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
index a0d1e6e..d2e2ddb 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardedEdgeSerializationImpl.java
@@ -46,7 +46,7 @@ import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKey;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.RowKeyType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeUtils;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -200,77 +200,77 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
 
         final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
 
-        final ShardEntries sourceRowKeyShard =
+        final ShardEntryGroup sourceRowKeyShard =
                 writeEdgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type );
 
         final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceCf =
                 columnFamilies.getSourceNodeCfName();
 
 
-        for ( Shard shard : sourceRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-            final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
-            op.writeEdge( sourceCf, sourceRowKey, sourceEdge );
-            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type );
-        }
-
-
-        final ShardEntries sourceWithTypeRowKeyShard = writeEdgeShardStrategy
-                .getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type, targetNodeType );
-
-        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetCf =
-                columnFamilies.getSourceNodeTargetTypeCfName();
-
-        for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-            final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
-
-            op.writeEdge( targetCf, sourceRowKeyType, sourceEdge );
-            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type, targetNodeType );
-        }
-
-
-        /**
-         * write edges from target<-source
-         */
-
-        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
-
-
-        final ShardEntries targetRowKeyShard =
-                writeEdgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type );
-
-        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceByTargetCf =
-                columnFamilies.getTargetNodeCfName();
-
-        for ( Shard shard : targetRowKeyShard.getEntries() ) {
-            final long shardId = shard.getShardIndex();
-            final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
-
-            op.writeEdge( sourceByTargetCf, targetRowKey, targetEdge );
-            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type );
-        }
-
-
-        final ShardEntries targetWithTypeRowKeyShard = writeEdgeShardStrategy
-                .getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type, sourceNodeType );
-
-        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetBySourceCf =
-                columnFamilies.getTargetNodeSourceTypeCfName();
-
-
-        for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-
-            final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, shardId );
-
-
-            op.writeEdge( targetBySourceCf, targetRowKeyType, targetEdge );
-            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type, sourceNodeType );
-        }
+//        for ( Shard shard : sourceRowKeyShard.getEntries() ) {
+//
+//            final long shardId = shard.getShardIndex();
+//            final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
+//            op.writeEdge( sourceCf, sourceRowKey, sourceEdge );
+//            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type );
+//        }
+//
+//
+//        final ShardEntryGroup sourceWithTypeRowKeyShard = writeEdgeShardStrategy
+//                .getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type, targetNodeType );
+//
+//        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetCf =
+//                columnFamilies.getSourceNodeTargetTypeCfName();
+//
+//        for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
+//
+//            final long shardId = shard.getShardIndex();
+//            final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
+//
+//            op.writeEdge( targetCf, sourceRowKeyType, sourceEdge );
+//            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type, targetNodeType );
+//        }
+//
+//
+//        /**
+//         * write edges from target<-source
+//         */
+//
+//        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
+//
+//
+//        final ShardEntryGroup targetRowKeyShard =
+//                writeEdgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type );
+//
+//        final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceByTargetCf =
+//                columnFamilies.getTargetNodeCfName();
+//
+//        for ( Shard shard : targetRowKeyShard.getEntries() ) {
+//            final long shardId = shard.getShardIndex();
+//            final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
+//
+//            op.writeEdge( sourceByTargetCf, targetRowKey, targetEdge );
+//            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type );
+//        }
+//
+//
+//        final ShardEntryGroup targetWithTypeRowKeyShard = writeEdgeShardStrategy
+//                .getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type, sourceNodeType );
+//
+//        final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetBySourceCf =
+//                columnFamilies.getTargetNodeSourceTypeCfName();
+//
+//
+//        for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
+//
+//            final long shardId = shard.getShardIndex();
+//
+//            final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, shardId );
+//
+//
+//            op.writeEdge( targetBySourceCf, targetRowKeyType, targetEdge );
+//            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type, sourceNodeType );
+//        }
 
         /**
          * Always a 0l shard, we're hard limiting 2b timestamps for the same edge
@@ -287,7 +287,7 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
 
     @Override
     public Iterator<MarkedEdge> getEdgeVersions( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
-                                                 final SearchByEdge search, final Iterator<ShardEntries> shards ) {
+                                                 final SearchByEdge search, final Iterator<ShardEntryGroup> shards ) {
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdge( search );
 
@@ -348,7 +348,7 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
     @Override
     public Iterator<MarkedEdge> getEdgesFromSource( final EdgeColumnFamilies columnFamilies,
                                                     final ApplicationScope scope, final SearchByEdgeType edgeType,
-                                                    final Iterator<ShardEntries> shards ) {
+                                                    final Iterator<ShardEntryGroup> shards ) {
 
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );
@@ -399,7 +399,7 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
     public Iterator<MarkedEdge> getEdgesFromSourceByTargetType( final EdgeColumnFamilies columnFamilies,
                                                                 final ApplicationScope scope,
                                                                 final SearchByIdType edgeType,
-                                                                final Iterator<ShardEntries> shards ) {
+                                                                final Iterator<ShardEntryGroup> shards ) {
 
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );
@@ -448,7 +448,7 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
     @Override
     public Iterator<MarkedEdge> getEdgesToTarget( final EdgeColumnFamilies columnFamilies, final ApplicationScope scope,
                                                   final SearchByEdgeType edgeType,
-                                                  final Iterator<ShardEntries> shards ) {
+                                                  final Iterator<ShardEntryGroup> shards ) {
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );
 
@@ -496,7 +496,7 @@ public class ShardedEdgeSerializationImpl implements ShardedEdgeSerialization {
     public Iterator<MarkedEdge> getEdgesToTargetBySourceType( final EdgeColumnFamilies columnFamilies,
                                                               final ApplicationScope scope,
                                                               final SearchByIdType edgeType,
-                                                              final Iterator<ShardEntries> shards ) {
+                                                              final Iterator<ShardEntryGroup> shards ) {
 
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
index a71960b..ef438a6 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/SizebasedEdgeShardStrategy.java
@@ -27,7 +27,7 @@ import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardS
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntryGroup;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.inject.Inject;
@@ -54,14 +54,14 @@ public class SizebasedEdgeShardStrategy implements EdgeShardStrategy {
 
 
     @Override
-    public ShardEntries getWriteShards( final ApplicationScope scope, final Id rowKeyId, final NodeType nodeType,
+    public ShardEntryGroup getWriteShards( final ApplicationScope scope, final Id rowKeyId, final NodeType nodeType,
                                         final long timestamp, final String... types ) {
         return shardCache.getWriteShards( scope, rowKeyId, nodeType, timestamp, types );
     }
 
 
     @Override
-    public Iterator<ShardEntries> getReadShards( final ApplicationScope scope, final Id rowKeyId,
+    public Iterator<ShardEntryGroup> getReadShards( final ApplicationScope scope, final Id rowKeyId,
                                                  final NodeType nodeType, final long maxTimestamp,
                                                  final String... types ) {
         return shardCache.getReadShards( scope, rowKeyId, nodeType, maxTimestamp, types );


[07/11] git commit: Finished refactor. Need to continue on shard allocation

Posted by to...@apache.org.
Finished refactor.  Need to continue on shard allocation


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/69a3faa2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/69a3faa2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/69a3faa2

Branch: refs/heads/USERGRID-188
Commit: 69a3faa2d09d984d0153cea69511b16ee18bd4f0
Parents: 7425ba5
Author: Todd Nine <tn...@apigee.com>
Authored: Wed Jul 16 11:08:10 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Wed Jul 16 17:28:35 2014 -0600

----------------------------------------------------------------------
 .../UniqueValueSerializationStrategyImpl.java   |   2 +-
 .../core/astyanax/MultiTennantColumnFamily.java |  13 +-
 .../persistence/graph/guice/GraphModule.java    |  35 +-
 .../graph/serialization/EdgeSerialization.java  |   2 +-
 .../impl/EdgeSerializationImpl.java             | 940 +------------------
 .../serialization/impl/shard/DirectedEdge.java  |  41 +
 .../impl/shard/EdgeColumnFamilies.java          |  60 ++
 .../serialization/impl/shard/EdgeRowKey.java    |  44 +
 .../impl/shard/EdgeShardStrategy.java           |  27 +-
 .../impl/shard/NodeShardApproximation.java      |   2 +
 .../graph/serialization/impl/shard/RowKey.java  |  54 ++
 .../serialization/impl/shard/RowKeyType.java    |  60 ++
 .../serialization/impl/shard/ShardEntries.java  |   3 +-
 .../impl/shard/ShardedEdgeSerialization.java    | 118 +++
 .../count/NodeShardCounterSerialization.java    |   2 +-
 .../NodeShardCounterSerializationImpl.java      |  89 +-
 .../impl/shard/count/ShardKey.java              |  36 +-
 .../impl/shard/impl/EdgeRowKey.java             |  42 -
 .../impl/shard/impl/EdgeRowKeySerializer.java   |  38 +-
 .../impl/shard/impl/EdgeSearcher.java           | 124 +++
 .../impl/shard/impl/EdgeSerializer.java         |  77 ++
 .../shard/impl/EdgeShardSerializationImpl.java  |   5 +-
 .../shard/impl/NodeShardAllocationImpl.java     |  79 +-
 .../impl/shard/impl/NodeShardCacheImpl.java     |  85 +-
 .../impl/shard/impl/RowSerializer.java          |  64 ++
 .../impl/shard/impl/RowTypeSerializer.java      |  60 ++
 .../impl/shard/impl/ShardRowIterator.java       | 112 +++
 .../impl/ShardedEdgeSerializationImpl.java      | 568 +++++++++++
 .../shard/impl/SizebasedEdgeColumnFamilies.java | 150 +++
 .../shard/impl/SizebasedEdgeShardStrategy.java  |  43 +-
 .../impl/shard/impl/SourceEdgeSearcher.java     |  22 +
 .../graph/GraphManagerShardingIT.java           |   2 +
 .../impl/shard/NodeShardAllocationTest.java     | 783 +++++++--------
 33 files changed, 2210 insertions(+), 1572 deletions(-)
----------------------------------------------------------------------

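The file list above shows the shape of the refactor: the monolithic EdgeSerializationImpl shrinks by roughly 900 lines, and the column-family definitions, row keys, searchers and the sharded read/write logic move into the new shard package (EdgeColumnFamilies, DirectedEdge, RowKey, RowKeyType, EdgeRowKey, EdgeSearcher, ShardRowIterator, ShardedEdgeSerialization and its impl). A rough sketch of the new ShardedEdgeSerialization surface, inferred only from the call sites in the EdgeSerializationImpl diff further down; the actual 118-line interface is not reproduced in this message, so the exact method set and parameter names here are assumptions:

    // Inferred sketch, not the committed source (imports omitted): every
    // operation now takes the EdgeColumnFamilies to write into or read from,
    // and reads additionally take the iterator of shard entries resolved by
    // the EdgeShardStrategy.
    public interface ShardedEdgeSerialization {

        MutationBatch writeEdge( EdgeColumnFamilies cfs, ApplicationScope scope,
                                 MarkedEdge edge, UUID timestamp );

        MutationBatch deleteEdge( EdgeColumnFamilies cfs, ApplicationScope scope,
                                  MarkedEdge edge, UUID timestamp );

        Iterator<MarkedEdge> getEdgesFromSource( EdgeColumnFamilies cfs, ApplicationScope scope,
                                                 SearchByEdgeType search, Iterator<ShardEntries> shards );

        // ...plus getEdgeVersions, getEdgesFromSourceByTargetType, getEdgesToTarget and
        // getEdgesToTargetBySourceType, invoked the same way in the diff below
    }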

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/mvcc/stage/write/UniqueValueSerializationStrategyImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/mvcc/stage/write/UniqueValueSerializationStrategyImpl.java b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/mvcc/stage/write/UniqueValueSerializationStrategyImpl.java
index 73c038e..b7e113e 100644
--- a/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/mvcc/stage/write/UniqueValueSerializationStrategyImpl.java
+++ b/stack/corepersistence/collection/src/main/java/org/apache/usergrid/persistence/collection/mvcc/stage/write/UniqueValueSerializationStrategyImpl.java
@@ -56,7 +56,7 @@ public class UniqueValueSerializationStrategyImpl
 
     private static final EntityVersionSerializer ENTITY_VERSION_SER = new EntityVersionSerializer();
 
-    private static final MultiTennantColumnFamily<CollectionScope, Field, EntityVersion> 
+    private static final MultiTennantColumnFamily<CollectionScope, Field, EntityVersion>
         CF_UNIQUE_VALUES =
             new MultiTennantColumnFamily<CollectionScope, Field, EntityVersion>( "Unique_Values",
                 ROW_KEY_SER,

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/common/src/main/java/org/apache/usergrid/persistence/core/astyanax/MultiTennantColumnFamily.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/common/src/main/java/org/apache/usergrid/persistence/core/astyanax/MultiTennantColumnFamily.java b/stack/corepersistence/common/src/main/java/org/apache/usergrid/persistence/core/astyanax/MultiTennantColumnFamily.java
index 19e5aae..6234184 100644
--- a/stack/corepersistence/common/src/main/java/org/apache/usergrid/persistence/core/astyanax/MultiTennantColumnFamily.java
+++ b/stack/corepersistence/common/src/main/java/org/apache/usergrid/persistence/core/astyanax/MultiTennantColumnFamily.java
@@ -33,20 +33,15 @@ import com.netflix.astyanax.model.ColumnFamily;
 public class MultiTennantColumnFamily<S extends ApplicationScope, K, V>
     extends ColumnFamily<ScopedRowKey<S, K>, V> {
 
-    public MultiTennantColumnFamily( 
-            final String columnFamilyName, 
-            final Serializer<ScopedRowKey<S, K>> keySerializer,
-            final Serializer<V> columnSerializer ) {
+    public MultiTennantColumnFamily( final String columnFamilyName, final Serializer<ScopedRowKey<S, K>> keySerializer,
+                                     final Serializer<V> columnSerializer ) {
 
         super( columnFamilyName, keySerializer, columnSerializer );
     }
 
 
-    public MultiTennantColumnFamily( 
-            final String columnFamilyName, 
-            final Serializer<ScopedRowKey<S, K>> keySerializer,
-            final Serializer<V> columnSerializer,
-            final Serializer<?> defaultValueSerializer ) {
+    public MultiTennantColumnFamily( final String columnFamilyName, final Serializer<ScopedRowKey<S, K>> keySerializer,
+                                     final Serializer<V> columnSerializer, final Serializer<?> defaultValueSerializer ) {
 
         super( columnFamilyName, keySerializer, columnSerializer, defaultValueSerializer );
     }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/guice/GraphModule.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/guice/GraphModule.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/guice/GraphModule.java
index aca8d00..3089889 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/guice/GraphModule.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/guice/GraphModule.java
@@ -43,17 +43,21 @@ import org.apache.usergrid.persistence.graph.serialization.NodeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.EdgeMetadataSerializationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.EdgeSerializationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.NodeSerializationImpl;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardCache;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardApproximationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardCounterSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.count.NodeShardCounterSerializationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.EdgeShardSerializationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardAllocationImpl;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardCacheImpl;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.ShardedEdgeSerializationImpl;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.SizebasedEdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.SizebasedEdgeShardStrategy;
 
 import com.google.inject.AbstractModule;
@@ -101,7 +105,6 @@ public class GraphModule extends AbstractModule {
         bind( EdgeShardSerialization.class ).to( EdgeShardSerializationImpl.class );
 
 
-
         //Repair/cleanup classes.
         bind( EdgeMetaRepair.class ).to( EdgeMetaRepairImpl.class );
         bind( EdgeDeleteRepair.class ).to( EdgeDeleteRepairImpl.class );
@@ -111,7 +114,9 @@ public class GraphModule extends AbstractModule {
          * Add our listeners
          */
         bind( NodeDeleteListener.class ).to( NodeDeleteListenerImpl.class );
-        bind( EdgeDeleteListener.class).to( EdgeDeleteListenerImpl.class );
+        bind( EdgeDeleteListener.class ).to( EdgeDeleteListenerImpl.class );
+
+
 
 
         /**
@@ -129,7 +134,7 @@ public class GraphModule extends AbstractModule {
         migrationBinding.addBinding().to( Key.get( EdgeMetadataSerialization.class ) );
 
         //bind each singleton to the multi set.  Otherwise we won't migrate properly
-        migrationBinding.addBinding().to( Key.get( EdgeSerialization.class, StorageEdgeSerialization.class ) );
+        migrationBinding.addBinding().to( Key.get( EdgeColumnFamilies.class, StorageEdgeSerialization.class ) );
 
         migrationBinding.addBinding().to( Key.get( EdgeShardSerialization.class ) );
         migrationBinding.addBinding().to( Key.get( NodeShardCounterSerialization.class ) );
@@ -143,22 +148,34 @@ public class GraphModule extends AbstractModule {
     @Singleton
     @Inject
     @StorageEdgeSerialization
-    public EdgeSerialization permanentStorageSerialization( final NodeShardCache cache, final Keyspace keyspace,
-                                                            final CassandraConfig cassandraConfig,
-                                                            final GraphFig graphFig,
-                                                            final NodeShardApproximation shardApproximation) {
+    public EdgeSerialization storageSerialization( final NodeShardCache cache, final Keyspace keyspace,
+                                                   final CassandraConfig cassandraConfig, final GraphFig graphFig,
+                                                   final NodeShardApproximation shardApproximation,
+                                                   @StorageEdgeSerialization
+                                                   final EdgeColumnFamilies edgeColumnFamilies ) {
 
         final EdgeShardStrategy sizeBasedStrategy = new SizebasedEdgeShardStrategy( cache, shardApproximation );
 
+
+        final ShardedEdgeSerialization serialization = new ShardedEdgeSerializationImpl(keyspace, cassandraConfig, graphFig, sizeBasedStrategy);
+
+
         final EdgeSerializationImpl edgeSerialization =
-                new EdgeSerializationImpl( keyspace, cassandraConfig, graphFig, sizeBasedStrategy );
+                new EdgeSerializationImpl( keyspace, cassandraConfig, graphFig, sizeBasedStrategy, edgeColumnFamilies,
+                        serialization );
 
 
         return edgeSerialization;
     }
 
 
-
+    @Provides
+    @Singleton
+    @Inject
+    @StorageEdgeSerialization
+    public EdgeColumnFamilies storageSerializationColumnFamilies() {
+        return new SizebasedEdgeColumnFamilies();
+    }
 }
 
 
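A minimal sketch of how the rewired module might be consumed, assuming GraphModule can be installed as-is alongside whatever modules bind Keyspace, CassandraConfig and GraphFig in the real application (those bindings are not part of this diff):

    // Sketch only: the provider methods above expose both the storage-backed
    // EdgeSerialization and the EdgeColumnFamilies under the
    // @StorageEdgeSerialization binding annotation, so a caller looks them up
    // with annotated keys rather than the bare types.
    Injector injector = Guice.createInjector( new GraphModule() /*, cassandra/config modules */ );

    EdgeSerialization storageEdges =
            injector.getInstance( Key.get( EdgeSerialization.class, StorageEdgeSerialization.class ) );

    EdgeColumnFamilies storageCfs =
            injector.getInstance( Key.get( EdgeColumnFamilies.class, StorageEdgeSerialization.class ) );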

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/EdgeSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/EdgeSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/EdgeSerialization.java
index b80b7ab..0caffc3 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/EdgeSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/EdgeSerialization.java
@@ -36,7 +36,7 @@ import com.netflix.astyanax.MutationBatch;
 /**
  * Simple interface for serializing ONLY an edge
  */
-public interface EdgeSerialization extends Migration {
+public interface EdgeSerialization {
 
 
     /**

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
index e49b99d..5a6e201 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
@@ -24,25 +24,19 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
-import java.util.NoSuchElementException;
 import java.util.UUID;
 
 import javax.inject.Inject;
 
 import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.DynamicCompositeType;
 
 import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
-import org.apache.usergrid.persistence.core.astyanax.ColumnNameIterator;
-import org.apache.usergrid.persistence.core.astyanax.ColumnParser;
 import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
 import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
 import org.apache.usergrid.persistence.core.astyanax.IdColDynamicCompositeSerializer;
 import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
-import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySerializer;
-import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
 import org.apache.usergrid.persistence.core.migration.Migration;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.core.util.ValidationUtils;
@@ -54,319 +48,75 @@ import org.apache.usergrid.persistence.graph.SearchByEdgeType;
 import org.apache.usergrid.persistence.graph.SearchByIdType;
 import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardStrategy;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.EdgeSearcher;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.ShardRowIterator;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
 import org.apache.usergrid.persistence.graph.serialization.util.EdgeUtils;
 import org.apache.usergrid.persistence.model.entity.Id;
 
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.inject.Singleton;
 import com.netflix.astyanax.Keyspace;
 import com.netflix.astyanax.MutationBatch;
 import com.netflix.astyanax.Serializer;
-import com.netflix.astyanax.model.Column;
 import com.netflix.astyanax.model.CompositeBuilder;
 import com.netflix.astyanax.model.CompositeParser;
 import com.netflix.astyanax.model.DynamicComposite;
-import com.netflix.astyanax.query.RowQuery;
 import com.netflix.astyanax.serializers.AbstractSerializer;
-import com.netflix.astyanax.serializers.LongSerializer;
-import com.netflix.astyanax.util.RangeBuilder;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.usergrid.persistence.core.astyanax.ColumnTypes.LONG_TYPE_REVERSED;
-import static org.apache.usergrid.persistence.core.astyanax.ColumnTypes.UUID_TYPE_REVERSED;
 
 
 /**
  * Serialization for edges.  Delegates partitioning to the sharding strategy.
  */
 @Singleton
-public class EdgeSerializationImpl implements EdgeSerialization, Migration {
+public class EdgeSerializationImpl implements EdgeSerialization  {
 
 
-    //Row key with no type
-    private static final RowSerializer ROW_SERIALIZER = new RowSerializer();
-
-    //row key with target id type
-    private static final RowTypeSerializer ROW_TYPE_SERIALIZER = new RowTypeSerializer();
-
-    private static final EdgeRowKeySerializer EDGE_ROW_KEY_SERIALIZER = new EdgeRowKeySerializer();
-
-    //Edge serializers
-    private static final EdgeSerializer EDGE_SERIALIZER = new EdgeSerializer();
-
-    private static final LongSerializer LONG_SERIALIZER = LongSerializer.get();
-
-
-    /**
-     * Constant for the dynamic composite comparator type we'll need
-     */
-    public static final String EDGE_DYNAMIC_COMPOSITE_TYPE =
-            //we purposefully associate lower case "l" and "u" with reversed types.  This way we can use
-            //the default serialization in Astayanax, but get reverse order in cassandra
-            DynamicCompositeType.class.getSimpleName() + "(s=>UTF8Type,l=>" + LONG_TYPE_REVERSED + ",u=>"
-                    + UUID_TYPE_REVERSED + ")";
-
-
-    /**
-     * Get all graph edge versions
-     */
-    private final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> graphEdgeVersionsCf;
-
-
-    // column families
-    /**
-     * Edges that are from the source node. The row key is the source node
-     */
-    private final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> sourceNodeEdgesCf;
-
-
-    /**
-     * Edges that are incoming to the target node.  The target node is the row key
-     */
-    private final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> targetNodeEdgesCf;
-
-    /**
-     * The edges that are from the source node with target type.  The source node is the row key.
-     */
-    private final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> sourceNodeTargetTypeCf;
-
-
-    /**
-     * The edges that are to the target node with the source type.  The target node is the row key
-     */
-    private final MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> targetNodeSourceTypeCf;
-
     protected final Keyspace keyspace;
     protected final CassandraConfig cassandraConfig;
     protected final GraphFig graphFig;
     protected final EdgeShardStrategy edgeShardStrategy;
+    protected final EdgeColumnFamilies edgeColumnFamilies;
+    protected final ShardedEdgeSerialization shardedEdgeSerialization;
 
 
     @Inject
     public EdgeSerializationImpl( final Keyspace keyspace, final CassandraConfig cassandraConfig,
-                                  final GraphFig graphFig, final EdgeShardStrategy edgeShardStrategy ) {
+                                  final GraphFig graphFig, final EdgeShardStrategy edgeShardStrategy,
+                                  final EdgeColumnFamilies edgeColumnFamilies,
+                                  final ShardedEdgeSerialization shardedEdgeSerialization ) {
+
 
         checkNotNull( "keyspace required", keyspace );
         checkNotNull( "cassandraConfig required", cassandraConfig );
         checkNotNull( "consistencyFig required", graphFig );
-        checkNotNull( "sourceNodeCfName required", edgeShardStrategy.getSourceNodeCfName() );
-        checkNotNull( "targetNodeCfName required", edgeShardStrategy.getTargetNodeCfName() );
-        checkNotNull( "sourceNodeTargetTypeCfName required", edgeShardStrategy.getSourceNodeTargetTypeCfName() );
-        checkNotNull( "targetNodeSourceTypeCfName required", edgeShardStrategy.getTargetNodeSourceTypeCfName() );
 
 
         this.keyspace = keyspace;
         this.cassandraConfig = cassandraConfig;
         this.graphFig = graphFig;
         this.edgeShardStrategy = edgeShardStrategy;
-
-        //initialize the CF's from our implementation
-        sourceNodeEdgesCf = new MultiTennantColumnFamily<>( edgeShardStrategy.getSourceNodeCfName(),
-                new OrganizationScopedRowKeySerializer<>( ROW_SERIALIZER ), EDGE_SERIALIZER );
-
-
-        targetNodeEdgesCf = new MultiTennantColumnFamily<>( edgeShardStrategy.getTargetNodeCfName(),
-                new OrganizationScopedRowKeySerializer<>( ROW_SERIALIZER ), EDGE_SERIALIZER );
-
-
-        sourceNodeTargetTypeCf = new MultiTennantColumnFamily<>( edgeShardStrategy.getSourceNodeTargetTypeCfName(),
-                new OrganizationScopedRowKeySerializer<>( ROW_TYPE_SERIALIZER ), EDGE_SERIALIZER );
-
-
-        /**
-         * The edges that are to the target node with the source type.  The target node is the row key
-         */
-        targetNodeSourceTypeCf = new MultiTennantColumnFamily<>( edgeShardStrategy.getTargetNodeSourceTypeCfName(),
-                new OrganizationScopedRowKeySerializer<>( ROW_TYPE_SERIALIZER ), EDGE_SERIALIZER );
-
-        graphEdgeVersionsCf = new MultiTennantColumnFamily<>( edgeShardStrategy.getGraphEdgeVersions(),
-                new OrganizationScopedRowKeySerializer<>( EDGE_ROW_KEY_SERIALIZER ), LONG_SERIALIZER );
+        this.edgeColumnFamilies = edgeColumnFamilies;
+        this.shardedEdgeSerialization = shardedEdgeSerialization;
     }
 
 
     @Override
     public MutationBatch writeEdge( final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp ) {
-        ValidationUtils.validateApplicationScope( scope );
-        EdgeUtils.validateEdge( markedEdge );
-        ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );
-
-
-        final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
-                                            .withTimestamp( timestamp.timestamp() );
-
-        final boolean isDeleted = markedEdge.isDeleted();
-
-
-        doWrite( scope, markedEdge, new RowOp<RowKey>() {
-            @Override
-            public void writeEdge( final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily,
-                                   final RowKey rowKey, final DirectedEdge edge ) {
-                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).putColumn( edge, isDeleted );
-            }
-
-
-            @Override
-            public void countEdge( final Id rowId, final NodeType nodeType, final long shardId, final String... types ) {
-                if ( !isDeleted ) {
-                    edgeShardStrategy.increment( scope, rowId, nodeType, shardId, 1l, types );
-                }
-            }
-
-
-            @Override
-            public void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
-                                      final EdgeRowKey rowKey, final long timestamp ) {
-                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).putColumn( timestamp, isDeleted );
-            }
-        } );
-
-
-        return batch;
+        return shardedEdgeSerialization.writeEdge( edgeColumnFamilies, scope, markedEdge, timestamp );
     }
 
 
     @Override
     public MutationBatch deleteEdge( final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp ) {
-        ValidationUtils.validateApplicationScope( scope );
-        EdgeUtils.validateEdge( markedEdge );
-        ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );
-
-
-        final MutationBatch batch = keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
-                                            .withTimestamp( timestamp.timestamp() );
-
-
-        doWrite( scope, markedEdge, new RowOp<RowKey>() {
-            @Override
-            public void writeEdge( final MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> columnFamily,
-                                   final RowKey rowKey, final DirectedEdge edge ) {
-                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).deleteColumn( edge );
-            }
-
-
-            @Override
-            public void countEdge( final Id rowId, final NodeType nodeType,  final long shardId,  final String... types ) {
-                edgeShardStrategy.increment( scope, rowId, nodeType, shardId, -1, types );
-            }
-
-
-            @Override
-            public void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
-                                      final EdgeRowKey rowKey, final long timestamp ) {
-                batch.withRow( columnFamily, ScopedRowKey.fromKey( scope, rowKey ) ).deleteColumn( timestamp );
-            }
-        } );
-
-
-        return batch;
-    }
-
-
-    /**
-     * EdgeWrite the edges internally
-     *
-     * @param scope The scope to encapsulate
-     * @param edge The edge to write
-     * @param op The row operation to invoke
-     */
-    private void doWrite( final ApplicationScope scope, final MarkedEdge edge, final RowOp op ) {
-        ValidationUtils.validateApplicationScope( scope );
-        EdgeUtils.validateEdge( edge );
-
-        final Id sourceNodeId = edge.getSourceNode();
-        final String souceNodeType = sourceNodeId.getType();
-        final Id targetNodeId = edge.getTargetNode();
-        final String targetNodeType = targetNodeId.getType();
-        final long timestamp = edge.getTimestamp();
-        final String type = edge.getType();
-
-
-        /**
-         * Key in the serializers based on the edge
-         */
-
-
-        /**
-         * write edges from source->target
-         */
-
-
-        final DirectedEdge sourceEdge = new DirectedEdge( targetNodeId, timestamp );
-
-        final ShardEntries sourceRowKeyShard = edgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type );
-
-
-        for ( Shard shard : sourceRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-            final RowKey sourceRowKey = new RowKey( sourceNodeId, type, shardId );
-            op.writeEdge( sourceNodeEdgesCf, sourceRowKey, sourceEdge );
-            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type );
-        }
-
-
-        final ShardEntries sourceWithTypeRowKeyShard =
-                edgeShardStrategy.getWriteShards( scope, sourceNodeId, NodeType.SOURCE, timestamp, type, targetNodeType );
-
-        for ( Shard shard : sourceWithTypeRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-            final RowKeyType sourceRowKeyType = new RowKeyType( sourceNodeId, type, targetNodeId, shardId );
-
-            op.writeEdge( sourceNodeTargetTypeCf, sourceRowKeyType, sourceEdge );
-            op.countEdge( sourceNodeId, NodeType.SOURCE, shardId, type, targetNodeType );
-        }
-
-
-        /**
-         * write edges from target<-source
-         */
-
-        final DirectedEdge targetEdge = new DirectedEdge( sourceNodeId, timestamp );
-
-
-        final ShardEntries targetRowKeyShard = edgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type );
-
-        for ( Shard shard : targetRowKeyShard.getEntries() ) {
-            final long shardId = shard.getShardIndex();
-            final RowKey targetRowKey = new RowKey( targetNodeId, type, shardId );
-
-            op.writeEdge( targetNodeEdgesCf, targetRowKey, targetEdge );
-            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type );
-        }
-
-
-        final ShardEntries targetWithTypeRowKeyShard =
-                edgeShardStrategy.getWriteShards( scope, targetNodeId, NodeType.TARGET, timestamp, type, souceNodeType );
-
-
-        for ( Shard shard : targetWithTypeRowKeyShard.getEntries() ) {
-
-            final long shardId = shard.getShardIndex();
-
-            final RowKeyType targetRowKeyType = new RowKeyType( targetNodeId, type, sourceNodeId, shardId );
-
-
-            op.writeEdge( targetNodeSourceTypeCf, targetRowKeyType, targetEdge );
-            op.countEdge( targetNodeId, NodeType.TARGET, shardId, type, souceNodeType );
-        }
-
-        /**
-         * Always a 0l shard, we're hard limiting 2b timestamps for the same edge
-         */
-        final EdgeRowKey edgeRowKey = new EdgeRowKey( sourceNodeId, type, targetNodeId, 0l);
-
-
-        /**
-         * Write this in the timestamp log for this edge of source->target
-         */
-        op.writeVersion( graphEdgeVersionsCf, edgeRowKey, timestamp );
+        return shardedEdgeSerialization.deleteEdge( edgeColumnFamilies, scope, markedEdge, timestamp );
     }
 
 
@@ -380,55 +130,17 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         final String type = search.getType();
         final long maxTimestamp = search.getMaxTimestamp();
 
-        final EdgeSearcher<EdgeRowKey, Long, MarkedEdge> searcher =
-                new EdgeSearcher<EdgeRowKey, Long, MarkedEdge>( scope, maxTimestamp, search.last(),
-                        edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type ) ) {
-
-                    @Override
-                    protected Serializer<Long> getSerializer() {
-                        return LONG_SERIALIZER;
-                    }
-
-
-                    @Override
-                    public void setRange( final RangeBuilder builder ) {
+        final Iterator<ShardEntries> readShards =
+                edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type );
 
-
-                        if ( last.isPresent() ) {
-                            super.setRange( builder );
-                            return;
-                        }
-
-                        //start seeking at a value < our max version
-                        builder.setStart( maxTimestamp );
-                    }
-
-
-                    @Override
-                    protected EdgeRowKey generateRowKey( long shard ) {
-                        return new EdgeRowKey( sourceId, type, targetId, shard );
-                    }
-
-
-                    @Override
-                    protected Long getStartColumn( final Edge last ) {
-                        return last.getTimestamp();
-                    }
-
-
-                    @Override
-                    protected MarkedEdge createEdge( final Long column, final boolean marked ) {
-                        return new SimpleMarkedEdge( sourceId, type, targetId, column.longValue(), marked );
-                    }
-                };
-
-        return new ShardRowIterator<>( searcher, graphEdgeVersionsCf );
+        return shardedEdgeSerialization.getEdgeVersions( edgeColumnFamilies, scope, search, readShards );
     }
 
 
     @Override
     public Iterator<MarkedEdge> getEdgesFromSource( final ApplicationScope scope, final SearchByEdgeType edgeType ) {
 
+
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );
 
@@ -436,36 +148,11 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         final String type = edgeType.getType();
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
-        final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
-                new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, sourceId,NodeType.SOURCE, maxTimestamp, type ) ) {
-
-
-                    @Override
-                    protected Serializer<DirectedEdge> getSerializer() {
-                        return EDGE_SERIALIZER;
-                    }
-
-
-                    @Override
-                    protected RowKey generateRowKey( long shard ) {
-                        return new RowKey( sourceId, type, shard );
-                    }
-
-
-                    @Override
-                    protected DirectedEdge getStartColumn( final Edge last ) {
-                        return new DirectedEdge( last.getTargetNode(), last.getTimestamp() );
-                    }
-
 
-                    @Override
-                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
-                        return new SimpleMarkedEdge( sourceId, type, edge.id, edge.timestamp, marked );
-                    }
-                };
+        final Iterator<ShardEntries> readShards =
+                edgeShardStrategy.getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type );
 
-        return new ShardRowIterator<>( searcher, sourceNodeEdgesCf );
+        return shardedEdgeSerialization.getEdgesFromSource( edgeColumnFamilies, scope, edgeType, readShards );
     }
 
 
@@ -476,40 +163,16 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         ValidationUtils.validateApplicationScope( scope );
         EdgeUtils.validateSearchByEdgeType( edgeType );
 
-        final Id targetId = edgeType.getNode();
+        final Id sourceId = edgeType.getNode();
         final String type = edgeType.getType();
         final String targetType = edgeType.getIdType();
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
-        final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
-                new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type, targetType ) ) {
-
-                    @Override
-                    protected Serializer<DirectedEdge> getSerializer() {
-                        return EDGE_SERIALIZER;
-                    }
-
-
-                    @Override
-                    protected RowKeyType generateRowKey( long shard ) {
-                        return new RowKeyType( targetId, type, targetType, shard );
-                    }
 
+        final Iterator<ShardEntries> readShards =   edgeShardStrategy
+                                        .getReadShards( scope, sourceId, NodeType.SOURCE, maxTimestamp, type, targetType );
 
-                    @Override
-                    protected DirectedEdge getStartColumn( final Edge last ) {
-                        return new DirectedEdge( last.getTargetNode(), last.getTimestamp() );
-                    }
-
-
-                    @Override
-                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
-                        return new SimpleMarkedEdge( targetId, type, edge.id, edge.timestamp, marked );
-                    }
-                };
-
-        return new ShardRowIterator( searcher, sourceNodeTargetTypeCf );
+        return shardedEdgeSerialization.getEdgesFromSourceByTargetType( edgeColumnFamilies, scope, edgeType, readShards );
     }
 
 
@@ -523,36 +186,10 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         final String type = edgeType.getType();
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
-        final EdgeSearcher<RowKey, DirectedEdge, MarkedEdge> searcher =
-                new EdgeSearcher<RowKey, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type ) ) {
-
-                    @Override
-                    protected Serializer<DirectedEdge> getSerializer() {
-                        return EDGE_SERIALIZER;
-                    }
 
+        final Iterator<ShardEntries> readShards =  edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type );
 
-                    @Override
-                    protected RowKey generateRowKey( long shard ) {
-                        return new RowKey( targetId, type, shard );
-                    }
-
-
-                    @Override
-                    protected DirectedEdge getStartColumn( final Edge last ) {
-                        return new DirectedEdge( last.getSourceNode(), last.getTimestamp() );
-                    }
-
-
-                    @Override
-                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
-                        return new SimpleMarkedEdge( edge.id, type, targetId, edge.timestamp, marked );
-                    }
-                };
-
-
-        return new ShardRowIterator<>( searcher, targetNodeEdgesCf );
+        return shardedEdgeSerialization.getEdgesToTarget( edgeColumnFamilies, scope, edgeType, readShards );
     }
 
 
@@ -569,525 +206,10 @@ public class EdgeSerializationImpl implements EdgeSerialization, Migration {
         final long maxTimestamp = edgeType.getMaxTimestamp();
 
 
-        final EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge> searcher =
-                new EdgeSearcher<RowKeyType, DirectedEdge, MarkedEdge>( scope, maxTimestamp, edgeType.last(),
-                        edgeShardStrategy.getReadShards( scope, targetId, NodeType.TARGET,  maxTimestamp, type, sourceType ) ) {
-                    @Override
-                    protected Serializer<DirectedEdge> getSerializer() {
-                        return EDGE_SERIALIZER;
-                    }
-
-
-                    @Override
-                    protected RowKeyType generateRowKey( final long shard ) {
-                        return new RowKeyType( targetId, type, sourceType, shard );
-                    }
-
-
-                    @Override
-                    protected DirectedEdge getStartColumn( final Edge last ) {
-                        return new DirectedEdge( last.getTargetNode(), last.getTimestamp() );
-                    }
-
-
-                    @Override
-                    protected MarkedEdge createEdge( final DirectedEdge edge, final boolean marked ) {
-                        return new SimpleMarkedEdge( edge.id, type, targetId, edge.timestamp, marked );
-                    }
-                };
-
-        return new ShardRowIterator<>( searcher, targetNodeSourceTypeCf );
-    }
-
-
-    @Override
-    public Collection<MultiTennantColumnFamilyDefinition> getColumnFamilies() {
-        return Arrays
-                .asList( graphCf( sourceNodeEdgesCf ), graphCf( targetNodeEdgesCf ), graphCf( sourceNodeTargetTypeCf ),
-                        graphCf( targetNodeSourceTypeCf ),
-                        new MultiTennantColumnFamilyDefinition( graphEdgeVersionsCf, BytesType.class.getSimpleName(),
-                                ColumnTypes.LONG_TYPE_REVERSED, BytesType.class.getSimpleName(),
-                                MultiTennantColumnFamilyDefinition.CacheOption.KEYS ) );
-    }
-
-
-    /**
-     * Helper to generate an edge definition by the type
-     */
-    private MultiTennantColumnFamilyDefinition graphCf( MultiTennantColumnFamily cf ) {
-        return new MultiTennantColumnFamilyDefinition( cf, BytesType.class.getSimpleName(), EDGE_DYNAMIC_COMPOSITE_TYPE,
-                BytesType.class.getSimpleName(), MultiTennantColumnFamilyDefinition.CacheOption.KEYS );
-    }
-
-
-    /**
-     * Internal class to represent edge data for serialization
-     */
-    private static class DirectedEdge {
-
-        public final long timestamp;
-        public final Id id;
-
-
-        private DirectedEdge( final Id id, final long timestamp ) {
-            this.timestamp = timestamp;
-            this.id = id;
-        }
-    }
-
-
-    /**
-     * Serializes to a source->target edge Note that we cannot set the edge type on de-serialization.  Only the target
-     * Id and version.
-     */
-    private static class EdgeSerializer extends AbstractSerializer<DirectedEdge> {
-
-        private static final IdColDynamicCompositeSerializer ID_COL_SERIALIZER = IdColDynamicCompositeSerializer.get();
-        //        private static final UUIDSerializer UUID_SERIALIZER = UUIDSerializer.get();
-        //        private static final StringSerializer STRING_SERIALIZER = StringSerializer.get().getString(;
-        //        )
-
-
-        @Override
-        public ByteBuffer toByteBuffer( final DirectedEdge edge ) {
-
-            DynamicComposite composite = new DynamicComposite();
-
-            //            //add our edge
-            //            composite.addComponent( edge.timestamp, LONG_SERIALIZER, LONG_TYPE_REVERSED,
-            //                    AbstractComposite.ComponentEquality.EQUAL );
-
-            //            //we do this explicity instead of re-using the id composite serializer b/c we want high order
-            //            //time uuids first, not second. In this column family, there is no sort
-            //            composite.addComponent( edge.id.getUuid(), UUID_SERIALIZER, UUID_TYPE_REVERSED,
-            //                    AbstractComposite.ComponentEquality.EQUAL );
-            //
-            //            composite.addComponent( edge.id.getType(), STRING_SERIALIZER );
-
-            composite.addComponent( edge.timestamp, LONG_SERIALIZER );
-
-            ID_COL_SERIALIZER.toComposite( composite, edge.id );
-
-            return composite.serialize();
-        }
-
-
-        @Override
-        public DirectedEdge fromByteBuffer( final ByteBuffer byteBuffer ) {
-            DynamicComposite composite = DynamicComposite.fromByteBuffer( byteBuffer );
-
-            Preconditions.checkArgument( composite.size() == 3, "Composite should have 3 elements" );
-
-
-            //return the version
-            final long timestamp = composite.get( 0, LONG_SERIALIZER );
-
+        Iterator<ShardEntries> readShards =   edgeShardStrategy
+                                        .getReadShards( scope, targetId, NodeType.TARGET, maxTimestamp, type, sourceType );
 
-            //parse our id
-            final Id id = ID_COL_SERIALIZER.fromComposite( composite, 1 );
-
-
-            return new DirectedEdge( id, timestamp );
-        }
-    }
-
-
-    /**
-     * Class that represents an edge row key
-     */
-    private static class RowKey {
-        public final Id nodeId;
-        public final long[] hash;
-        public final long shardId;
-
-
-        /**
-         * Create a row key with the node and the edgeType
-         */
-        public RowKey( Id nodeId, String edgeType, final long shardId ) {
-            this( nodeId, EdgeHasher.createEdgeHash( edgeType ), shardId );
-        }
-
-
-        /**
-         * Create a new row key with the hash, should only be used in deserialization or internal callers.
-         */
-        protected RowKey( Id nodeId, long[] hash, final long shardId ) {
-            this.nodeId = nodeId;
-            this.hash = hash;
-            this.shardId = shardId;
-        }
+        return shardedEdgeSerialization.getEdgesToTargetBySourceType( edgeColumnFamilies, scope, edgeType, readShards );
     }
 
-
-    /**
-     * The row key with the additional type
-     */
-    private static class RowKeyType extends RowKey {
-
-        /**
-         * Create a row key with the node id in the row key, the edge type, and the type from the typeid
-         *
-         * @param nodeId The node id in the row key
-         * @param edgeType The type of the edge
-         * @param typeId The type of hte id
-         */
-        public RowKeyType( final Id nodeId, final String edgeType, final Id typeId, final long shardId ) {
-            this( nodeId, edgeType, typeId.getType(), shardId );
-        }
-
-
-        /**
-         * Create a row key with the node id in the row key, the edge type, adn the target type from the id
-         */
-        public RowKeyType( final Id nodeId, final String edgeType, final String targetType, final long shardId ) {
-            super( nodeId, EdgeHasher.createEdgeHash( edgeType, targetType ), shardId );
-        }
-
-
-        /**
-         * Internal use in de-serializing.  Should only be used in this case or by internal callers
-         */
-        private RowKeyType( final Id nodeId, final long[] hash, final long shardId ) {
-            super( nodeId, hash, shardId );
-        }
-    }
-
-
-    /**
-     * Used to store row keys by sourceId, targetId and edgeType
-     */
-    private static class EdgeRowKey {
-        public final Id sourceId;
-        public final Id targetId;
-        public final String edgeType;
-        public final long shardId;
-
-
-        private EdgeRowKey( final Id sourceId, final String edgeType, final Id targetId, final long shardId ) {
-            this.sourceId = sourceId;
-            this.targetId = targetId;
-            this.edgeType = edgeType;
-            this.shardId = shardId;
-        }
-    }
-
-
-    /**
-     * Searcher to be used when performing the search.  Performs I/O transformation as well as parsing for the iterator.
-     * If there are more row keys available to seek, the iterator will return true
-     *
-     * @param <R> The row type
-     * @param <C> The column type
-     * @param <T> The parsed return type
-     */
-    private static abstract class EdgeSearcher<R, C, T>
-            implements ColumnParser<C, T>, Iterator<ScopedRowKey<ApplicationScope, R>> {
-
-        protected final Optional<Edge> last;
-        protected final long maxTimestamp;
-        protected final ApplicationScope scope;
-        protected final Iterator<ShardEntries> shards;
-
-
-        protected EdgeSearcher( final ApplicationScope scope, final long maxTimestamp, final Optional<Edge> last,
-                                final Iterator<ShardEntries> shards ) {
-            this.scope = scope;
-            this.maxTimestamp = maxTimestamp;
-            this.last = last;
-            this.shards = shards;
-        }
-
-
-        @Override
-        public boolean hasNext() {
-            return shards.hasNext();
-        }
-
-
-        @Override
-        public ScopedRowKey<ApplicationScope, R> next() {
-            /**
-             * Todo, multi scan
-             */
-            return ScopedRowKey.fromKey( scope, generateRowKey( shards.next().getEntries().iterator().next().getShardIndex() ) );
-        }
-
-
-        @Override
-        public void remove() {
-            throw new UnsupportedOperationException( "Remove is unsupported" );
-        }
-
-
-        /**
-         * Set the range on a search
-         */
-        public void setRange( final RangeBuilder builder ) {
-
-            //set our start range since it was supplied to us
-            if ( last.isPresent() ) {
-                C sourceEdge = getStartColumn( last.get() );
-
-
-                builder.setStart( sourceEdge, getSerializer() );
-            }
-            else {
-
-
-            }
-        }
-
-
-        public boolean hasPage() {
-            return last.isPresent();
-        }
-
-
-        @Override
-        public T parseColumn( final Column<C> column ) {
-            final C edge = column.getName();
-
-            return createEdge( edge, column.getBooleanValue() );
-        }
-
-
-        /**
-         * Get the column's serializer
-         */
-        protected abstract Serializer<C> getSerializer();
-
-
-        /**
-         * Create a row key for this search to use
-         *
-         * @param shard The shard to use in the row key
-         */
-        protected abstract R generateRowKey( final long shard );
-
-
-        /**
-         * Set the start column to begin searching from.  The last is provided
-         */
-        protected abstract C getStartColumn( final Edge last );
-
-
-        /**
-         * Create an edge to return to the user based on the directed edge provided
-         *
-         * @param column The column name
-         * @param marked The marked flag in the column value
-         */
-        protected abstract T createEdge( final C column, final boolean marked );
-    }
-
-
-    /**
-     * Class to perform serialization for row keys from edges
-     */
-    private static class RowSerializer implements CompositeFieldSerializer<RowKey> {
-
-        private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
-
-
-        @Override
-        public void toComposite( final CompositeBuilder builder, final RowKey key ) {
-
-            //add the row id to the composite
-            ID_SER.toComposite( builder, key.nodeId );
-
-            builder.addLong( key.hash[0] );
-            builder.addLong( key.hash[1] );
-            builder.addLong( key.shardId );
-        }
-
-
-        @Override
-        public RowKey fromComposite( final CompositeParser composite ) {
-
-            final Id id = ID_SER.fromComposite( composite );
-            final long[] hash = new long[] { composite.readLong(), composite.readLong() };
-            final long shard = composite.readLong();
-
-
-            return new RowKey( id, hash, shard );
-        }
-    }
-
-
-    /**
-     * Class to perform serialization for row keys from edges
-     */
-    private static class RowTypeSerializer implements CompositeFieldSerializer<RowKeyType> {
-
-        private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
-
-
-        @Override
-        public void toComposite( final CompositeBuilder builder, final RowKeyType keyType ) {
-
-            //add the row id to the composite
-            ID_SER.toComposite( builder, keyType.nodeId );
-
-            builder.addLong( keyType.hash[0] );
-            builder.addLong( keyType.hash[1] );
-            builder.addLong( keyType.shardId );
-        }
-
-
-        @Override
-        public RowKeyType fromComposite( final CompositeParser composite ) {
-
-            final Id id = ID_SER.fromComposite( composite );
-            final long[] hash = new long[] { composite.readLong(), composite.readLong() };
-            final long shard = composite.readLong();
-
-            return new RowKeyType( id, hash, shard );
-        }
-    }
-
-
-    /**
-     * Class to perform serialization for row keys from edges
-     */
-    private static class EdgeRowKeySerializer implements CompositeFieldSerializer<EdgeRowKey> {
-
-        private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
-
-
-        @Override
-        public void toComposite( final CompositeBuilder builder, final EdgeRowKey key ) {
-
-            //add the row id to the composite
-            ID_SER.toComposite( builder, key.sourceId );
-            builder.addString( key.edgeType );
-            ID_SER.toComposite( builder, key.targetId );
-            builder.addLong( key.shardId );
-        }
-
-
-        @Override
-        public EdgeRowKey fromComposite( final CompositeParser composite ) {
-
-            final Id sourceId = ID_SER.fromComposite( composite );
-            final String edgeType = composite.readString();
-            final Id targetId = ID_SER.fromComposite( composite );
-            final long shard = composite.readLong();
-
-            return new EdgeRowKey( sourceId, edgeType, targetId, shard );
-        }
-    }
-
-
-    /**
-     * Simple callback to perform puts and deletes with a common row setup code
-     *
-     * @param <R> The row key type
-     */
-    private static interface RowOp<R> {
-
-        /**
-         * Write the edge with the given data
-         */
-        void writeEdge( final MultiTennantColumnFamily<ApplicationScope, R, DirectedEdge> columnFamily, R rowKey,
-                        DirectedEdge edge );
-
-        /**
-         * Perform the count on the edge
-         */
-        void countEdge( final Id rowId, NodeType type, long shardId,  String... types );
-
-        /**
-         * Write the edge into the version cf
-         */
-        void writeVersion( final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> columnFamily,
-                           EdgeRowKey rowKey, long timestamp );
-    }
-
-
-    /**
-     * Internal iterator to iterate over multiple row keys
-     *
-     * @param <R> The row type
-     * @param <C> The column type
-     * @param <T> The parsed return type
-     */
-    private class ShardRowIterator<R, C, T> implements Iterator<T> {
-
-        private final EdgeSearcher<R, C, T> searcher;
-
-        private final MultiTennantColumnFamily<ApplicationScope, R, C> cf;
-
-        private Iterator<T> currentColumnIterator;
-
-
-        private ShardRowIterator( final EdgeSearcher<R, C, T> searcher,
-                                  final MultiTennantColumnFamily<ApplicationScope, R, C> cf ) {
-            this.searcher = searcher;
-            this.cf = cf;
-        }
-
-
-        @Override
-        public boolean hasNext() {
-            //we have more columns to return
-            if ( currentColumnIterator != null && currentColumnIterator.hasNext() ) {
-                return true;
-            }
-
-            /**
-             * We have another row key, advance to it and re-check
-             */
-            if ( searcher.hasNext() ) {
-                advanceRow();
-                return hasNext();
-            }
-
-            //we have no more columns, and no more row keys, we're done
-            return false;
-        }
-
-
-        @Override
-        public T next() {
-            if ( !hasNext() ) {
-                throw new NoSuchElementException( "There are no more rows or columns left to advance" );
-            }
-
-            return currentColumnIterator.next();
-        }
-
-
-        @Override
-        public void remove() {
-            throw new UnsupportedOperationException( "Remove is unsupported" );
-        }
-
-
-        /**
-         * Advance our iterator to the next row (assumes the check for row keys is elsewhere)
-         */
-        private void advanceRow() {
-
-            /**
-             * If the edge is present, we need to being seeking from this
-             */
-
-            final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );
-
-
-            //set the range into the search
-            searcher.setRange( rangeBuilder );
-
-            final ScopedRowKey<ApplicationScope, R> rowKey = searcher.next();
-
-
-            RowQuery<ScopedRowKey<ApplicationScope, R>, C> query =
-                    keyspace.prepareQuery( cf ).setConsistencyLevel( cassandraConfig.getReadCL() ).getKey( rowKey )
-                            .autoPaginate( true ).withColumnRange( rangeBuilder.build() );
-
-
-            currentColumnIterator = new ColumnNameIterator<C, T>( query, searcher, searcher.hasPage() );
-        }
-    }
 }

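Per the diffstat, the searcher and row-iteration machinery removed above is not deleted outright; it reappears as shard.impl.EdgeSearcher and shard.impl.ShardRowIterator (both newly imported at the top of this file). A condensed sketch of the pattern, taken from the removed ShardRowIterator inner class, for readers skimming the diff:

    // Condensed from the removed inner class: drain the columns of the current
    // shard's row, and only when they are exhausted ask the EdgeSearcher for
    // the next shard row key and open a new paged query against it.
    @Override
    public boolean hasNext() {
        if ( currentColumnIterator != null && currentColumnIterator.hasNext() ) {
            return true;                 // more columns left in the current shard row
        }

        if ( searcher.hasNext() ) {      // another shard row key remains
            advanceRow();                // builds a RangeBuilder, runs a RowQuery, wraps it in a ColumnNameIterator
            return hasNext();
        }

        return false;                    // no columns and no rows left
    }
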
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/DirectedEdge.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/DirectedEdge.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/DirectedEdge.java
new file mode 100644
index 0000000..720c948
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/DirectedEdge.java
@@ -0,0 +1,41 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.apache.usergrid.persistence.model.entity.Id;
+
+
+/**
+ * Internal class to represent edge data for serialization
+ */
+public class DirectedEdge {
+
+    public final long timestamp;
+    public final Id id;
+
+
+    public DirectedEdge( final Id id, final long timestamp ) {
+        this.timestamp = timestamp;
+        this.id = id;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeColumnFamilies.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeColumnFamilies.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeColumnFamilies.java
new file mode 100644
index 0000000..6f6c72d
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeColumnFamilies.java
@@ -0,0 +1,60 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
+import org.apache.usergrid.persistence.core.migration.Migration;
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+
+
+/**
+ * Implementation for using multiple column families
+ */
+public interface EdgeColumnFamilies extends Migration{
+
+    /**
+     * Get the column family used for edges originating from a source node
+     */
+    public MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> getSourceNodeCfName();
+
+    /**
+     * Get the column family used for edges pointing to a target node
+     */
+    public MultiTennantColumnFamily<ApplicationScope, RowKey, DirectedEdge> getTargetNodeCfName();
+
+
+    /**
+     * Get the column family used for edges from a source node filtered by target type
+     */
+    public MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> getSourceNodeTargetTypeCfName();
+
+    /**
+     * Get the column family used for edges to a target node filtered by source type
+     */
+    public MultiTennantColumnFamily<ApplicationScope, RowKeyType, DirectedEdge> getTargetNodeSourceTypeCfName();
+
+    /**
+     * Get the column family used to store all versions of a graph edge
+     */
+    public MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> getGraphEdgeVersions();
+}
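
For illustration only (not part of this commit): a minimal sketch of how a read path might choose between these column families, assuming an injected EdgeColumnFamilies implementation and the types imported in the diff above.

    // Hypothetical helper, not from the commit: pick the CF used when reading edges
    // outbound from a source node, optionally narrowed by the target node's type.
    public static MultiTennantColumnFamily<ApplicationScope, ?, DirectedEdge> sourceReadCf(
            final EdgeColumnFamilies columnFamilies, final boolean filterByTargetType ) {

        if ( filterByTargetType ) {
            return columnFamilies.getSourceNodeTargetTypeCfName();
        }

        return columnFamilies.getSourceNodeCfName();
    }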

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeRowKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeRowKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeRowKey.java
new file mode 100644
index 0000000..d7982d6
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeRowKey.java
@@ -0,0 +1,44 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.apache.usergrid.persistence.model.entity.Id;
+
+
+/**
+ * Used to store row keys by sourceId, targetId and edgeType
+ */
+public class EdgeRowKey {
+    public final Id sourceId;
+    public final Id targetId;
+    public final String edgeType;
+    public final long shardId;
+
+
+    public EdgeRowKey( final Id sourceId, final String edgeType, final Id targetId, final long shardId ) {
+        this.sourceId = sourceId;
+        this.targetId = targetId;
+        this.edgeType = edgeType;
+        this.shardId = shardId;
+    }
+}
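
For illustration only (not part of this commit): a sketch of constructing an EdgeRowKey. Note that the constructor takes ( sourceId, edgeType, targetId, shardId ), which differs from the field declaration order; sourceId and targetId are assumed to be existing Id instances (e.g. from the createId() test helper).

    // Sketch: a row key for the edge-versions column family, shard 0.
    final EdgeRowKey versionsKey = new EdgeRowKey( sourceId, "likes", targetId, 0l );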

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
index 8f5d168..be4153b 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/EdgeShardStrategy.java
@@ -23,6 +23,7 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 import java.util.Iterator;
 import java.util.UUID;
 
+import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.model.entity.Id;
 
@@ -65,32 +66,6 @@ public interface EdgeShardStrategy {
     public void increment(final ApplicationScope scope,final Id rowKeyId, final NodeType nodeType, long shardId, long count ,final  String... types );
 
 
-    /**
-     * Get the name of the column family for getting source nodes
-     */
-    public String getSourceNodeCfName();
-
-    /**
-     * Get the name of the column family for getting target nodes
-     */
-    public String getTargetNodeCfName();
-
-
-    /**
-     * Get the name of the column family for getting source nodes  with a target type
-     */
-    public String getSourceNodeTargetTypeCfName();
-
-    /**
-     * Get the name of the column family for getting target nodes with a source type
-     */
-    public String getTargetNodeSourceTypeCfName();
-
-    /**
-     * Get the Graph edge versions cf
-     * @return
-     */
-    public String getGraphEdgeVersions();
 
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
index f2f51ef..c700513 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardApproximation.java
@@ -65,4 +65,6 @@ public interface NodeShardApproximation {
      * @return
      */
     public boolean flushPending();
+
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKey.java
new file mode 100644
index 0000000..9895978
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKey.java
@@ -0,0 +1,54 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+
+/**
+ * Class that represents an edge row key
+ */
+public class RowKey {
+    public final Id nodeId;
+    public final long[] hash;
+    public final long shardId;
+
+
+    /**
+     * Create a row key with the node and the edgeType
+     */
+    public RowKey( Id nodeId, String edgeType, final long shardId ) {
+        this( nodeId, EdgeHasher.createEdgeHash( edgeType ), shardId );
+    }
+
+
+    /**
+     * Create a new row key with the hash, should only be used in deserialization or internal callers.
+     */
+    public RowKey( Id nodeId, long[] hash, final long shardId ) {
+        this.nodeId = nodeId;
+        this.hash = hash;
+        this.shardId = shardId;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKeyType.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKeyType.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKeyType.java
new file mode 100644
index 0000000..5705eb3
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/RowKeyType.java
@@ -0,0 +1,60 @@
+/*
+ *
+ *  * Licensed to the Apache Software Foundation (ASF) under one
+ *  * or more contributor license agreements.  See the NOTICE file
+ *  * distributed with this work for additional information
+ *  * regarding copyright ownership.  The ASF licenses this file
+ *  * to you under the Apache License, Version 2.0 (the
+ *  * "License"); you may not use this file except in compliance
+ *  * with the License.  You may obtain a copy of the License at
+ *  *
+ *  *    http://www.apache.org/licenses/LICENSE-2.0
+ *  *
+ *  * Unless required by applicable law or agreed to in writing,
+ *  * software distributed under the License is distributed on an
+ *  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *  * KIND, either express or implied.  See the License for the
+ *  * specific language governing permissions and limitations
+ *  * under the License.
+ *
+ */
+
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.apache.usergrid.persistence.graph.serialization.util.EdgeHasher;
+import org.apache.usergrid.persistence.model.entity.Id;
+
+
+/**
+ * The row key with the additional type
+ */
+public class RowKeyType extends RowKey {
+
+    /**
+     * Create a row key with the node id in the row key, the edge type, and the type from the typeid
+     *
+     * @param nodeId The node id in the row key
+     * @param edgeType The type of the edge
+     * @param typeId The id whose type is used in the key
+     */
+    public RowKeyType( final Id nodeId, final String edgeType, final Id typeId, final long shardId ) {
+        this( nodeId, edgeType, typeId.getType(), shardId );
+    }
+
+
+    /**
+     * Create a row key with the node id in the row key, the edge type, and the target type from the id
+     */
+    public RowKeyType( final Id nodeId, final String edgeType, final String targetType, final long shardId ) {
+        super( nodeId, EdgeHasher.createEdgeHash( edgeType, targetType ), shardId );
+    }
+
+
+    /**
+     * Internal use in de-serializing.  Should only be used in this case or by internal callers
+     */
+    public RowKeyType( final Id nodeId, final long[] hash, final long shardId ) {
+        super( nodeId, hash, shardId );
+    }
+}
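
For illustration only (not part of this commit): a sketch contrasting the two key types, assuming nodeId is an existing Id (e.g. from the createId() test helper). A plain RowKey hashes only the edge type via EdgeHasher, while a RowKeyType also folds the target node's type into the hash, so the two land in different rows of their respective column families.

    final RowKey byEdgeType = new RowKey( nodeId, "likes", 0l );
    final RowKeyType byEdgeAndTargetType = new RowKeyType( nodeId, "likes", "device", 0l );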

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
index 6ec5e20..570633c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntries.java
@@ -23,7 +23,8 @@ import java.util.Set;
 
 
 /**
- * There are cases where we need to read or write to more than 1 shard.
+ * There are cases where we need to read or write to more than 1 shard.  This object encapsulates
+ * a set of shards that should be written to.
  */
 public class ShardEntries {
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
new file mode 100644
index 0000000..814700d
--- /dev/null
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardedEdgeSerialization.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import java.util.Iterator;
+import java.util.UUID;
+
+import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.MarkedEdge;
+import org.apache.usergrid.persistence.graph.SearchByEdge;
+import org.apache.usergrid.persistence.graph.SearchByEdgeType;
+import org.apache.usergrid.persistence.graph.SearchByIdType;
+
+import com.netflix.astyanax.MutationBatch;
+
+
+/**
+ * Performs serialization on the shards
+ */
+public interface ShardedEdgeSerialization {
+
+    /**
+     * Write both the source ---> target edge and the target <--- source edge into the mutation
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The org scope of the graph
+     * @param markedEdge The edge to write
+     */
+    MutationBatch writeEdge( EdgeColumnFamilies columnFamilies, ApplicationScope scope, MarkedEdge markedEdge,
+                             UUID timestamp );
+
+    /**
+     * Delete both the source ---> target edge and the target <--- source edge from the mutation
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The org scope of the graph
+     * @param markedEdge The edge to delete
+     */
+    MutationBatch deleteEdge( EdgeColumnFamilies columnFamilies, ApplicationScope scope, MarkedEdge markedEdge,
+                              UUID timestamp );
+
+
+    /**
+     * Search for all versions of this edge < the search version.  Returns all versions
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The application scope
+     * @param search The search criteria
+     * @param shards The shards to iterate when searching
+     */
+    Iterator<MarkedEdge> getEdgeVersions( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
+                                          SearchByEdge search, Iterator<ShardEntries> shards );
+
+    /**
+     * Get an iterator of all edges by edge type originating from source node
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The application scope
+     * @param search The search criteria
+     * @param shards The shards to iterate when searching
+     */
+    Iterator<MarkedEdge> getEdgesFromSource( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
+                                             SearchByEdgeType search, Iterator<ShardEntries> shards );
+
+
+    /**
+     * Get an iterator of all edges by edge type originating from source node.  Also filters by target node id type
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The application scope
+     * @param search The search criteria
+     * @param shards The shards to iterate when searching
+     */
+    Iterator<MarkedEdge> getEdgesFromSourceByTargetType( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
+                                                         SearchByIdType search, Iterator<ShardEntries> shards );
+
+    /**
+     * Get an iterator of all edges by edge type pointing to the target node.  Returns all versions
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The application scope
+     * @param search The search criteria
+     * @param shards The shards to iterate when searching
+     */
+    Iterator<MarkedEdge> getEdgesToTarget( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
+                                           SearchByEdgeType search, Iterator<ShardEntries> shards );
+
+
+    /**
+     * Get an iterator of all edges by edge type pointing to the target node.  Also uses the source id type to limit the
+     * results
+     *
+     * @param columnFamilies The column families to use
+     * @param scope The application scope
+     * @param search The search criteria
+     * @param shards The shards to iterate when searching
+     */
+    Iterator<MarkedEdge> getEdgesToTargetBySourceType( EdgeColumnFamilies columnFamilies, ApplicationScope scope,
+                                                       SearchByIdType search, Iterator<ShardEntries> shards );
+}
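
For illustration only (not part of this commit): a minimal sketch of draining one of the shard-aware iterators above. The SearchByEdgeType and the ShardEntries iterator are assumed to be supplied by the caller (e.g. built from the shard strategy); the sketch only exercises the interface as declared.

    public static long countEdgesFromSource( final ShardedEdgeSerialization serialization,
                                             final EdgeColumnFamilies columnFamilies,
                                             final ApplicationScope scope,
                                             final SearchByEdgeType search,
                                             final Iterator<ShardEntries> shards ) {

        final Iterator<MarkedEdge> edges =
                serialization.getEdgesFromSource( columnFamilies, scope, search, shards );

        long count = 0;

        while ( edges.hasNext() ) {
            edges.next();
            count++;
        }

        return count;
    }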

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerialization.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerialization.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerialization.java
index 41eb525..4b05401 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerialization.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerialization.java
@@ -27,7 +27,7 @@ import com.netflix.astyanax.MutationBatch;
 /**
  * Serialization for flushing and reading counters
  */
-public interface NodeShardCounterSerialization  extends Migration {
+public interface NodeShardCounterSerialization extends Migration {
 
 
     /**

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationImpl.java
index da318bf..c063b7c 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardCounterSerializationImpl.java
@@ -29,6 +29,8 @@ import org.apache.cassandra.db.marshal.CounterColumnType;
 
 import org.apache.usergrid.persistence.core.astyanax.CassandraConfig;
 import org.apache.usergrid.persistence.core.astyanax.ColumnTypes;
+import org.apache.usergrid.persistence.core.astyanax.CompositeFieldSerializer;
+import org.apache.usergrid.persistence.core.astyanax.IdRowCompositeSerializer;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamily;
 import org.apache.usergrid.persistence.core.astyanax.MultiTennantColumnFamilyDefinition;
 import org.apache.usergrid.persistence.core.astyanax.OrganizationScopedRowKeySerializer;
@@ -36,8 +38,8 @@ import org.apache.usergrid.persistence.core.astyanax.ScopedRowKey;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.EdgeRowKey;
-import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.EdgeRowKeySerializer;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
+import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.common.base.Preconditions;
 import com.google.inject.Inject;
@@ -48,6 +50,8 @@ import com.netflix.astyanax.connectionpool.OperationResult;
 import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
 import com.netflix.astyanax.model.Column;
+import com.netflix.astyanax.model.CompositeBuilder;
+import com.netflix.astyanax.model.CompositeParser;
 import com.netflix.astyanax.serializers.LongSerializer;
 
 
@@ -56,12 +60,14 @@ import com.netflix.astyanax.serializers.LongSerializer;
 public class NodeShardCounterSerializationImpl implements NodeShardCounterSerialization {
 
 
+    private static final ShardKeySerializer SHARD_KEY_SERIALIZER = new ShardKeySerializer();
+
     /**
      * Edge shards
      */
-    private static final MultiTennantColumnFamily<ApplicationScope, EdgeRowKey, Long> EDGE_SHARD_COUNTS =
+    private static final MultiTennantColumnFamily<ApplicationScope, ShardKey, Long> EDGE_SHARD_COUNTS =
             new MultiTennantColumnFamily<>( "Edge_Shard_Counts",
-                    new OrganizationScopedRowKeySerializer<>( new EdgeRowKeySerializer() ), LongSerializer.get() );
+                    new OrganizationScopedRowKeySerializer<>( SHARD_KEY_SERIALIZER ), LongSerializer.get() );
 
 
     protected final Keyspace keyspace;
@@ -92,12 +98,11 @@ public class NodeShardCounterSerializationImpl implements NodeShardCounterSerial
             final ShardKey key = entry.getKey();
             final long value = entry.getValue().get();
 
-            final EdgeRowKey edgeRowKey = new EdgeRowKey( key.getNodeId(), key.getEdgeTypes() );
 
-            final ScopedRowKey rowKey = ScopedRowKey.fromKey( key.getScope(), edgeRowKey );
+            final ScopedRowKey rowKey = ScopedRowKey.fromKey( key.scope, key );
 
 
-            batch.withRow( EDGE_SHARD_COUNTS, rowKey ).incrementCounterColumn( key.getShardId(), value );
+            batch.withRow( EDGE_SHARD_COUNTS, rowKey ).incrementCounterColumn( key.shardId, value );
         }
 
 
@@ -108,14 +113,12 @@ public class NodeShardCounterSerializationImpl implements NodeShardCounterSerial
     @Override
     public long getCount( final ShardKey key ) {
 
-        final EdgeRowKey edgeRowKey = new EdgeRowKey( key.getNodeId(), key.getEdgeTypes() );
-
-        final ScopedRowKey rowKey = ScopedRowKey.fromKey( key.getScope(), edgeRowKey );
+        final ScopedRowKey rowKey = ScopedRowKey.fromKey( key.scope, key );
 
 
         try {
             OperationResult<Column<Long>> column =
-                    keyspace.prepareQuery( EDGE_SHARD_COUNTS ).getKey( rowKey ).getColumn( key.getShardId() ).execute();
+                    keyspace.prepareQuery( EDGE_SHARD_COUNTS ).getKey( rowKey ).getColumn( key.shardId ).execute();
 
             return column.getResult().getLongValue();
         }
@@ -136,4 +139,68 @@ public class NodeShardCounterSerializationImpl implements NodeShardCounterSerial
                         ColumnTypes.LONG_TYPE_REVERSED, CounterColumnType.class.getSimpleName(),
                         MultiTennantColumnFamilyDefinition.CacheOption.KEYS ) );
     }
+
+
+
+    private static class ShardKeySerializer implements CompositeFieldSerializer<ShardKey> {
+
+        private static final IdRowCompositeSerializer ID_SER = IdRowCompositeSerializer.get();
+
+
+        @Override
+        public void toComposite( final CompositeBuilder builder, final ShardKey key ) {
+
+            ID_SER.toComposite( builder, key.nodeId );
+
+            builder.addInteger( getValue( key.nodeType ) );
+
+            builder.addLong( key.shardId );
+
+            builder.addInteger( key.edgeTypes.length );
+
+            for ( String type : key.edgeTypes ) {
+                builder.addString( type );
+            }
+        }
+
+
+        @Override
+        public ShardKey fromComposite( final CompositeParser composite ) {
+
+            final Id sourceId = ID_SER.fromComposite( composite );
+
+            final NodeType type = getType( composite.readInteger() );
+
+            final long shardId = composite.readLong();
+
+            final int length = composite.readInteger();
+
+            String[] types = new String[length];
+
+            for ( int i = 0; i < length; i++ ) {
+                types[i] = composite.readString();
+            }
+
+            return new ShardKey(null, sourceId, type, shardId, types);
+        }
+
+
+        private int getValue( NodeType type ) {
+            if ( type == NodeType.SOURCE ) {
+                return 0;
+            }
+
+            return 1;
+        }
+
+
+        public NodeType getType( int value ) {
+            if ( value == 0 ) {
+                return NodeType.SOURCE;
+            }
+
+            return NodeType.TARGET;
+        }
+    }
+
 }
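
For illustration only (not part of this commit): why ShardKeySerializer#fromComposite can pass a null scope. The ApplicationScope is not written into the column composite; it is carried by the ScopedRowKey wrapper built in flush() and getCount(), so the scope is re-attached at the row-key level rather than being round-tripped through the composite.

    // Sketch mirroring the calls in flush()/getCount() above.
    final ScopedRowKey<ApplicationScope, ShardKey> rowKey = ScopedRowKey.fromKey( key.scope, key );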

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
index 55a761b..8467042 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/ShardKey.java
@@ -30,11 +30,11 @@ import org.apache.usergrid.persistence.model.entity.Id;
  * Key for shards and counts
  */
 public class ShardKey {
-    private final ApplicationScope scope;
-    private final Id nodeId;
-    private final long shardId;
-    private final NodeType nodeType;
-    private final String[] edgeTypes;
+    public final ApplicationScope scope;
+    public final Id nodeId;
+    public final long shardId;
+    public final NodeType nodeType;
+    public final String[] edgeTypes;
 
 
     public ShardKey( final ApplicationScope scope, final Id nodeId, final NodeType nodeType, final long shardId, final String... edgeTypes ) {
@@ -47,32 +47,6 @@ public class ShardKey {
 
 
 
-
-    public ApplicationScope getScope() {
-        return scope;
-    }
-
-
-    public Id getNodeId() {
-        return nodeId;
-    }
-
-
-    public long getShardId() {
-        return shardId;
-    }
-
-
-    public String[] getEdgeTypes() {
-        return edgeTypes;
-    }
-
-
-    public NodeType getNodeType() {
-        return nodeType;
-    }
-
-
     @Override
     public boolean equals( final Object o ) {
         if ( this == o ) {


[05/11] Finished refactor. Need to continue on shard allocation

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/69a3faa2/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 383e32c..e582455 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -76,396 +76,397 @@ public class NodeShardAllocationTest {
         when( graphFig.getShardSize() ).thenReturn( 20000l );
         when( graphFig.getShardCacheTimeout()).thenReturn( 30000l );
     }
-
-
-
-    @Test
-    public void minTime() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
-
-        final long returned = approximation.getMinTime();
-
-        assertEquals("Correct time was returned", expected, returned);
-    }
-
-
-    @Test
-    public void noShards() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-        /**
-         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
-
-        assertFalse( "No shard allocated", result );
-    }
-
-
-
-
-
-    @Test
-    public void existingFutureShardSameTime() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
-
-        /**
-         * Mock up returning a min shard, and a future shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
-
-        assertFalse( "No shard allocated", result );
-    }
-
-
-    @Test
-    public void lowCountFutureShard() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-
-        /**
-         * Mock up returning a min shard, and a future shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
-
-
-        //return a shard size < our max by 1
-
-        final long count = graphFig.getShardSize() - 1;
-
-        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
-
-        assertFalse( "Shard allocated", result );
-    }
-
-
-    @Test
-    public void equalCountFutureShard() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock(MutationBatch.class);
-
-        when(keyspace.prepareMutationBatch()).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        final long timeservicetime = System.currentTimeMillis();
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-
-        /**
-         * Mock up returning a min shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
-
-
-        final long shardCount = graphFig.getShardSize();
-
-        //return a shard size equal to our max
-        when( nodeShardApproximation
-                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
-                .thenReturn( shardCount );
-
-        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
-        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
-
-
-        //mock up our mutation
-        when( edgeShardSerialization
-                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
-                .thenReturn( mock( MutationBatch.class ) );
-
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
-
-        assertTrue( "Shard allocated", result );
-
-        //check our new allocated UUID
-
-
-        final long savedTimestamp = timestampValue.getValue();
-
-
-
-
-
-        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
-
-        //now check our max value was set
-    }
-
-
-
-
-    @Test
-    public void futureCountShardCleanup() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock(MutationBatch.class);
-
-        when(keyspace.prepareMutationBatch()).thenReturn( batch );
-
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-
-        /**
-         * Use the time service to generate UUIDS
-         */
-        final long timeservicetime = System.currentTimeMillis();
-
-
-        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-
-        assertTrue("Shard cache mocked", graphFig.getShardCacheTimeout() > 0);
-
-
-        /**
-         * Simulates clock drift when 2 nodes create future shards near one another
-         */
-        final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
-
-
-        final Shard minShard = new Shard(0l, 0l);
-
-        /**
-         * Simulate slow node
-         */
-
-        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3 should be removed
-        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
-
-        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
-
-        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
-
-        /**
-         * Mock up returning a min shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList(futureShard3, futureShard2, futureShard1, minShard).iterator() );
-
-
-
-        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
-
-
-
-
-        //mock up our mutation
-        when( edgeShardSerialization
-                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
-                .thenReturn( mock( MutationBatch.class ) );
-
-
-        final Iterator<Shard>
-                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
-
-
-        assertTrue( "Shards present", result.hasNext() );
-
-        assertEquals("Only single next shard returned", futureShard2,  result.next());
-
-        assertTrue("Shards present", result.hasNext());
-
-        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
-
-        assertFalse("No shards left", result.hasNext());
-
-        /**
-         * Now we need to verify that both our mutations have been added
-         */
-
-        List<Long> values = newLongValue.getAllValues();
-
-        assertEquals("2 values removed", 2,  values.size());
-
-        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
-        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
-
-    }
-
-
-
-
-    @Test
-    public void noShardsReturns() {
-        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
-
-
-        final TimeService timeService = mock( TimeService.class );
-
-        final Keyspace keyspace = mock( Keyspace.class );
-
-        final MutationBatch batch = mock( MutationBatch.class );
-
-        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-
-        NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
-
-        final Id nodeId = createId( "test" );
-        final String type = "type";
-        final String subType = "subType";
-
-        /**
-         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-         */
-        when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-
-        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
-                subType );
-
-        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
-
-        assertFalse( "No shard allocated", result.hasNext() );
-    }
+//
+//
+//
+//    @Test
+//    public void minTime() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardCounterSerialization =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
+//                        graphFig, keyspace );
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
+//
+//        final long returned = approximation.getMinTime();
+//
+//        assertEquals("Correct time was returned", expected, returned);
+//    }
+//
+//
+//    @Test
+//    public void noShards() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardCounterSerialization =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//        /**
+//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
+//                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
+//
+//        assertFalse( "No shard allocated", result );
+//    }
+//
+//
+//
+//
+//
+//    @Test
+//    public void existingFutureShardSameTime() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardCounterSerialization =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
+//
+//        /**
+//         * Mock up returning a min shard, and a future shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
+//                        same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
+//
+//        assertFalse( "No shard allocated", result );
+//    }
+//
+//
+//    @Test
+//    public void lowCountFutureShard() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//
+//        /**
+//         * Mock up returning a min shard, and a future shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
+//                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
+//
+//
+//        //return a shard size < our max by 1
+//
+//        final long count = graphFig.getShardSize() - 1;
+//
+//        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
+//
+//        assertFalse( "Shard allocated", result );
+//    }
+//
+//
+//    @Test
+//    public void equalCountFutureShard() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock(MutationBatch.class);
+//
+//        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//
+//        /**
+//         * Mock up returning a min shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
+//                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
+//
+//
+//        final long shardCount = graphFig.getShardSize();
+//
+//        //return a shard size equal to our max
+//        when( nodeShardApproximation
+//                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
+//                .thenReturn( shardCount );
+//
+//        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
+//        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
+//
+//
+//        //mock up our mutation
+//        when( edgeShardSerialization
+//                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
+//                .thenReturn( mock( MutationBatch.class ) );
+//
+//
+//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
+//
+//        assertTrue( "Shard allocated", result );
+//
+//        //check our new allocated UUID
+//
+//
+//        final long savedTimestamp = timestampValue.getValue();
+//
+//
+//
+//
+//
+//        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
+//
+//        //now check our max value was set
+//    }
+//
+//
+//
+//
+//    @Test
+//    public void futureCountShardCleanup() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock(MutationBatch.class);
+//
+//        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+//
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//
+//        /**
+//         * Use the time service to generate UUIDS
+//         */
+//        final long timeservicetime = System.currentTimeMillis();
+//
+//
+//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+//
+//        assertTrue("Shard cache mocked", graphFig.getShardCacheTimeout() > 0);
+//
+//
+//        /**
+//         * Simulates clock drift when 2 nodes create future shards near one another
+//         */
+//        final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
+//
+//
+//        final Shard minShard = new Shard(0l, 0l);
+//
+//        /**
+//         * Simulate slow node
+//         */
+//
+//        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3 should be removed
+//        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
+//
+//        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
+//
+//        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
+//
+//        /**
+//         * Mock up returning a min shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
+//                        same( subType ) ) ).thenReturn(
+//                Arrays.asList( futureShard3, futureShard2, futureShard1, minShard ).iterator() );
+//
+//
+//
+//        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
+//
+//
+//
+//
+//        //mock up our mutation
+//        when( edgeShardSerialization
+//                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
+//                .thenReturn( mock( MutationBatch.class ) );
+//
+//
+//        final Iterator<Shard>
+//                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+//
+//
+//        assertTrue( "Shards present", result.hasNext() );
+//
+//        assertEquals("Only single next shard returned", futureShard2,  result.next());
+//
+//        assertTrue("Shards present", result.hasNext());
+//
+//        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
+//
+//        assertFalse("No shards left", result.hasNext());
+//
+//        /**
+//         * Now we need to verify that both our mutations have been added
+//         */
+//
+//        List<Long> values = newLongValue.getAllValues();
+//
+//        assertEquals("2 values removed", 2,  values.size());
+//
+//        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
+//        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
+//
+//    }
+//
+//
+//
+//
+//    @Test
+//    public void noShardsReturns() {
+//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+//
+//        final NodeShardApproximation nodeShardApproximation =
+//                mock( NodeShardApproximation.class );
+//
+//
+//        final TimeService timeService = mock( TimeService.class );
+//
+//        final Keyspace keyspace = mock( Keyspace.class );
+//
+//        final MutationBatch batch = mock( MutationBatch.class );
+//
+//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+//
+//        NodeShardAllocation approximation =
+//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
+//                        graphFig, keyspace );
+//
+//        final Id nodeId = createId( "test" );
+//        final String type = "type";
+//        final String subType = "subType";
+//
+//        /**
+//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+//         */
+//        when( edgeShardSerialization
+//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
+//                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+//
+//        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
+//                subType );
+//
+//        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
+//
+//        assertFalse( "No shard allocated", result.hasNext() );
+//    }
 
 }


[08/11] git commit: Tests passing, need to create more tests

Posted by to...@apache.org.
Tests passing, need to create more tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/f3b7ead7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/f3b7ead7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/f3b7ead7

Branch: refs/heads/USERGRID-188
Commit: f3b7ead7736385f85b214c87807e995de9275eae
Parents: 69a3faa
Author: Todd Nine <tn...@apigee.com>
Authored: Thu Jul 17 16:54:56 2014 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Thu Jul 17 16:54:56 2014 -0600

----------------------------------------------------------------------
 .../shard/impl/NodeShardAllocationImpl.java     | 102 ++-
 .../impl/shard/NodeShardAllocationTest.java     | 830 ++++++++++---------
 2 files changed, 522 insertions(+), 410 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/f3b7ead7/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
index 20cb921..b808f2f 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/NodeShardAllocationImpl.java
@@ -26,18 +26,28 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.commons.collections4.iterators.PushbackIterator;
 
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
+import org.apache.usergrid.persistence.graph.Edge;
 import org.apache.usergrid.persistence.graph.GraphFig;
+import org.apache.usergrid.persistence.graph.MarkedEdge;
 import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
+import org.apache.usergrid.persistence.graph.impl.SimpleSearchByEdgeType;
+import org.apache.usergrid.persistence.graph.impl.SimpleSearchByIdType;
 import org.apache.usergrid.persistence.graph.serialization.EdgeSerialization;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeColumnFamilies;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.EdgeShardSerialization;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardAllocation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeShardApproximation;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.NodeType;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.Shard;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardEntries;
+import org.apache.usergrid.persistence.graph.serialization.impl.shard.ShardedEdgeSerialization;
 import org.apache.usergrid.persistence.model.entity.Id;
 
 import com.google.common.base.Optional;
@@ -53,11 +63,13 @@ import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
+    private static final Logger LOG = LoggerFactory.getLogger(NodeShardAllocationImpl.class);
+
     private static final MinShardTimeComparator MIN_SHARD_TIME_COMPARATOR = new MinShardTimeComparator();
 
     private final EdgeShardSerialization edgeShardSerialization;
-    private final EdgeSerialization edgeSerialization;
-    //    private final NodeShardCounterSerialization edgeShardCounterSerialization;
+    private final EdgeColumnFamilies edgeColumnFamilies;
+    private final ShardedEdgeSerialization shardedEdgeSerialization;
     private final NodeShardApproximation nodeShardApproximation;
     private final TimeService timeService;
     private final GraphFig graphFig;
@@ -66,11 +78,13 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
     @Inject
     public NodeShardAllocationImpl( final EdgeShardSerialization edgeShardSerialization,
-                                    final EdgeSerialization edgeSerialization,
+                                    final EdgeColumnFamilies edgeColumnFamilies,
+                                    final ShardedEdgeSerialization shardedEdgeSerialization,
                                     final NodeShardApproximation nodeShardApproximation, final TimeService timeService,
                                     final GraphFig graphFig, final Keyspace keyspace ) {
         this.edgeShardSerialization = edgeShardSerialization;
-        this.edgeSerialization = edgeSerialization;
+        this.edgeColumnFamilies = edgeColumnFamilies;
+        this.shardedEdgeSerialization = shardedEdgeSerialization;
         this.nodeShardApproximation = nodeShardApproximation;
         this.timeService = timeService;
         this.graphFig = graphFig;
@@ -159,7 +173,8 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
     public boolean auditMaxShard( final ApplicationScope scope, final Id nodeId, final NodeType nodeType,
                                   final String... edgeType ) {
 
-        final Iterator<Shard> maxShards = getShards( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
+        final Iterator<Shard> maxShards =  edgeShardSerialization.getEdgeMetaData( scope, nodeId, nodeType, Optional.<Shard>absent(), edgeType );
+
 
 
         //if the first shard has already been allocated, do nothing.
@@ -171,11 +186,20 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
         final Shard maxShard = maxShards.next();
 
+
+
         /**
-         * Check out if we have a count for our shard allocation
+         * Nothing to do; this shard was created very recently, so we don't allocate a new one
          */
+        if(maxShard.getCreatedTime() >= getMinTime()){
+            return false;
+        }
 
 
+        /**
+        * Check out if we have a count for our shard allocation
+        */
+
         final long count =
                 nodeShardApproximation.getCount( scope, nodeId, nodeType, maxShard.getShardIndex(), edgeType );
 
@@ -185,23 +209,63 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
 
 
         /**
-         * TODO, use the EdgeShardStrategy and ShardEdgeSerialization to audit this shard
+         * Allocate the shard
+         */
+
+        Iterator<MarkedEdge> edges;
+
+        /**
+         * This is fugly, I think our allocation interface needs to get more declarative
          */
+        if(nodeType == NodeType.SOURCE){
+
+            if(edgeType.length == 1){
+                edges = shardedEdgeSerialization.getEdgesFromSource(edgeColumnFamilies, scope, new SimpleSearchByEdgeType( nodeId,  edgeType[0], Long.MAX_VALUE, null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator()) ;
+            }
 
-        //get the max edge, in this shard, and write it.
+            else if(edgeType.length == 2){
+                edges = shardedEdgeSerialization.getEdgesFromSourceByTargetType(edgeColumnFamilies, scope, new SimpleSearchByIdType( nodeId,  edgeType[0], Long.MAX_VALUE,  edgeType[1], null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator());
+            }
 
+           else{
+                throw new UnsupportedOperationException( "More than 2 edge types aren't supported" );
+            }
 
-        //try to get a lock here, and fail if one isn't present
+        }    else{
+
+            if(edgeType.length == 1){
+                edges = shardedEdgeSerialization.getEdgesToTarget(edgeColumnFamilies, scope, new SimpleSearchByEdgeType( nodeId,  edgeType[0], Long.MAX_VALUE, null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator()) ;
+            }
+
+            else if(edgeType.length == 2){
+                edges = shardedEdgeSerialization.getEdgesToTargetBySourceType(edgeColumnFamilies, scope, new SimpleSearchByIdType( nodeId,  edgeType[0], Long.MAX_VALUE,  edgeType[1], null), Collections.singleton( new ShardEntries(Collections.singleton( maxShard ))).iterator());
+            }
+
+           else{
+                throw new UnsupportedOperationException( "More than 2 edge types aren't supported" );
+            }
+        }
+
+
+        if(!edges.hasNext()){
+            LOG.warn( "Tried to allocate a new shard for node id {} with edge types {}, but no max value could be found in that row", nodeId, edgeType );
+            return false;
+        }
+
+        //we have a next, allocate it based on the max
+
+        MarkedEdge marked = edges.next();
+
+        final long createTimestamp = timeService.getCurrentTime();
+
+
+        try {
+            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, nodeType,  marked.getTimestamp(), createTimestamp, edgeType ).execute();
+        }
+        catch ( ConnectionException e ) {
+            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
+        }
 
-        //        final long newShardTime = timeService.getCurrentTime() + graphFig.getShardCacheTimeout() * 2;
-        //
-        //
-        //        try {
-        //            this.edgeShardSerialization.writeEdgeMeta( scope, nodeId, newShardTime, edgeType ).execute();
-        //        }
-        //        catch ( ConnectionException e ) {
-        //            throw new GraphRuntimeException( "Unable to write the new edge metadata" );
-        //        }
 
 
         return true;
@@ -230,4 +294,6 @@ public class NodeShardAllocationImpl implements NodeShardAllocation {
             return result;
         }
     }
+
+
 }
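The hunk above reworks auditMaxShard so that a new shard is written only when the current max shard is old enough, has reached the configured shard size, and still contains at least one edge to pivot on. A minimal, self-contained sketch of that decision flow follows; it is an illustration only, with hypothetical Shard and ShardStore types standing in for EdgeShardSerialization, NodeShardApproximation and ShardedEdgeSerialization, and with the thresholds passed in rather than read from GraphFig and TimeService.

    import java.util.Iterator;
    import java.util.Optional;

    // Hypothetical, simplified model of the shard audit in the hunk above.
    final class ShardAuditSketch {

        static final class Shard {
            final long shardIndex;   // pivot timestamp of the shard row
            final long createdTime;  // when the shard entry was written
            Shard( final long shardIndex, final long createdTime ) {
                this.shardIndex = shardIndex;
                this.createdTime = createdTime;
            }
        }

        // Stand-in for the EdgeShardSerialization / NodeShardApproximation /
        // ShardedEdgeSerialization collaborators used by the real implementation.
        interface ShardStore {
            Iterator<Shard> shards();                                   // newest shard first
            long countForShard( long shardIndex );                      // approximate edge count
            Optional<Long> maxEdgeTimestampInShard( long shardIndex );  // newest edge, if any
            void writeShard( long shardPivot, long createdTime );
        }

        /** Returns true only when a new shard entry was written. */
        static boolean auditMaxShard( final ShardStore store, final long now,
                                      final long minCreateTime, final long maxShardSize ) {

            final Iterator<Shard> shards = store.shards();

            if ( !shards.hasNext() ) {
                return false;                                 // nothing allocated yet, nothing to audit
            }

            final Shard maxShard = shards.next();

            if ( maxShard.createdTime >= minCreateTime ) {
                return false;                                 // created too recently, leave it alone
            }

            if ( store.countForShard( maxShard.shardIndex ) < maxShardSize ) {
                return false;                                 // not full yet
            }

            final Optional<Long> maxEdge = store.maxEdgeTimestampInShard( maxShard.shardIndex );

            if ( !maxEdge.isPresent() ) {
                return false;                                 // no edge to pivot on; the real code logs a warning
            }

            store.writeShard( maxEdge.get(), now );           // new shard pivots at the max edge timestamp
            return true;
        }
    }

In the actual method the edge iterator comes from getEdgesFromSource/getEdgesToTarget (or their by-type variants) depending on the NodeType and on whether one or two edge types were supplied.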

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/f3b7ead7/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index e582455..42b3dc2 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -31,6 +31,9 @@ import org.mockito.ArgumentCaptor;
 import org.apache.usergrid.persistence.core.consistency.TimeService;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
+import org.apache.usergrid.persistence.graph.MarkedEdge;
+import org.apache.usergrid.persistence.graph.SearchByIdType;
+import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardAllocationImpl;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.util.UUIDGenerator;
@@ -76,397 +79,440 @@ public class NodeShardAllocationTest {
         when( graphFig.getShardSize() ).thenReturn( 20000l );
         when( graphFig.getShardCacheTimeout()).thenReturn( 30000l );
     }
-//
-//
-//
-//    @Test
-//    public void minTime() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardCounterSerialization =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
-//                        graphFig, keyspace );
-//
-//
-//        final long timeservicetime = System.currentTimeMillis();
-//
-//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-//
-//        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
-//
-//        final long returned = approximation.getMinTime();
-//
-//        assertEquals("Correct time was returned", expected, returned);
-//    }
-//
-//
-//    @Test
-//    public void noShards() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardCounterSerialization =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//        final MutationBatch batch = mock( MutationBatch.class );
-//
-//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//        /**
-//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
-//                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-//
-//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
-//
-//        assertFalse( "No shard allocated", result );
-//    }
-//
-//
-//
-//
-//
-//    @Test
-//    public void existingFutureShardSameTime() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardCounterSerialization =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//
-//        final MutationBatch batch = mock( MutationBatch.class );
-//
-//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-//
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardCounterSerialization, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//
-//        final long timeservicetime = System.currentTimeMillis();
-//
-//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-//
-//        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
-//
-//        /**
-//         * Mock up returning a min shard, and a future shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
-//                        same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
-//
-//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
-//
-//        assertFalse( "No shard allocated", result );
-//    }
-//
-//
-//    @Test
-//    public void lowCountFutureShard() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardApproximation =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//        final MutationBatch batch = mock( MutationBatch.class );
-//
-//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-//
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//
-//        final long timeservicetime = System.currentTimeMillis();
-//
-//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-//
-//
-//        /**
-//         * Mock up returning a min shard, and a future shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-//                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
-//
-//
-//        //return a shard size < our max by 1
-//
-//        final long count = graphFig.getShardSize() - 1;
-//
-//        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
-//
-//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
-//
-//        assertFalse( "Shard allocated", result );
-//    }
-//
-//
-//    @Test
-//    public void equalCountFutureShard() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardApproximation =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//        final MutationBatch batch = mock(MutationBatch.class);
-//
-//        when(keyspace.prepareMutationBatch()).thenReturn( batch );
-//
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//
-//        final long timeservicetime = System.currentTimeMillis();
-//
-//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-//
-//
-//        /**
-//         * Mock up returning a min shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
-//                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
-//
-//
-//        final long shardCount = graphFig.getShardSize();
-//
-//        //return a shard size equal to our max
-//        when( nodeShardApproximation
-//                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
-//                .thenReturn( shardCount );
-//
-//        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
-//        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
-//
-//
-//        //mock up our mutation
-//        when( edgeShardSerialization
-//                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
-//                .thenReturn( mock( MutationBatch.class ) );
-//
-//
-//        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
-//
-//        assertTrue( "Shard allocated", result );
-//
-//        //check our new allocated UUID
-//
-//
-//        final long savedTimestamp = timestampValue.getValue();
-//
-//
-//
-//
-//
-//        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
-//
-//        //now check our max value was set
-//    }
-//
-//
-//
-//
-//    @Test
-//    public void futureCountShardCleanup() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardApproximation =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//        final MutationBatch batch = mock(MutationBatch.class);
-//
-//        when(keyspace.prepareMutationBatch()).thenReturn( batch );
-//
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//
-//        /**
-//         * Use the time service to generate UUIDS
-//         */
-//        final long timeservicetime = System.currentTimeMillis();
-//
-//
-//        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
-//
-//        assertTrue("Shard cache mocked", graphFig.getShardCacheTimeout() > 0);
-//
-//
-//        /**
-//         * Simulates clock drift when 2 nodes create future shards near one another
-//         */
-//        final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
-//
-//
-//        final Shard minShard = new Shard(0l, 0l);
-//
-//        /**
-//         * Simulate slow node
-//         */
-//
-//        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3 should be removed
-//        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
-//
-//        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
-//
-//        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
-//
-//        /**
-//         * Mock up returning a min shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
-//                        same( subType ) ) ).thenReturn(
-//                Arrays.asList( futureShard3, futureShard2, futureShard1, minShard ).iterator() );
-//
-//
-//
-//        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
-//
-//
-//
-//
-//        //mock up our mutation
-//        when( edgeShardSerialization
-//                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
-//                .thenReturn( mock( MutationBatch.class ) );
-//
-//
-//        final Iterator<Shard>
-//                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
-//
-//
-//        assertTrue( "Shards present", result.hasNext() );
-//
-//        assertEquals("Only single next shard returned", futureShard2,  result.next());
-//
-//        assertTrue("Shards present", result.hasNext());
-//
-//        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
-//
-//        assertFalse("No shards left", result.hasNext());
-//
-//        /**
-//         * Now we need to verify that both our mutations have been added
-//         */
-//
-//        List<Long> values = newLongValue.getAllValues();
-//
-//        assertEquals("2 values removed", 2,  values.size());
-//
-//        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
-//        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
-//
-//    }
-//
-//
-//
-//
-//    @Test
-//    public void noShardsReturns() {
-//        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
-//
-//        final NodeShardApproximation nodeShardApproximation =
-//                mock( NodeShardApproximation.class );
-//
-//
-//        final TimeService timeService = mock( TimeService.class );
-//
-//        final Keyspace keyspace = mock( Keyspace.class );
-//
-//        final MutationBatch batch = mock( MutationBatch.class );
-//
-//        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
-//
-//        NodeShardAllocation approximation =
-//                new NodeShardAllocationImpl( edgeShardSerialization, edgeSerialization, nodeShardApproximation, timeService,
-//                        graphFig, keyspace );
-//
-//        final Id nodeId = createId( "test" );
-//        final String type = "type";
-//        final String subType = "subType";
-//
-//        /**
-//         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-//         */
-//        when( edgeShardSerialization
-//                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-//                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
-//
-//        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
-//                subType );
-//
-//        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
-//
-//        assertFalse( "No shard allocated", result.hasNext() );
-//    }
+
+
+
+    @Test
+    public void minTime() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardCounterSerialization =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies,  shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
+                        graphFig, keyspace );
+
+
+        final long timeservicetime = System.currentTimeMillis();
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+        final long expected = timeservicetime - 2 * graphFig.getShardCacheTimeout();
+
+        final long returned = approximation.getMinTime();
+
+        assertEquals("Correct time was returned", expected, returned);
+    }
+
+
+    @Test
+    public void noShards() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+
+        final NodeShardApproximation nodeShardCounterSerialization =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock( MutationBatch.class );
+
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+        /**
+         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
+                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
+
+        assertFalse( "No shard allocated", result );
+    }
+
+
+
+
+
+    @Test
+    public void existingFutureShardSameTime() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardCounterSerialization =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+
+        final MutationBatch batch = mock( MutationBatch.class );
+
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+
+        final long timeservicetime = System.currentTimeMillis();
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
+
+        /**
+         * Mock up returning a min shard, and a future shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
+                        same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
+
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
+
+        assertFalse( "No shard allocated", result );
+    }
+
+
+    @Test
+    public void lowCountFutureShard() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardApproximation =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock( MutationBatch.class );
+
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+
+        final long timeservicetime = System.currentTimeMillis();
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+
+        /**
+         * Mock up returning a min shard, and a future shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
+                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
+
+
+        //return a shard size < our max by 1
+
+        final long count = graphFig.getShardSize() - 1;
+
+        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
+
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
+
+        assertFalse( "Shard allocated", result );
+    }
+
+
+    @Test
+    public void equalCountFutureShard() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardApproximation =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock(MutationBatch.class);
+
+        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+
+        final long timeservicetime = System.currentTimeMillis();
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+
+        /**
+         * Mock up returning a min shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
+                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
+
+
+        final long shardCount = graphFig.getShardSize();
+
+        //return a shard size equal to our max
+        when( nodeShardApproximation
+                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
+                .thenReturn( shardCount );
+
+        ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
+        ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
+
+
+
+
+        //mock up our mutation
+        when( edgeShardSerialization
+                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
+                .thenReturn( mock( MutationBatch.class ) );
+
+
+        final SimpleMarkedEdge returnedEdge = new SimpleMarkedEdge( nodeId, type, createId("subType"), 10005l, false );
+        final Iterator<MarkedEdge> edgeIterator = Collections.singleton( (MarkedEdge)returnedEdge ).iterator();
+
+        //mock up returning the value
+        when( shardedEdgeSerialization.getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ),
+                any( SearchByIdType.class ), any(Iterator.class) )).thenReturn( edgeIterator );
+
+
+
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
+
+        assertTrue( "Shard allocated", result );
+
+        //check our newly allocated shard timestamp
+
+
+        final long savedTimestamp = timestampValue.getValue();
+
+
+        assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
+
+
+
+        //now check our max value was set
+
+        final long savedShardPivot = shardValue.getValue();
+
+        assertEquals("Expected max value to be the same", returnedEdge.getTimestamp(), savedShardPivot);
+    }
+
+
+
+
+    @Test
+    public void futureCountShardCleanup() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardApproximation =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock(MutationBatch.class);
+
+        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl(edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+
+        /**
+         * Use the time service to generate timestamps
+         */
+        final long timeservicetime = System.currentTimeMillis();
+
+
+        when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
+
+        assertTrue("Shard cache mocked", graphFig.getShardCacheTimeout() > 0);
+
+
+        /**
+         * Simulates clock drift when 2 nodes create future shards near one another
+         */
+        final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
+
+
+        final Shard minShard = new Shard(0l, 0l);
+
+        /**
+         * Simulate slow node
+         */
+
+        //our second shard is the "oldest", and hence should be returned by the iterator.  Future shards 1 and 3 should be removed
+        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
+
+        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
+
+        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
+
+        /**
+         * Mock up returning a min shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
+                        same( subType ) ) ).thenReturn(
+                Arrays.asList( futureShard3, futureShard2, futureShard1, minShard ).iterator() );
+
+
+
+        ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
+
+
+
+
+        //mock up our mutation
+        when( edgeShardSerialization
+                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
+                .thenReturn( mock( MutationBatch.class ) );
+
+
+        final Iterator<Shard>
+                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+
+
+        assertTrue( "Shards present", result.hasNext() );
+
+        assertEquals("Only single next shard returned", futureShard2,  result.next());
+
+        assertTrue("Shards present", result.hasNext());
+
+        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
+
+        assertFalse("No shards left", result.hasNext());
+
+        /**
+         * Now we need to verify that both our mutations have been added
+         */
+
+        List<Long> values = newLongValue.getAllValues();
+
+        assertEquals("2 values removed", 2,  values.size());
+
+        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
+        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
+
+    }
+
+
+
+
+    @Test
+    public void noShardsReturns() {
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+
+              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+
+        final NodeShardApproximation nodeShardApproximation =
+                mock( NodeShardApproximation.class );
+
+
+        final TimeService timeService = mock( TimeService.class );
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock( MutationBatch.class );
+
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
+                        graphFig, keyspace );
+
+        final Id nodeId = createId( "test" );
+        final String type = "type";
+        final String subType = "subType";
+
+        /**
+         * Mock up returning an empty iterator, our audit shouldn't create a new shard
+         */
+        when( edgeShardSerialization
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
+                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+
+        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
+                subType );
+
+        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
+
+        assertFalse( "No shard allocated", result.hasNext() );
+    }
 
 }
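The replacement tests above use Mockito ArgumentCaptor instances to pin down the shard pivot and create timestamp handed to writeEdgeMeta. For reference, a standalone illustration of that pattern follows; the ShardWriter interface is a hypothetical stand-in for EdgeShardSerialization and the values are arbitrary. The tests in the diff capture during stubbing with when(...); capturing during verify(...), as below, is the more common form and serves the same purpose.

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    import org.junit.Test;
    import org.mockito.ArgumentCaptor;

    public class ArgumentCaptorSketchTest {

        // Hypothetical collaborator standing in for EdgeShardSerialization.
        interface ShardWriter {
            void writeShard( long shardPivot, long createdTime );
        }

        @Test
        public void capturesArgumentsPassedToTheMock() {
            final ShardWriter writer = mock( ShardWriter.class );

            // the code under test would normally make this call
            writer.writeShard( 10005L, 42L );

            final ArgumentCaptor<Long> pivot = ArgumentCaptor.forClass( Long.class );
            final ArgumentCaptor<Long> created = ArgumentCaptor.forClass( Long.class );

            // verify the interaction and capture the arguments that were actually passed
            verify( writer ).writeShard( pivot.capture(), created.capture() );

            assertEquals( 10005L, pivot.getValue().longValue() );
            assertEquals( 42L, created.getValue().longValue() );
        }
    }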


[09/11] Checkpoint, still a WIP. Broken stuff.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 42b3dc2..8c60e2c 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -20,9 +20,9 @@ package org.apache.usergrid.persistence.graph.serialization.impl.shard;
 
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
-import java.util.List;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -33,6 +33,7 @@ import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.graph.GraphFig;
 import org.apache.usergrid.persistence.graph.MarkedEdge;
 import org.apache.usergrid.persistence.graph.SearchByIdType;
+import org.apache.usergrid.persistence.graph.exception.GraphRuntimeException;
 import org.apache.usergrid.persistence.graph.impl.SimpleMarkedEdge;
 import org.apache.usergrid.persistence.graph.serialization.impl.shard.impl.NodeShardAllocationImpl;
 import org.apache.usergrid.persistence.model.entity.Id;
@@ -46,6 +47,7 @@ import static junit.framework.TestCase.assertTrue;
 import static org.apache.usergrid.persistence.graph.test.util.EdgeTestUtils.createId;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.same;
@@ -77,21 +79,22 @@ public class NodeShardAllocationTest {
 
         when( graphFig.getShardCacheSize() ).thenReturn( 10000l );
         when( graphFig.getShardSize() ).thenReturn( 20000l );
-        when( graphFig.getShardCacheTimeout()).thenReturn( 30000l );
-    }
 
+        final long timeout = 30000;
+        when( graphFig.getShardCacheTimeout() ).thenReturn( timeout );
+        when( graphFig.getShardMinDelta() ).thenReturn( timeout * 2 );
+    }
 
 
     @Test
     public void minTime() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
@@ -100,8 +103,8 @@ public class NodeShardAllocationTest {
 
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies,  shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
 
 
         final long timeservicetime = System.currentTimeMillis();
@@ -112,7 +115,7 @@ public class NodeShardAllocationTest {
 
         final long returned = approximation.getMinTime();
 
-        assertEquals("Correct time was returned", expected, returned);
+        assertEquals( "Correct time was returned", expected, returned );
     }
 
 
@@ -120,13 +123,12 @@ public class NodeShardAllocationTest {
     public void noShards() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
 
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
@@ -138,8 +140,8 @@ public class NodeShardAllocationTest {
         when( keyspace.prepareMutationBatch() ).thenReturn( batch );
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -149,8 +151,8 @@ public class NodeShardAllocationTest {
          * Mock up returning an empty iterator, our audit shouldn't create a new shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
 
         final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
 
@@ -158,19 +160,15 @@ public class NodeShardAllocationTest {
     }
 
 
-
-
-
     @Test
     public void existingFutureShardSameTime() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardCounterSerialization =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardCounterSerialization = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
@@ -184,8 +182,8 @@ public class NodeShardAllocationTest {
 
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardCounterSerialization, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardCounterSerialization, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -196,16 +194,16 @@ public class NodeShardAllocationTest {
 
         when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
 
-        final Shard futureShard =  new Shard(10000l, timeservicetime) ;
+        final Shard futureShard = new Shard( 10000l, timeservicetime );
 
         /**
          * Mock up returning a min shard, and a future shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( futureShard ).iterator() );
 
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET,  type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
 
         assertFalse( "No shard allocated", result );
     }
@@ -215,12 +213,11 @@ public class NodeShardAllocationTest {
     public void lowCountFutureShard() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
@@ -233,8 +230,8 @@ public class NodeShardAllocationTest {
 
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardApproximation, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -250,15 +247,16 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard, and a future shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard(0l, 0l) ).iterator() );
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
 
 
         //return a shard size < our max by 1
 
         final long count = graphFig.getShardSize() - 1;
 
-        when( nodeShardApproximation.getCount(scope, nodeId, NodeType.TARGET, 0l, type, subType )).thenReturn( count );
+        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.TARGET, 0l, type, subType ) )
+                .thenReturn( count );
 
         final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.TARGET, type, subType );
 
@@ -270,26 +268,25 @@ public class NodeShardAllocationTest {
     public void equalCountFutureShard() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
 
         final Keyspace keyspace = mock( Keyspace.class );
 
-        final MutationBatch batch = mock(MutationBatch.class);
+        final MutationBatch batch = mock( MutationBatch.class );
 
-        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
 
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardApproximation, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -305,39 +302,38 @@ public class NodeShardAllocationTest {
          * Mock up returning a min shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.SOURCE), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn( Arrays.asList( new Shard( 0l, 0l ) ).iterator() );
 
 
         final long shardCount = graphFig.getShardSize();
 
         //return a shard size equal to our max
-        when( nodeShardApproximation
-                .getCount(   scope , nodeId, NodeType.SOURCE, 0l,type , subType  ))
+        when( nodeShardApproximation.getCount( scope, nodeId, NodeType.SOURCE, 0l, type, subType ) )
                 .thenReturn( shardCount );
 
         ArgumentCaptor<Long> shardValue = ArgumentCaptor.forClass( Long.class );
         ArgumentCaptor<Long> timestampValue = ArgumentCaptor.forClass( Long.class );
 
 
-
-
         //mock up our mutation
         when( edgeShardSerialization
-                .writeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.SOURCE), shardValue.capture(), timestampValue.capture(), same( type ), same( subType ) ) )
+                .writeEdgeMeta( same( scope ), same( nodeId ), eq( NodeType.SOURCE ), shardValue.capture(),
+                        timestampValue.capture(), same( type ), same( subType ) ) )
                 .thenReturn( mock( MutationBatch.class ) );
 
 
-        final SimpleMarkedEdge returnedEdge = new SimpleMarkedEdge( nodeId, type, createId("subType"), 10005l, false );
-        final Iterator<MarkedEdge> edgeIterator = Collections.singleton( (MarkedEdge)returnedEdge ).iterator();
+        final SimpleMarkedEdge returnedEdge =
+                new SimpleMarkedEdge( nodeId, type, createId( "subType" ), 10005l, false );
+        final Iterator<MarkedEdge> edgeIterator = Collections.singleton( ( MarkedEdge ) returnedEdge ).iterator();
 
         //mock up returning the value
-        when( shardedEdgeSerialization.getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ),
-                any( SearchByIdType.class ), any(Iterator.class) )).thenReturn( edgeIterator );
+        when( shardedEdgeSerialization
+                .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
+                        any( Iterator.class ) ) ).thenReturn( edgeIterator );
 
 
-
-        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE,  type, subType );
+        final boolean result = approximation.auditMaxShard( scope, nodeId, NodeType.SOURCE, type, subType );
 
         assertTrue( "Shard allocated", result );
 
@@ -350,41 +346,37 @@ public class NodeShardAllocationTest {
         assertEquals( "Expected time service time", timeservicetime, savedTimestamp );
 
 
-
         //now check our max value was set
 
         final long savedShardPivot = shardValue.getValue();
 
-        assertEquals("Expected max value to be the same", returnedEdge.getTimestamp(), savedShardPivot);
+        assertEquals( "Expected max value to be the same", returnedEdge.getTimestamp(), savedShardPivot );
     }
 
 
-
-
     @Test
     public void futureCountShardCleanup() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-        final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-        final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
 
         final Keyspace keyspace = mock( Keyspace.class );
 
-        final MutationBatch batch = mock(MutationBatch.class);
+        final MutationBatch batch = mock( MutationBatch.class );
 
-        when(keyspace.prepareMutationBatch()).thenReturn( batch );
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
 
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl(edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardApproximation, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -392,100 +384,132 @@ public class NodeShardAllocationTest {
 
 
         /**
-         * Use the time service to generate UUIDS
+         * Use the time service to generate timestamps
          */
-        final long timeservicetime = System.currentTimeMillis();
+        final long timeservicetime = 10000;
 
 
         when( timeService.getCurrentTime() ).thenReturn( timeservicetime );
 
-        assertTrue("Shard cache mocked", graphFig.getShardCacheTimeout() > 0);
+        assertTrue( "Shard cache mocked", graphFig.getShardCacheTimeout() > 0 );
 
 
         /**
          * Simulates clock drift when 2 nodes create future shards near one another
          */
-        final long futureTime = timeService.getCurrentTime()  + 2 * graphFig.getShardCacheTimeout();
+        final long minDelta = graphFig.getShardMinDelta();
+
 
+        final Shard minShard = new Shard( 0l, 0l );
 
-        final Shard minShard = new Shard(0l, 0l);
+        //a shard that isn't our minimum, but exists after compaction
+        final Shard compactedShard = new Shard( 5000, 1000 );
 
         /**
-         * Simulate slow node
+         * Simulate different node time allocation
          */
 
-        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3 should be removed
-        final Shard futureShard1 = new Shard(futureTime - 1, timeservicetime+1000);
+        final long minTime = 10000;
+        //our second shard is the "oldest", and hence should be returned in the iterator.  Future shard 1 and 3
+        // should be removed
+
+        //this should get dropped; it's allocated after future shard2 even though its shard time is less
+        final Shard futureShard1 = new Shard( 10000, minTime + minDelta );
 
-        final Shard futureShard2 = new Shard(futureTime + 10000, timeservicetime);
+        //should get kept.
+        final Shard futureShard2 = new Shard( 10005, minTime );
 
-        final Shard futureShard3 = new Shard(futureShard2.getShardIndex() + 10000, timeservicetime+2000);
+        //should be removed
+        final Shard futureShard3 = new Shard( 10010, minTime + minDelta / 2 );
 
         /**
          * Mock up returning a min shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ), same( type ),
-                        same( subType ) ) ).thenReturn(
-                Arrays.asList( futureShard3, futureShard2, futureShard1, minShard ).iterator() );
-
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn(
+                Arrays.asList( futureShard3, futureShard2, futureShard1, compactedShard, minShard ).iterator() );
 
 
         ArgumentCaptor<Long> newLongValue = ArgumentCaptor.forClass( Long.class );
 
 
-
-
         //mock up our mutation
         when( edgeShardSerialization
-                .removeEdgeMeta( same( scope ), same( nodeId ), eq(NodeType.TARGET), newLongValue.capture(), same( type ), same( subType ) ) )
-                .thenReturn( mock( MutationBatch.class ) );
+                .removeEdgeMeta( same( scope ), same( nodeId ), eq( NodeType.TARGET ), newLongValue.capture(),
+                        same( type ), same( subType ) ) ).thenReturn( mock( MutationBatch.class ) );
 
 
-        final Iterator<Shard>
-                result = approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+        final Iterator<ShardEntryGroup> result =
+                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
 
 
         assertTrue( "Shards present", result.hasNext() );
 
-        assertEquals("Only single next shard returned", futureShard2,  result.next());
 
-        assertTrue("Shards present", result.hasNext());
+        ShardEntryGroup shardEntryGroup = result.next();
 
-        assertEquals("Previous shard present", 0l, result.next().getShardIndex());
+        assertEquals( "Future shard returned", futureShard1, shardEntryGroup.getMergeTarget() );
 
-        assertFalse("No shards left", result.hasNext());
 
-        /**
-         * Now we need to verify that both our mutations have been added
-         */
+        //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1) may be the only shard other
+        //nodes see while we're rolling our state.  This means it should be read and merged from as well
 
-        List<Long> values = newLongValue.getAllValues();
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
 
-        assertEquals("2 values removed", 2,  values.size());
+        assertEquals( "Shard size as expected", 4, writeShards.size() );
 
-        assertEquals("Deleted Max Future", futureShard1.getShardIndex(), values.get( 0 ).longValue());
-        assertEquals("Deleted Next Future", futureShard3.getShardIndex(), values.get( 1 ).longValue());
+        assertTrue( writeShards.contains( futureShard1 ) );
+        assertTrue( writeShards.contains( futureShard2 ) );
+        assertTrue( writeShards.contains( futureShard3 ) );
+        assertTrue( writeShards.contains( compactedShard ) );
 
-    }
+
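+        //reading at a time one full min delta in the future should still return every shard in the group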
+        Collection<Shard> readShards = shardEntryGroup.getReadShards( minTime + minDelta );
+
+        assertEquals( "Shard size as expected", 4, readShards.size() );
+
+        assertTrue( readShards.contains( futureShard1 ) );
+        assertTrue( readShards.contains( futureShard2 ) );
+        assertTrue( readShards.contains( futureShard3 ) );
+        assertTrue( readShards.contains( compactedShard ) );
+
+
+        assertTrue( "Shards present", result.hasNext() );
+
+        shardEntryGroup = result.next();
+
+        writeShards = shardEntryGroup.getWriteShards();
+
+
+        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
+
+
+        writeShards = shardEntryGroup.getReadShards( minTime + minDelta );
 
 
+        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
+
+
+        assertFalse( "No shards left", result.hasNext() );
+    }
 
 
     @Test
     public void noShardsReturns() {
         final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
 
-              final EdgeColumnFamilies edgeColumnFamilies = mock(EdgeColumnFamilies.class);
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
 
-              final ShardedEdgeSerialization shardedEdgeSerialization = mock(ShardedEdgeSerialization.class);
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
 
-        final NodeShardApproximation nodeShardApproximation =
-                mock( NodeShardApproximation.class );
+        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
 
 
         final TimeService timeService = mock( TimeService.class );
 
+        when( timeService.getCurrentTime() ).thenReturn( 10000l );
+
         final Keyspace keyspace = mock( Keyspace.class );
 
         final MutationBatch batch = mock( MutationBatch.class );
@@ -493,8 +517,8 @@ public class NodeShardAllocationTest {
         when( keyspace.prepareMutationBatch() ).thenReturn( batch );
 
         NodeShardAllocation approximation =
-                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, nodeShardApproximation, timeService,
-                        graphFig, keyspace );
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardApproximation, timeService, graphFig, keyspace );
 
         final Id nodeId = createId( "test" );
         final String type = "type";
@@ -504,15 +528,114 @@ public class NodeShardAllocationTest {
          * Mock up returning an empty iterator, our audit shouldn't create a new shard
          */
         when( edgeShardSerialization
-                .getEdgeMetaData( same( scope ), same( nodeId ), eq(NodeType.TARGET), any( Optional.class ),  same( type ),
-                        same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+                .getEdgeMetaData( same( scope ), same( nodeId ), eq( NodeType.TARGET ), any( Optional.class ),
+                        same( type ), same( subType ) ) ).thenReturn( Collections.<Shard>emptyList().iterator() );
+
+        final Iterator<ShardEntryGroup> result =
+                approximation.getShards( scope, nodeId, NodeType.TARGET, Optional.<Shard>absent(), type, subType );
+
+
+        ShardEntryGroup shardEntryGroup = result.next();
+
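+        //with no shards persisted, allocation should fall back to a single bootstrap shard at index 0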
+        final Shard expected = new Shard( 0, 0 );
+
+        assertEquals( "Future shard returned", expected, shardEntryGroup.getMergeTarget() );
+
+
+        //now verify the bootstrap (0,0) shard is in this group.  Since nothing else has been persisted it is the only
+        //shard other nodes will see, so it should be both read from and written to
+
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards();
 
-        final Iterator<Shard> result = approximation.getShards( scope, nodeId, NodeType.TARGET,  Optional.<Shard>absent(), type,
-                subType );
+        Collection<Shard> readShards = shardEntryGroup.getReadShards( 10000l );
+
+
+        assertTrue( "0 shard allocated", writeShards.contains( expected ) );
+
+        assertTrue( "0 shard allocated", readShards.contains( expected ) );
 
-        assertEquals("0 shard allocated", 0l, result.next().getShardIndex());
 
         assertFalse( "No shard allocated", result.hasNext() );
     }
 
+
+    @Test
+    public void invalidConfiguration() {
+
+        final GraphFig graphFig = mock( GraphFig.class );
+
+        final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
+
+        final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
+
+        final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
+
+        final NodeShardApproximation nodeShardApproximation = mock( NodeShardApproximation.class );
+
+
+        /**
+         * Return 100000 milliseconds
+         */
+        final TimeService timeService = mock( TimeService.class );
+
+        final long time = 100000l;
+
+        when( timeService.getCurrentTime() ).thenReturn( time );
+
+
+        final long cacheTimeout = 30000l;
+
+        when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l );
+
+
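+        //the min delta is expected to be at least twice the cache timeout, so a value just under that should be rejected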
+        final long tooSmallDelta = ( long ) ( ( cacheTimeout * 2 ) * .99 );
+
+        when( graphFig.getShardMinDelta() ).thenReturn( tooSmallDelta );
+
+
+        final Keyspace keyspace = mock( Keyspace.class );
+
+        final MutationBatch batch = mock( MutationBatch.class );
+
+        when( keyspace.prepareMutationBatch() ).thenReturn( batch );
+
+
+        NodeShardAllocation approximation =
+                new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
+                        nodeShardApproximation, timeService, graphFig, keyspace );
+
+
+        /**
+         * Should throw an exception
+         */
+        try {
+            approximation.getMinTime();
+            fail( "Should have thrown a GraphRuntimeException" );
+        }
+        catch ( GraphRuntimeException gre ) {
+            //swallow
+        }
+
+        //now test something that passes.
+
+        final long minDelta = cacheTimeout * 2;
+
+        when( graphFig.getShardMinDelta() ).thenReturn( minDelta );
+
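+        //getMinTime should return the current time less the configured min delta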
+        long returned = approximation.getMinTime();
+
+        long expectedReturned = time - minDelta;
+
+        assertEquals( expectedReturned, returned );
+
+        final long delta = cacheTimeout * 4;
+
+        when( graphFig.getShardMinDelta() ).thenReturn( delta );
+
+        returned = approximation.getMinTime();
+
+        expectedReturned = time - delta;
+
+        assertEquals( expectedReturned, returned );
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
new file mode 100644
index 0000000..4e23d83
--- /dev/null
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.usergrid.persistence.graph.serialization.impl.shard;
+
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+
+/**
+ * Test for the group functionality
+ */
+public class ShardEntryGroupTest {
+
+    @Test
+    public void singleEntry() {
+
+        final long delta = 10000;
+
+        Shard rootShard = new Shard( 0, 0 );
+
+        ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
+
+        final boolean result = shardEntryGroup.addShard( rootShard );
+
+        assertTrue( "Shard added", result );
+
+        assertFalse( "Single shard cannot be deleted", shardEntryGroup.canBeDeleted( rootShard ) );
+
+        assertSame( "Same shard for merge target", rootShard, shardEntryGroup.getMergeTarget() );
+
+        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.needsCompaction( 0 ) );
+    }
+
+
+    @Test
+    public void allocatedWithinDelta() {
+
+        final long delta = 10000;
+
+        Shard firstShard = new Shard( 1000, 1000 );
+
+        Shard secondShard = new Shard( 1000, 1000 );
+
+
+        ShardEntryGroup shardEntryGroup = new ShardEntryGroup( delta );
+
+        final boolean firstResult = shardEntryGroup.addShard( firstShard );
+
+        assertTrue( "Shard added", firstResult );
+
+        final boolean secondResult = shardEntryGroup.addShard( secondShard );
+
+        assertTrue( "Shard allocated within the delta added to the same group", secondResult );
+
+        assertFalse( "Compaction cannot run before the delta has elapsed", shardEntryGroup.needsCompaction( 0 ) );
+    }
+}
+
+
+

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/3ecd603f/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
index 51448b7..2879d5b 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/count/NodeShardApproximationTest.java
@@ -454,6 +454,12 @@ public class NodeShardApproximationTest {
 
 
         @Override
+        public long getShardMinDelta() {
+            return 0;  //To change body of implemented methods use File | Settings | File Templates.
+        }
+
+
+        @Override
         public long getShardCacheSize() {
             return 0;  //To change body of implemented methods use File | Settings | File Templates.
         }