Posted to commits@usergrid.apache.org by to...@apache.org on 2015/08/25 18:09:48 UTC

[1/2] usergrid git commit: Updates test case to test for new behavior

Repository: usergrid
Updated Branches:
  refs/heads/USERGRID-909 9f3bf2b3a -> 915031173


Updates test case to test for new behavior


Project: http://git-wip-us.apache.org/repos/asf/usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/usergrid/commit/0794fff2
Tree: http://git-wip-us.apache.org/repos/asf/usergrid/tree/0794fff2
Diff: http://git-wip-us.apache.org/repos/asf/usergrid/diff/0794fff2

Branch: refs/heads/USERGRID-909
Commit: 0794fff20410053bc24e3e44a5a09bb28a1fe83b
Parents: 9f3bf2b
Author: Todd Nine <tn...@apigee.com>
Authored: Tue Aug 25 08:48:29 2015 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Tue Aug 25 08:48:29 2015 -0600

----------------------------------------------------------------------
 .../impl/shard/ShardEntryGroup.java             |  6 +-
 .../shard/impl/ShardGroupCompactionImpl.java    |  4 +-
 .../impl/shard/ShardEntryGroupTest.java         | 62 +++++++-------------
 3 files changed, 24 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/usergrid/blob/0794fff2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
index f1bc42f..3764e8e 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
@@ -170,7 +170,7 @@ public class ShardEntryGroup {
          * The shards in this set can be combined, we should only write to the compaction target to avoid
          * adding data to other shards
          */
-        if ( !isTooSmallToCompact() && shouldCompact( currentTime ) ) {
+        if ( !isTooSmallToCompact() && shouldCompact(  ) ) {
 
             final Shard compactionTarget = getCompactionTarget();
 
@@ -278,11 +278,9 @@ public class ShardEntryGroup {
     /**
      * Returns true if the newest created shard is past the currentTime - delta
      *
-     * @param currentTime The current system time in milliseconds
-     *
      * @return True if these shards can safely be combined into a single shard, false otherwise
      */
-    public boolean shouldCompact( final long currentTime ) {
+    public boolean shouldCompact( ) {
 
         /**
          * We don't have enough shards to compact, ignore

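The hunk above removes the currentTime argument from shouldCompact() but does not show the new method body. As a rough, standalone sketch only -- assuming the check now depends solely on the group's own state (enough shards, at least one still uncompacted) rather than the caller's clock, and using simplified stand-in types instead of the real Usergrid classes:

    import java.util.ArrayList;
    import java.util.List;

    public class ShouldCompactSketch {

        // simplified stand-in for the real Shard class
        static final class Shard {
            final long shardIndex;
            final long createdTime;
            final boolean compacted;

            Shard( long shardIndex, long createdTime, boolean compacted ) {
                this.shardIndex = shardIndex;
                this.createdTime = createdTime;
                this.compacted = compacted;
            }
        }

        // assumed semantics: a group is compactable once it holds more than one
        // shard and at least one of them has not been compacted yet
        static boolean shouldCompact( List<Shard> shards ) {
            if ( shards.size() < 2 ) {
                return false;   // too small to compact, nothing to merge
            }
            for ( Shard shard : shards ) {
                if ( !shard.compacted ) {
                    return true;
                }
            }
            return false;
        }

        public static void main( String[] args ) {
            List<Shard> group = new ArrayList<>();
            group.add( new Shard( 0, 0, true ) );
            System.out.println( shouldCompact( group ) );   // false: a single shard cannot be merged
            group.add( new Shard( 1000, 1000, false ) );
            System.out.println( shouldCompact( group ) );   // true: no waiting on a time delta any more
        }
    }

This matches the updated tests below, which expect shouldCompact() to return false for a single shard and true as soon as a compaction target exists, without ever passing a timestamp.
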
http://git-wip-us.apache.org/repos/asf/usergrid/blob/0794fff2/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
index 7c1c816..c1a70e2 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
@@ -147,7 +147,7 @@ public class ShardGroupCompactionImpl implements ShardGroupCompaction {
         Preconditions.checkNotNull( group, "group cannot be null" );
         Preconditions.checkArgument( group.isCompactionPending(), "Compaction is pending" );
         Preconditions
-            .checkArgument( group.shouldCompact( startTime ), "Compaction cannot be run yet.  Ignoring compaction." );
+            .checkArgument( group.shouldCompact( ), "Compaction cannot be run yet.  Ignoring compaction." );
 
         if(LOG.isDebugEnabled()) {
             LOG.debug("Compacting shard group. count is {} ", countAudits.get());
@@ -401,7 +401,7 @@ public class ShardGroupCompactionImpl implements ShardGroupCompaction {
             /**
              * Do the compaction
              */
-            if ( group.shouldCompact( timeService.getCurrentTime() ) ) {
+            if ( group.shouldCompact( ) ) {
                 /**
                  * It's already compacting, don't do anything
                  */

http://git-wip-us.apache.org/repos/asf/usergrid/blob/0794fff2/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
index 240c3ff..9745091 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
@@ -50,7 +50,7 @@ public class ShardEntryGroupTest {
 
         assertNull( "No merge target found", shardEntryGroup.getCompactionTarget() );
 
-        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.shouldCompact( Long.MAX_VALUE ) );
+        assertFalse( "Merge cannot be run with a single shard", shardEntryGroup.shouldCompact() );
     }
 
 
@@ -122,21 +122,9 @@ public class ShardEntryGroupTest {
 
         assertEquals( "Min compaction target found", firstShard, shardEntryGroup.getCompactionTarget() );
 
-        //shouldn't return true, since we haven't passed delta time in the second shard
-        assertFalse( "Merge cannot be run within min time",
-                shardEntryGroup.shouldCompact( firstShard.getCreatedTime() ) );
+        //we should compact these
+        assertTrue( "Merge should be run", shardEntryGroup.shouldCompact(  ) );
 
-        //shouldn't return true, since we haven't passed delta time in the second shard
-        assertFalse( "Merge cannot be run within min time",
-                shardEntryGroup.shouldCompact( secondShard.getCreatedTime() ) );
-
-        //we haven't passed the delta in the neighbor that would be our source, shard2, we shouldn't return true
-        //we read from shard2 and write to shard1
-        assertFalse( "Merge cannot be run with after min time",
-                shardEntryGroup.shouldCompact( firstShard.getCreatedTime()) );
-
-        assertTrue( "Merge should be run with after min time",
-                shardEntryGroup.shouldCompact( secondShard.getCreatedTime()) );
     }
 
 
@@ -208,20 +196,10 @@ public class ShardEntryGroupTest {
 
         assertEquals( "Same shard for merge target", secondShard, shardEntryGroup.getCompactionTarget() );
 
-        //shouldn't return true, since we haven't passed delta time in the second shard
-        assertFalse( "Merge cannot be run within min time",
-                shardEntryGroup.shouldCompact( firstShard.getCreatedTime()  ) );
-
-        //shouldn't return true, since we haven't passed delta time in the second shard
-        assertFalse( "Merge cannot be run within min time",
-                shardEntryGroup.shouldCompact( secondShard.getCreatedTime()  ) );
+        //Should return true, we can merge
+        assertTrue( "Merge cannot be run within min time",
+                shardEntryGroup.shouldCompact() );
 
-
-        assertFalse( "Merge cannot be run within min time",
-                shardEntryGroup.shouldCompact( secondShard.getCreatedTime()  + 1 ) );
-
-        assertTrue( "Merge should be run with after min time",
-                shardEntryGroup.shouldCompact( firstShard.getCreatedTime()  + 1 ) );
     }
 
 
@@ -268,61 +246,61 @@ public class ShardEntryGroupTest {
     @Test
     public void getAllWriteShardsNotPastCompaction() {
 
-        Shard firstShard = new Shard( 1000, 10000, false );
+        Shard ignoredProposedShard = new Shard( 1000, 10000, false );
 
-        Shard secondShard = new Shard( 999, 9000, false );
+        Shard newAllocatedCompactionTarget = new Shard( 999, 9000, false );
 
         Shard compactedShard = new Shard( 900, 8000, true );
 
 
         ShardEntryGroup shardEntryGroup = new ShardEntryGroup( );
 
-        boolean result = shardEntryGroup.addShard( firstShard );
+        boolean result = shardEntryGroup.addShard( ignoredProposedShard );
 
         assertTrue( "Shard added", result );
 
-        result = shardEntryGroup.addShard( secondShard );
+        result = shardEntryGroup.addShard( newAllocatedCompactionTarget );
 
-        assertTrue( " Shard added", result );
+        assertTrue( "Shard added", result );
 
         result = shardEntryGroup.addShard( compactedShard );
 
         assertTrue( "Shard added", result );
 
 
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( firstShard.getCreatedTime()  );
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( ignoredProposedShard.getCreatedTime() );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
-        assertTrue( "Root shard present", writeShards.contains( compactedShard ) );
+        assertTrue( "Lowest new shard present", writeShards.contains( newAllocatedCompactionTarget ) );
 
 
-        writeShards = shardEntryGroup.getWriteShards( secondShard.getCreatedTime()  );
+        writeShards = shardEntryGroup.getWriteShards( newAllocatedCompactionTarget.getCreatedTime() );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
-        assertTrue( "Third shard present", writeShards.contains( compactedShard ) );
+        assertTrue( "Lowest new shard present", writeShards.contains( newAllocatedCompactionTarget ) );
 
 
         /**
          * Not the max created timestamp, shouldn't return less than all shards
          */
-        writeShards = shardEntryGroup.getWriteShards( secondShard.getCreatedTime() + 1  );
+        writeShards = shardEntryGroup.getWriteShards( newAllocatedCompactionTarget.getCreatedTime() + 1  );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
 
-        assertTrue( "Second shard present", writeShards.contains( compactedShard ) );
+        assertTrue( "Second shard present", writeShards.contains( newAllocatedCompactionTarget ) );
 
 
-        assertEquals( "Compaction target correct", secondShard, shardEntryGroup.getCompactionTarget() );
+        assertEquals( "Compaction target correct", newAllocatedCompactionTarget, shardEntryGroup.getCompactionTarget() );
 
-        writeShards = shardEntryGroup.getWriteShards( firstShard.getCreatedTime() + 1  );
+        writeShards = shardEntryGroup.getWriteShards( ignoredProposedShard.getCreatedTime() + 1  );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
 
-        assertTrue( "Second shard present", writeShards.contains( secondShard ) );
+        assertTrue( "Second shard present", writeShards.contains( newAllocatedCompactionTarget ) );
     }
 
 


[2/2] usergrid git commit: Updated more tests

Posted by to...@apache.org.
Updated more tests


Project: http://git-wip-us.apache.org/repos/asf/usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/usergrid/commit/91503117
Tree: http://git-wip-us.apache.org/repos/asf/usergrid/tree/91503117
Diff: http://git-wip-us.apache.org/repos/asf/usergrid/diff/91503117

Branch: refs/heads/USERGRID-909
Commit: 9150311739c2ffdc7389f3aa3e27e9e05cff8e4c
Parents: 0794fff
Author: Todd Nine <tn...@apigee.com>
Authored: Tue Aug 25 10:09:44 2015 -0600
Committer: Todd Nine <tn...@apigee.com>
Committed: Tue Aug 25 10:09:44 2015 -0600

----------------------------------------------------------------------
 .../impl/EdgeSerializationImpl.java             | 20 +++----
 .../impl/shard/ShardEntryGroup.java             | 33 ++++++------
 .../impl/shard/NodeShardAllocationTest.java     | 35 +++++-------
 .../impl/shard/ShardEntryGroupTest.java         | 57 ++++++++++++++++----
 .../impl/shard/ShardGroupCompactionTest.java    | 17 +++---
 .../shard/impl/ShardEntryGroupIteratorTest.java | 16 +++---
 6 files changed, 104 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
index 4c1ae79..7d14056 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/EdgeSerializationImpl.java
@@ -114,7 +114,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode( sourceNode, edgeType );
 
         final Collection<Shard> sourceWriteShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( );
 
         final MutationBatch batch = shardedEdgeSerialization
                 .writeEdgeFromSource( edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta,
@@ -129,7 +129,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
 
         final Collection<Shard> sourceTargetTypeWriteShards =
                 edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceTargetTypeEdgeMeta )
-                                 .getWriteShards( now );
+                                 .getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .writeEdgeFromSourceWithTargetType( edgeColumnFamilies, scope, markedEdge, sourceTargetTypeWriteShards,
@@ -144,7 +144,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode( targetNode, edgeType );
 
         final Collection<Shard> targetWriteShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .writeEdgeToTarget( edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta,
@@ -160,7 +160,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
 
         final Collection<Shard> targetSourceTypeWriteShards =
                 edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetSourceTypeEdgeMeta )
-                                 .getWriteShards( now );
+                                 .getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .writeEdgeToTargetWithSourceType( edgeColumnFamilies, scope, markedEdge, targetSourceTypeWriteShards,
@@ -174,7 +174,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta edgeVersionsMeta = DirectedEdgeMeta.fromEdge( sourceNode, targetNode, edgeType );
 
         final Collection<Shard> edgeVersionsShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .writeEdgeVersions( edgeColumnFamilies, scope, markedEdge, edgeVersionsShards,
@@ -203,7 +203,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode( sourceNode, edgeType );
 
         final Collection<Shard> sourceWriteShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( );
 
         final MutationBatch batch = shardedEdgeSerialization
                 .deleteEdgeFromSource( edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta,
@@ -218,7 +218,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
 
         final Collection<Shard> sourceTargetTypeWriteShards =
                 edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceTargetTypeEdgeMeta )
-                                 .getWriteShards( now );
+                                 .getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .deleteEdgeFromSourceWithTargetType( edgeColumnFamilies, scope, markedEdge, sourceTargetTypeWriteShards,
@@ -233,7 +233,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode( targetNode, edgeType );
 
         final Collection<Shard> targetWriteShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards(  );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .deleteEdgeToTarget( edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta,
@@ -249,7 +249,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
 
         final Collection<Shard> targetSourceTypeWriteShards =
                 edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetSourceTypeEdgeMeta )
-                                 .getWriteShards( now );
+                                 .getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .deleteEdgeToTargetWithSourceType( edgeColumnFamilies, scope, markedEdge, targetSourceTypeWriteShards,
@@ -263,7 +263,7 @@ public class EdgeSerializationImpl implements EdgeSerialization {
         final DirectedEdgeMeta edgeVersionsMeta = DirectedEdgeMeta.fromEdge( sourceNode, targetNode, edgeType );
 
         final Collection<Shard> edgeVersionsShards =
-                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( now );
+                edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( );
 
         batch.mergeShallow( shardedEdgeSerialization
                 .deleteEdgeVersions( edgeColumnFamilies, scope, markedEdge, edgeVersionsShards,

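Every call site in this file changes the same way: the serializer no longer passes "now" into getWriteShards(). A caller-side sketch of the new shape of the write path, using hypothetical stand-in types rather than the real Usergrid interfaces:

    import java.util.Collection;
    import java.util.Collections;

    public class WriteShardCallerSketch {

        // simplified stand-in for ShardEntryGroup's write-side contract
        interface ShardGroup {
            Collection<Long> getWriteShards();   // was getWriteShards( long currentTime )
        }

        static void writeEdge( ShardGroup group, String edge ) {
            // all writes for the group land on its single current write target
            for ( long shardIndex : group.getWriteShards() ) {
                System.out.println( "writing " + edge + " to shard " + shardIndex );
            }
        }

        public static void main( String[] args ) {
            ShardGroup group = () -> Collections.singleton( 999L );
            writeEdge( group, "source->target" );
        }
    }

Since the group itself now decides which shard accepts writes, callers no longer need to consult timeService just to resolve write shards.
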
http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
index 3764e8e..34dece0 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroup.java
@@ -44,8 +44,6 @@ public class ShardEntryGroup {
 
     private List<Shard> shards;
 
-    private long maxCreatedTime;
-
     private Shard compactionTarget;
 
     private Shard rootShard;
@@ -56,11 +54,12 @@ public class ShardEntryGroup {
      */
     public ShardEntryGroup() {
         this.shards = new ArrayList<>();
-        this.maxCreatedTime = 0;
     }
 
 
     /**
+     * Shard insertion order is assumed to be from the highest Shard.shardIndex (Long.MAX_VALUE) down to the lowest (Long.MIN_VALUE)
+     *
      * Only add a shard if it is within the rules require to meet a group.  The rules are outlined below.
      *
      * Case 1)  First shard in the group, always added
@@ -103,15 +102,13 @@ public class ShardEntryGroup {
     private void addShardInternal( final Shard shard ) {
         shards.add( shard );
 
-        maxCreatedTime = Math.max( maxCreatedTime, shard.getCreatedTime() );
-
         //we're changing our structure, unset the compaction target
         compactionTarget = null;
     }
 
 
     /**
-     * Return the minum shard based on time indexes
+     * Return the minimum shard based on time indexes
      */
     public Shard getMinShard() {
         final int size = shards.size();
@@ -162,9 +159,9 @@ public class ShardEntryGroup {
 
 
     /**
-     * Get the entries, with the max shard time being first. We write to all shards until they're migrated
+     * Get the entries, with the earliest allocated uncompacted shard being first
      */
-    public Collection<Shard> getWriteShards( long currentTime ) {
+    public Collection<Shard> getWriteShards() {
 
         /**
          * The shards in this set can be combined, we should only write to the compaction target to avoid
@@ -179,6 +176,7 @@ public class ShardEntryGroup {
             return Collections.singleton( compactionTarget );
         }
 
+
         final Shard staticShard = getRootShard();
 
 
@@ -242,19 +240,23 @@ public class ShardEntryGroup {
             return null;
         }
 
-        //Start seeking from the end of our group.  The first shard we encounter that is not compacted is our
-        // compaction target
+        Shard compactionCandidate = null;
+
+        //Start seeking from the end of our group.  The lowest timestamp uncompacted shard is our target
         //NOTE: This does not mean we can compact, rather it's just an indication that we have a target set.
-        for ( int i = lastIndex - 1; i > -1; i-- ) {
-            final Shard compactionCandidate = shards.get( i );
+        for ( int i = 0; i < lastIndex; i++ ) {
+            final Shard currentTargetCompaction = shards.get( i );
 
 
-            if ( !compactionCandidate.isCompacted() ) {
-                compactionTarget = compactionCandidate;
-                break;
+            //the shard is not compacted, and we've either never set a candidate
+            //or the candidate has a higher created timestamp than our current shard
+            if ( !currentTargetCompaction.isCompacted() && (compactionCandidate == null || currentTargetCompaction.getCreatedTime() < compactionCandidate.getCreatedTime())) {
+                compactionCandidate = currentTargetCompaction;
             }
         }
 
+        compactionTarget = compactionCandidate;
+
         return compactionTarget;
     }
 
@@ -310,7 +312,6 @@ public class ShardEntryGroup {
     public String toString() {
         return "ShardEntryGroup{" +
             "shards=" + shards +
-            ", maxCreatedTime=" + maxCreatedTime +
             ", compactionTarget=" + compactionTarget +
             '}';
     }

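Pulling this hunk together: getCompactionTarget() now scans the whole group and keeps the uncompacted shard with the lowest created timestamp, and getWriteShards() drops its time argument because every write simply goes to that target. A standalone sketch of the selection rule with simplified stand-in types (the fallback to the root shard when nothing is compacting is omitted here):

    import java.util.Arrays;
    import java.util.List;

    public class CompactionTargetSketch {

        // simplified stand-in for the real Shard class
        static final class Shard {
            final long shardIndex;
            final long createdTime;
            final boolean compacted;

            Shard( long shardIndex, long createdTime, boolean compacted ) {
                this.shardIndex = shardIndex;
                this.createdTime = createdTime;
                this.compacted = compacted;
            }
        }

        // the compaction target is the uncompacted shard with the LOWEST created
        // timestamp; creation order decides, not shard index and not the caller's clock
        static Shard getCompactionTarget( List<Shard> shards ) {
            Shard candidate = null;
            for ( Shard shard : shards ) {
                if ( !shard.compacted
                        && ( candidate == null || shard.createdTime < candidate.createdTime ) ) {
                    candidate = shard;
                }
            }
            return candidate;
        }

        public static void main( String[] args ) {
            // values mirror the new lowerTimestampHigherShard test further down
            Shard allocatedLater = new Shard( 1100, 1001, false );
            Shard writtenFirst   = new Shard( 1000, 1000, false );
            Shard compactedShard = new Shard( 500, 200, true );

            Shard target = getCompactionTarget( Arrays.asList( allocatedLater, writtenFirst, compactedShard ) );
            System.out.println( target.shardIndex );   // 1000: the earlier-created shard wins despite its lower index
        }
    }
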
http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
index 265c997..6542a72 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/NodeShardAllocationTest.java
@@ -540,11 +540,14 @@ public class NodeShardAllocationTest {
         //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1) may be the only shard other
         //nodes see while we're rolling our state.  This means it should be read and merged from as well
 
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( minTime  );
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards(  );
 
         assertEquals( "Shard size as expected", 1, writeShards.size() );
 
-        assertTrue( writeShards.contains( compactedShard ) );
+        /**
+         * Here we should get "futureShard2": it is the first allocated uncompacted shard, and therefore should be the shard that is used.
+         */
+        assertTrue( "Minimum allocated new shard", writeShards.contains( futureShard2 ) );
 
 
         Collection<Shard> readShards = shardEntryGroup.getReadShards();
@@ -552,24 +555,26 @@ public class NodeShardAllocationTest {
         assertEquals( "Shard size as expected", 2, readShards.size() );
 
         assertTrue( readShards.contains( futureShard1 ) );
+        assertTrue( readShards.contains( futureShard2 ) );
+        assertTrue( readShards.contains( futureShard3 ) );
         assertTrue( readShards.contains( compactedShard ) );
 
 
-        assertTrue( "Shards present", result.hasNext() );
+        assertTrue( "Next shard group present", result.hasNext() );
 
         shardEntryGroup = result.next();
 
 
-        writeShards = shardEntryGroup.getWriteShards( minTime  );
+        writeShards = shardEntryGroup.getWriteShards(  );
 
 
         assertTrue( "Previous shard present", writeShards.contains( minShard ) );
 
 
-        writeShards = shardEntryGroup.getReadShards();
+        readShards = shardEntryGroup.getReadShards();
 
 
-        assertTrue( "Previous shard present", writeShards.contains( minShard ) );
+        assertTrue( "Previous shard present", readShards.contains( minShard ) );
 
 
         assertFalse( "No shards left", result.hasNext() );
@@ -609,19 +614,6 @@ public class NodeShardAllocationTest {
         final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType( nodeId, type, subType );
 
 
-        /**
-         * Mock up returning an empty iterator, our audit shouldn't create a new shard
-         */
-        when( edgeShardSerialization
-                .getShardMetaDataLocal( same( scope ), any( Optional.class ), same( directedEdgeMeta ) ) )
-                .thenReturn( Collections.<Shard>emptyList().iterator() );
-
-
-        ArgumentCaptor<Shard> shardArgumentCaptor = ArgumentCaptor.forClass( Shard.class );
-
-        when( edgeShardSerialization
-                .writeShardMeta( same( scope ), shardArgumentCaptor.capture(), same( directedEdgeMeta ) ) )
-                .thenReturn( batch );
 
 
         final Iterator<ShardEntryGroup> result =
@@ -635,14 +627,11 @@ public class NodeShardAllocationTest {
         assertEquals( "Shard size expected", 1, shardEntryGroup.entrySize() );
 
 
-        //ensure we persisted the new shard.
-        assertEquals( "Root shard was persisted", rootShard, shardArgumentCaptor.getValue() );
-
 
         //now verify all 4 are in this group.  This is because the first shard (0,0) (n-1) may be the only shard other
         //nodes see while we're rolling our state.  This means it should be read and merged from as well
 
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( timeService.getCurrentTime() );
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( );
 
         Collection<Shard> readShards = shardEntryGroup.getReadShards();
 

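The assertions above lean on a read/write asymmetry: reads must cover every shard in the group, because rows may not have been copied to the target yet, while writes go only to the first allocated uncompacted shard so the pending compaction has a single, stable destination. A standalone sketch of that split, with simplified stand-in types and illustrative shard values:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class ReadWriteShardSketch {

        // simplified stand-in for the real Shard class
        static final class Shard {
            final long shardIndex;
            final long createdTime;
            final boolean compacted;

            Shard( long shardIndex, long createdTime, boolean compacted ) {
                this.shardIndex = shardIndex;
                this.createdTime = createdTime;
                this.compacted = compacted;
            }
        }

        // reads consult every shard until compaction has finished moving rows
        static Collection<Shard> getReadShards( List<Shard> shards ) {
            return Collections.unmodifiableList( shards );
        }

        // writes target only the earliest-created uncompacted shard
        static Collection<Shard> getWriteShards( List<Shard> shards ) {
            Shard target = null;
            for ( Shard shard : shards ) {
                if ( !shard.compacted && ( target == null || shard.createdTime < target.createdTime ) ) {
                    target = shard;
                }
            }
            return target == null ? Collections.<Shard>emptySet() : Collections.singleton( target );
        }

        public static void main( String[] args ) {
            List<Shard> group = new ArrayList<>();
            group.add( new Shard( 3000, 3000, false ) );   // newest proposed shard
            group.add( new Shard( 2000, 2000, false ) );   // first allocated uncompacted shard
            group.add( new Shard( 0, 0, true ) );          // compacted root

            System.out.println( getReadShards( group ).size() );                            // 3: read everywhere
            System.out.println( getWriteShards( group ).iterator().next().shardIndex );     // 2000: write to one shard
        }
    }
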
http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
index 9745091..4dcff13 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardEntryGroupTest.java
@@ -55,7 +55,7 @@ public class ShardEntryGroupTest {
 
 
     @Test
-    public void allocatedWithinDelta() {
+    public void minTimestampSelected() {
 
         Shard firstShard = new Shard( 1000, 1000, false );
 
@@ -70,7 +70,8 @@ public class ShardEntryGroupTest {
 
         result = shardEntryGroup.addShard( firstShard );
 
-        assertTrue( " Shard added", result );
+        assertTrue( "Shard added", result );
+
 
 
         assertFalse( "First shard cannot be deleted", shardEntryGroup.canBeDeleted( firstShard ) );
@@ -82,8 +83,6 @@ public class ShardEntryGroupTest {
         assertNull( "Can't compact, no min compacted shard present", shardEntryGroup.getCompactionTarget() );
 
 
-        //TODO, should this blow up in general?  We don't have a compacted shard at the lower bounds,
-        // which shouldn't be allowed
 
     }
 
@@ -123,11 +122,51 @@ public class ShardEntryGroupTest {
         assertEquals( "Min compaction target found", firstShard, shardEntryGroup.getCompactionTarget() );
 
         //we should compact these
-        assertTrue( "Merge should be run", shardEntryGroup.shouldCompact(  ) );
+        assertTrue( "Merge should be run", shardEntryGroup.shouldCompact() );
 
     }
 
 
+
+    @Test
+    public void lowerTimestampHigherShard() {
+
+        Shard firstShard = new Shard( 1100, 1001, false );
+
+        //firstShard should be compacted away: even though it has the higher shard index, it also has the higher created timestamp, so it wasn't written first and secondShard becomes the compaction target
+        Shard secondShard = new Shard( 1000, 1000, false );
+
+        Shard compactedShard = new Shard( 500, 200, true );
+
+
+        ShardEntryGroup shardEntryGroup = new ShardEntryGroup(  );
+
+        boolean result = shardEntryGroup.addShard( firstShard );
+
+        assertTrue( "Shard added", result );
+
+        result = shardEntryGroup.addShard( secondShard );
+
+        assertTrue( "Shard added", result );
+
+        result = shardEntryGroup.addShard( compactedShard );
+
+        assertTrue( "Shard added", result );
+
+
+
+        assertFalse( "First shard cannot be deleted", shardEntryGroup.canBeDeleted( secondShard  ) );
+
+        assertTrue( "Second shard can be deleted", shardEntryGroup.canBeDeleted( firstShard  ) );
+
+        assertEquals( "Can't compact, no min compacted shard present", secondShard, shardEntryGroup.getCompactionTarget() );
+
+
+
+    }
+
+
+
     @Test
     public void multipleShardGroups() {
 
@@ -268,14 +307,14 @@ public class ShardEntryGroupTest {
         assertTrue( "Shard added", result );
 
 
-        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( ignoredProposedShard.getCreatedTime() );
+        Collection<Shard> writeShards = shardEntryGroup.getWriteShards( );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
         assertTrue( "Lowest new shard present", writeShards.contains( newAllocatedCompactionTarget ) );
 
 
-        writeShards = shardEntryGroup.getWriteShards( newAllocatedCompactionTarget.getCreatedTime() );
+        writeShards = shardEntryGroup.getWriteShards( );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
@@ -285,7 +324,7 @@ public class ShardEntryGroupTest {
         /**
          * Not the max created timestamp, shouldn't return less than all shards
          */
-        writeShards = shardEntryGroup.getWriteShards( newAllocatedCompactionTarget.getCreatedTime() + 1  );
+        writeShards = shardEntryGroup.getWriteShards(  );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 
@@ -295,7 +334,7 @@ public class ShardEntryGroupTest {
 
         assertEquals( "Compaction target correct", newAllocatedCompactionTarget, shardEntryGroup.getCompactionTarget() );
 
-        writeShards = shardEntryGroup.getWriteShards( ignoredProposedShard.getCreatedTime() + 1  );
+        writeShards = shardEntryGroup.getWriteShards( );
 
         assertEquals( "Shard size correct", 1, writeShards.size() );
 

http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardGroupCompactionTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardGroupCompactionTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardGroupCompactionTest.java
index ab457d3..47825a0 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardGroupCompactionTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/ShardGroupCompactionTest.java
@@ -98,14 +98,15 @@ public class ShardGroupCompactionTest {
 
         DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode( IdGenerator.createId( "source" ), "test" );
 
-        try {
-            compaction.compact( this.scope, directedEdgeMeta, group );
-            fail( "I should not reach this point" );
-        }
-        catch ( Throwable t ) {
-            assertEquals( "Correct error message returned", "Compaction cannot be run yet.  Ignoring compaction.",
-                    t.getMessage() );
-        }
+        fail("Write more of these tests");
+//        try {
+//            compaction.compact( this.scope, directedEdgeMeta, group );
+//            fail( "I should not reach this point" );
+//        }
+//        catch ( Throwable t ) {
+//            assertEquals( "Correct error message returned", "Compaction cannot be run yet.  Ignoring compaction.",
+//                    t.getMessage() );
+//        }
     }
 
 

http://git-wip-us.apache.org/repos/asf/usergrid/blob/91503117/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIteratorTest.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIteratorTest.java b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIteratorTest.java
index 238e238..2e8ea3c 100644
--- a/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIteratorTest.java
+++ b/stack/corepersistence/graph/src/test/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardEntryGroupIteratorTest.java
@@ -96,14 +96,14 @@ public class ShardEntryGroupIteratorTest {
         assertTrue( "Min shard present", readShards.contains( minShard ) );
 
 
-        Collection<Shard> writeShards = group.getWriteShards( 0 );
+        Collection<Shard> writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
         assertTrue( "Min shard present", writeShards.contains( minShard ) );
 
 
-        writeShards = group.getWriteShards( Long.MAX_VALUE );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
@@ -181,14 +181,14 @@ public class ShardEntryGroupIteratorTest {
         assertTrue( "shardGroup3Shard1 shard present", readShards.contains( shardGroup3Shard1 ) );
 
 
-        Collection<Shard> writeShards = group.getWriteShards( 0 );
+        Collection<Shard> writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
 
         assertTrue( "shardGroup3Shard1 shard present", writeShards.contains( shardGroup3Shard1 ) );
 
-        writeShards = group.getWriteShards( shardGroup3Shard3.getCreatedTime() + delta );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
@@ -222,14 +222,14 @@ public class ShardEntryGroupIteratorTest {
         assertTrue( "shardGroup2Shard2 shard present", readShards.contains( shardGroup2Shard2 ) );
 
 
-        writeShards = group.getWriteShards( 0 );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
         assertTrue( "shardGroup2Shard1 shard present", writeShards.contains( shardGroup2Shard1 ) );
 
 
-        writeShards = group.getWriteShards( shardGroup2Shard2.getCreatedTime() + delta + 1 );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Both shards present", 1, writeShards.size() );
 
@@ -258,14 +258,14 @@ public class ShardEntryGroupIteratorTest {
         assertTrue( "shardGroup1Shard2 shard present", readShards.contains( shardGroup1Shard2 ) );
 
 
-        writeShards = group.getWriteShards( 0 );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Min shard present", 1, writeShards.size() );
 
         assertTrue( "shardGroup1Shard1 shard present", writeShards.contains( shardGroup1Shard1 ) );
 
 
-        writeShards = group.getWriteShards( shardGroup1Shard3.getCreatedTime() + delta + 1 );
+        writeShards = group.getWriteShards( );
 
         assertEquals( "Both shards present", 1, writeShards.size() );