You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@usergrid.apache.org by to...@apache.org on 2015/02/13 00:15:56 UTC

[01/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Repository: incubator-usergrid
Updated Branches:
  refs/heads/USERGRID-390 1203f55de -> bd7437344


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIteratorTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIteratorTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIteratorTest.java
deleted file mode 100644
index 9f27463..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIteratorTest.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.junit.Test;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-
-/** @author tnine */
-public class IntersectionIteratorTest {
-
-    @Test
-    public void mutipleIterators() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-        UUID id7 = UUIDUtils.minTimeUUID( 7 );
-        UUID id8 = UUIDUtils.minTimeUUID( 8 );
-        UUID id9 = UUIDUtils.minTimeUUID( 9 );
-        UUID id10 = UUIDUtils.minTimeUUID( 10 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id9 );
-        first.add( id8 );
-        first.add( id1 );
-        first.add( id2 );
-        first.add( id3 );
-
-
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id8 );
-        second.add( id10 );
-
-        InOrderIterator third = new InOrderIterator( 100 );
-        third.add( id1 );
-        third.add( id3 );
-        third.add( id5 );
-        third.add( id6 );
-        third.add( id7 );
-        third.add( id8 );
-
-        InOrderIterator fourth = new InOrderIterator( 100 );
-        fourth.add( id1 );
-        fourth.add( id2 );
-        fourth.add( id3 );
-        fourth.add( id6 );
-        fourth.add( id8 );
-        fourth.add( id10 );
-
-        IntersectionIterator intersection = new IntersectionIterator( 100 );
-        intersection.addIterator( first );
-        intersection.addIterator( second );
-        intersection.addIterator( third );
-        intersection.addIterator( fourth );
-
-        Iterator<ScanColumn> union = intersection.next().iterator();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( union.hasNext() );
-        assertEquals( id8, union.next().getUUID() );
-
-        assertTrue( union.hasNext() );
-        assertEquals( id1, union.next().getUUID() );
-
-        assertTrue( union.hasNext() );
-        assertEquals( id3, union.next().getUUID() );
-
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void oneIterator() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id1 );
-        first.add( id2 );
-        first.add( id3 );
-        first.add( id4 );
-
-        IntersectionIterator intersection = new IntersectionIterator( 100 );
-        intersection.addIterator( first );
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( intersection.hasNext() );
-
-        Set<ScanColumn> page = intersection.next();
-
-        Iterator<ScanColumn> union = page.iterator();
-
-        assertEquals( id1, union.next().getUUID() );
-
-        assertTrue( union.hasNext() );
-        assertEquals( id2, union.next().getUUID() );
-
-        assertTrue( union.hasNext() );
-        assertEquals( id3, union.next().getUUID() );
-
-        assertTrue( union.hasNext() );
-        assertEquals( id4, union.next().getUUID() );
-
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void noIterator() {
-        IntersectionIterator union = new IntersectionIterator( 100 );
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void largeIntersection() {
-
-        int size = 10000;
-        int firstIntersection = 100;
-        int secondIntersection = 200;
-
-        UUID[] firstSet = new UUID[size];
-        UUID[] secondSet = new UUID[size];
-        UUID[] thirdSet = new UUID[size];
-
-        InOrderIterator first = new InOrderIterator( 100 );
-        InOrderIterator second = new InOrderIterator( 100 );
-        InOrderIterator third = new InOrderIterator( 100 );
-
-        List<UUID> results = new ArrayList<UUID>( size / secondIntersection );
-
-        for ( int i = 0; i < size; i++ ) {
-            firstSet[i] = UUIDUtils.newTimeUUID();
-            // every 100 elements, set the element equal to the first set. This way we
-            // have intersection
-
-            if ( i % firstIntersection == 0 ) {
-                secondSet[i] = firstSet[i];
-            }
-            else {
-                secondSet[i] = UUIDUtils.newTimeUUID();
-            }
-
-            if ( i % secondIntersection == 0 ) {
-                thirdSet[i] = firstSet[i];
-                results.add( firstSet[i] );
-            }
-
-            else {
-                thirdSet[i] = UUIDUtils.newTimeUUID();
-            }
-        }
-
-        first.add( firstSet );
-
-        reverse( secondSet );
-        //reverse the second
-        second.add( secondSet );
-        third.add( thirdSet );
-
-        //now itersect them and make sure we get all results in a small set
-
-        int numPages = 2;
-        int pageSize = results.size() / numPages;
-
-        IntersectionIterator intersection = new IntersectionIterator( pageSize );
-        intersection.addIterator( first );
-        intersection.addIterator( second );
-        intersection.addIterator( third );
-
-        assertTrue( intersection.hasNext() );
-
-
-        Iterator<UUID> expected = results.iterator();
-        Set<ScanColumn> resultSet = intersection.next();
-        Iterator<ScanColumn> union = resultSet.iterator();
-
-
-        while ( union.hasNext() ) {
-            assertTrue( expected.hasNext() );
-            assertEquals( expected.next(), union.next().getUUID() );
-        }
-
-
-        //now get the 2nd page
-        resultSet = intersection.next();
-        union = resultSet.iterator();
-
-
-        while ( union.hasNext() ) {
-            assertTrue( expected.hasNext() );
-            assertEquals( expected.next(), union.next().getUUID() );
-        }
-
-        //no more elements
-        assertFalse( intersection.hasNext() );
-        assertFalse( expected.hasNext() );
-    }
-
-
-    /**
-     * Tests that when there are multiple iterators, and one in the "middle" of the list returns no results, it will
-     * short circuit since no results will be possible
-     */
-    @Test
-    public void mutipleIteratorsNoIntersection() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-        UUID id8 = UUIDUtils.minTimeUUID( 8 );
-        UUID id9 = UUIDUtils.minTimeUUID( 9 );
-        UUID id10 = UUIDUtils.minTimeUUID( 10 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id1 );
-        first.add( id2 );
-        first.add( id3 );
-        first.add( id8 );
-        first.add( id9 );
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id8 );
-        second.add( id10 );
-
-        InOrderIterator third = new InOrderIterator( 100 );
-
-        InOrderIterator fourth = new InOrderIterator( 100 );
-        fourth.add( id1 );
-        fourth.add( id2 );
-        fourth.add( id3 );
-        fourth.add( id6 );
-        fourth.add( id8 );
-        fourth.add( id10 );
-
-        IntersectionIterator intersection = new IntersectionIterator( 100 );
-        intersection.addIterator( first );
-        intersection.addIterator( second );
-        intersection.addIterator( third );
-        intersection.addIterator( fourth );
-
-        Iterator<ScanColumn> union = intersection.next().iterator();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertFalse( union.hasNext() );
-    }
-
-
-    private void reverse( UUID[] array ) {
-
-        UUID temp = null;
-
-        for ( int i = 0; i < array.length / 2; i++ ) {
-            temp = array[i];
-            array[i] = array[array.length - i - 1];
-            array[array.length - i - 1] = temp;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IteratorHelper.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IteratorHelper.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IteratorHelper.java
deleted file mode 100644
index 049296e..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/IteratorHelper.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-
-/**
- *
- * @author: tnine
- *
- */
-public class IteratorHelper {
-
-    public static ScanColumn uuidColumn( UUID value ) {
-        return new UUIDIndexSliceParser.UUIDColumn( value, ByteBuffer.allocate( 0 ) );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIteratorTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIteratorTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIteratorTest.java
deleted file mode 100644
index 02ec0d6..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIteratorTest.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.Set;
-import java.util.UUID;
-
-import org.junit.Test;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.apache.usergrid.persistence.query.ir.result.IteratorHelper.uuidColumn;
-
-
-/** @author tnine */
-public class SubtractionIteratorTest {
-
-    @Test
-    public void smallerSubtract() {
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator keep = new InOrderIterator( 2 );
-        keep.add( id1 );
-        keep.add( id2 );
-        keep.add( id3 );
-        keep.add( id4 );
-        keep.add( id5 );
-
-        InOrderIterator subtract = new InOrderIterator( 2 );
-        subtract.add( id1 );
-        subtract.add( id3 );
-        subtract.add( id5 );
-
-        SubtractionIterator sub = new SubtractionIterator( 100 );
-        sub.setKeepIterator( keep );
-        sub.setSubtractIterator( subtract );
-
-        // now make sure it's right, only 2 and 8 aren't intersected
-        Set<ScanColumn> page = sub.next();
-
-        assertTrue( page.contains( uuidColumn( id2 ) ) );
-        assertTrue( page.contains( uuidColumn( id4 ) ) );
-
-        assertEquals( 2, page.size() );
-    }
-
-
-    @Test
-    public void smallerKeep() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator keep = new InOrderIterator( 100 );
-        keep.add( id1 );
-        keep.add( id2 );
-        keep.add( id5 );
-        keep.add( id6 );
-
-        InOrderIterator subtract = new InOrderIterator( 100 );
-        subtract.add( id1 );
-        subtract.add( id3 );
-        subtract.add( id4 );
-        subtract.add( id5 );
-        subtract.add( id6 );
-
-        SubtractionIterator sub = new SubtractionIterator( 100 );
-        sub.setKeepIterator( keep );
-        sub.setSubtractIterator( subtract );
-
-        // now make sure it's right, only 2 and 8 aren't intersected
-
-        Set<ScanColumn> page = sub.next();
-
-        assertTrue( page.contains( uuidColumn( id2 ) ) );
-
-        assertEquals( 1, page.size() );
-    }
-
-
-    @Test
-    public void smallerKeepRemoveAll() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator keep = new InOrderIterator( 100 );
-        keep.add( id1 );
-        keep.add( id3 );
-        keep.add( id4 );
-
-        InOrderIterator subtract = new InOrderIterator( 100 );
-        subtract.add( id1 );
-        subtract.add( id2 );
-        subtract.add( id3 );
-        subtract.add( id4 );
-        subtract.add( id5 );
-        subtract.add( id6 );
-
-        SubtractionIterator sub = new SubtractionIterator( 100 );
-        sub.setKeepIterator( keep );
-        sub.setSubtractIterator( subtract );
-
-        // now make sure it's right, only 2 and 8 aren't intersected
-
-        assertFalse( sub.hasNext() );
-    }
-
-
-    @Test
-    public void noKeep() {
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator keep = new InOrderIterator( 100 );
-
-        InOrderIterator subtract = new InOrderIterator( 100 );
-        subtract.add( id1 );
-
-        SubtractionIterator sub = new SubtractionIterator( 100 );
-        sub.setKeepIterator( keep );
-        sub.setSubtractIterator( subtract );
-
-        assertFalse( sub.hasNext() );
-    }
-
-
-    @Test
-    public void noSubtract() {
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-
-        //keep only id 1
-        InOrderIterator keep = new InOrderIterator( 100 );
-        keep.add( id1 );
-
-        InOrderIterator subtract = new InOrderIterator( 100 );
-
-
-        SubtractionIterator sub = new SubtractionIterator( 100 );
-        sub.setKeepIterator( keep );
-        sub.setSubtractIterator( subtract );
-
-        assertTrue( sub.hasNext() );
-        Set<ScanColumn> page = sub.next();
-
-        assertTrue( page.contains( uuidColumn( id1 ) ) );
-        assertEquals( 1, page.size() );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/UnionIteratorTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/UnionIteratorTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/UnionIteratorTest.java
deleted file mode 100644
index dae5b45..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/UnionIteratorTest.java
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.UUID;
-
-import org.junit.Test;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import me.prettyprint.cassandra.serializers.UUIDSerializer;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.apache.usergrid.persistence.query.ir.result.IteratorHelper.uuidColumn;
-
-
-/**
- * @author tnine
- */
-public class UnionIteratorTest {
-
-    @Test
-    public void testMutipleIterators() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-        UUID id7 = UUIDUtils.minTimeUUID( 7 );
-        UUID id8 = UUIDUtils.minTimeUUID( 8 );
-        UUID id9 = UUIDUtils.minTimeUUID( 9 );
-        UUID id10 = UUIDUtils.minTimeUUID( 10 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id1 );
-        first.add( id2 );
-        first.add( id3 );
-        first.add( id8 );
-        first.add( id9 );
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id8 );
-        second.add( id10 );
-
-        InOrderIterator third = new InOrderIterator( 100 );
-        third.add( id6 );
-        third.add( id7 );
-        third.add( id1 );
-        third.add( id3 );
-        third.add( id5 );
-        third.add( id8 );
-
-        InOrderIterator fourth = new InOrderIterator( 100 );
-        fourth.add( id1 );
-        fourth.add( id6 );
-        fourth.add( id2 );
-        fourth.add( id3 );
-        fourth.add( id8 );
-        fourth.add( id9 );
-
-
-        UnionIterator iter = new UnionIterator( 100, 0, null );
-        iter.addIterator( first );
-        iter.addIterator( second );
-        iter.addIterator( third );
-        iter.addIterator( fourth );
-
-        Set<ScanColumn> union = iter.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( union.contains( uuidColumn( id1 ) ) );
-        assertTrue( union.contains( uuidColumn( id2 ) ) );
-        assertTrue( union.contains( uuidColumn( id3 ) ) );
-        assertTrue( union.contains( uuidColumn( id4 ) ) );
-        assertTrue( union.contains( uuidColumn( id5 ) ) );
-        assertTrue( union.contains( uuidColumn( id6 ) ) );
-        assertTrue( union.contains( uuidColumn( id7 ) ) );
-        assertTrue( union.contains( uuidColumn( id8 ) ) );
-        assertTrue( union.contains( uuidColumn( id9 ) ) );
-        assertTrue( union.contains( uuidColumn( id10 ) ) );
-    }
-
-
-    @Test
-    public void testOneIterator() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id1 );
-        first.add( id2 );
-        first.add( id3 );
-        first.add( id4 );
-
-        UnionIterator union = new UnionIterator( 100, 0, null );
-        union.addIterator( first );
-
-        Set<ScanColumn> ids = union.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void testEmptyFirstIterator() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-
-        // we should get intersection on 1, 3, and 8
-        InOrderIterator first = new InOrderIterator( 100 );
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-
-        UnionIterator union = new UnionIterator( 100, 0, null );
-        union.addIterator( first );
-        union.addIterator( second );
-
-        Set<ScanColumn> ids = union.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void testNoIterator() {
-
-        UnionIterator union = new UnionIterator( 100, 0, null );
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void largeUnionTest() {
-
-        int size = 10000;
-        int firstIntersection = 100;
-        int secondIntersection = 200;
-
-        int pageSize = 20;
-
-        UUID[] firstSet = new UUID[size];
-        UUID[] secondSet = new UUID[size];
-        UUID[] thirdSet = new UUID[size];
-
-        InOrderIterator first = new InOrderIterator( pageSize / 2 );
-        InOrderIterator second = new InOrderIterator( pageSize / 2 );
-        InOrderIterator third = new InOrderIterator( pageSize / 2 );
-
-        Set<UUID> results = new HashSet<UUID>( size );
-
-        for ( int i = 0; i < size; i++ ) {
-            firstSet[i] = UUIDUtils.newTimeUUID();
-            // every 100 elements, set the element equal to the first set. This way we
-            // have intersection
-
-            results.add( firstSet[i] );
-
-            if ( i % firstIntersection == 0 ) {
-                secondSet[i] = firstSet[i];
-            }
-            else {
-                secondSet[i] = UUIDUtils.newTimeUUID();
-                results.add( secondSet[i] );
-            }
-
-            if ( i % secondIntersection == 0 ) {
-                thirdSet[i] = firstSet[i];
-            }
-
-            else {
-                thirdSet[i] = UUIDUtils.newTimeUUID();
-                results.add( thirdSet[i] );
-            }
-        }
-
-        first.add( firstSet );
-
-        reverse( secondSet );
-        // reverse the second
-        second.add( secondSet );
-        third.add( thirdSet );
-
-        // now intersect them and make sure we get all results in a small set
-        UnionIterator union = new UnionIterator( pageSize, 0, null );
-        union.addIterator( first );
-        union.addIterator( second );
-        union.addIterator( third );
-
-
-        while ( union.hasNext() ) {
-
-            // now get the 2nd page
-            Set<ScanColumn> resultSet = union.next();
-
-            for ( ScanColumn col : resultSet ) {
-                boolean existed = results.remove( col.getUUID() );
-
-                assertTrue( "Duplicate element was detected", existed );
-            }
-        }
-
-        assertEquals( 0, results.size() );
-        assertFalse( union.hasNext() );
-    }
-
-
-    @Test
-    public void iterationCompleted() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-
-
-        UnionIterator union = new UnionIterator( 5, 0, null );
-
-        InOrderIterator first = new InOrderIterator( 100 );
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id5 );
-
-        union.addIterator( first );
-        union.addIterator( second );
-
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( union.hasNext() );
-
-        Set<ScanColumn> ids = union.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-        assertTrue( ids.contains( uuidColumn( id5 ) ) );
-
-        //now try to get the next page
-        ids = union.next();
-        assertNull( ids );
-    }
-
-
-    @Test
-    public void nullCursorBytes() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id5 );
-
-        UnionIterator union = new UnionIterator( 100, 1, null );
-
-        union.addIterator( second );
-
-        Set<ScanColumn> ids = union.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-        assertTrue( ids.contains( uuidColumn( id5 ) ) );
-    }
-
-
-    @Test
-    public void validCursorBytes() {
-
-
-        ByteBuffer cursor = UUIDSerializer.get().toByteBuffer( UUIDUtils.minTimeUUID( 4 ) );
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id3 );
-        second.add( id4 );
-        second.add( id5 );
-
-        UnionIterator union = new UnionIterator( 100, 1, cursor );
-
-        union.addIterator( second );
-
-        Set<ScanColumn> ids = union.next();
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertFalse( ids.contains( uuidColumn( id1 ) ) );
-        assertFalse( ids.contains( uuidColumn( id2 ) ) );
-        assertFalse( ids.contains( uuidColumn( id3 ) ) );
-        assertFalse( ids.contains( uuidColumn( id4 ) ) );
-        assertTrue( ids.contains( uuidColumn( id5 ) ) );
-    }
-
-
-    @Test
-    public void resetCorrect() {
-
-        UUID id1 = UUIDUtils.minTimeUUID( 1 );
-        UUID id2 = UUIDUtils.minTimeUUID( 2 );
-        UUID id3 = UUIDUtils.minTimeUUID( 3 );
-        UUID id4 = UUIDUtils.minTimeUUID( 4 );
-        UUID id5 = UUIDUtils.minTimeUUID( 5 );
-        UUID id6 = UUIDUtils.minTimeUUID( 6 );
-        UUID id7 = UUIDUtils.minTimeUUID( 75 );
-
-
-        UnionIterator union = new UnionIterator( 5, 0, null );
-
-        InOrderIterator first = new InOrderIterator( 100 );
-        first.add( id3 );
-        first.add( id6 );
-        first.add( id4 );
-
-
-        InOrderIterator second = new InOrderIterator( 100 );
-        second.add( id7 );
-        second.add( id1 );
-        second.add( id2 );
-        second.add( id5 );
-
-
-        union.addIterator( first );
-        union.addIterator( second );
-
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( union.hasNext() );
-
-        Set<ScanColumn> ids = union.next();
-
-
-        assertEquals(5, ids.size());
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-        assertTrue( ids.contains( uuidColumn( id5 ) ) );
-
-        ids = union.next();
-
-
-        assertEquals(2, ids.size());
-
-        assertTrue( ids.contains( uuidColumn( id6 ) ) );
-        assertTrue( ids.contains( uuidColumn( id7 ) ) );
-
-        //now try to get the next page
-        ids = union.next();
-        assertNull( ids );
-
-        //now reset and re-test
-        union.reset();
-
-        ids = union.next();
-
-        assertEquals(5, ids.size());
-
-
-        // now make sure it's right, only 1, 3 and 8 intersect
-        assertTrue( ids.contains( uuidColumn( id1 ) ) );
-        assertTrue( ids.contains( uuidColumn( id2 ) ) );
-        assertTrue( ids.contains( uuidColumn( id3 ) ) );
-        assertTrue( ids.contains( uuidColumn( id4 ) ) );
-        assertTrue( ids.contains( uuidColumn( id5 ) ) );
-
-
-        ids = union.next();
-
-        assertEquals(2, ids.size());
-
-        assertTrue( ids.contains( uuidColumn( id6 ) ) );
-        assertTrue( ids.contains( uuidColumn( id7 ) ) );
-
-
-        //now try to get the next page
-        ids = union.next();
-        assertNull( ids );
-    }
-
-
-    private void reverse( UUID[] array ) {
-
-        UUID temp = null;
-
-        for ( int i = 0; i < array.length / 2; i++ ) {
-            temp = array[i];
-            array[i] = array[array.length - i - 1];
-            array[array.length - i - 1] = temp;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/resources/usergrid-test-context.xml
----------------------------------------------------------------------
diff --git a/stack/core/src/test/resources/usergrid-test-context.xml b/stack/core/src/test/resources/usergrid-test-context.xml
index 8c246d5..69b0a23 100644
--- a/stack/core/src/test/resources/usergrid-test-context.xml
+++ b/stack/core/src/test/resources/usergrid-test-context.xml
@@ -26,7 +26,7 @@
 	http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.1.xsd
 	http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.1.xsd">
 
-	
+
     <!--  configure our test properties -->
     <bean id="properties"
           class="org.springframework.beans.factory.config.PropertiesFactoryBean">
@@ -44,8 +44,7 @@
 
     <import resource="classpath:/usergrid-core-context.xml"/>
 
-    <bean id="setup" class="org.apache.usergrid.corepersistence.HybridSetup">
-        <constructor-arg ref="properties"/>
+    <bean id="setup" class="org.apache.usergrid.corepersistence.CpSetup">
         <constructor-arg ref="entityManagerFactory"/>
         <constructor-arg ref="cassandraService"/>
     </bean>

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/pom.xml
----------------------------------------------------------------------
diff --git a/stack/pom.xml b/stack/pom.xml
index 1f5348d..c574be1 100644
--- a/stack/pom.xml
+++ b/stack/pom.xml
@@ -209,7 +209,8 @@
     https://issues.apache.org/jira/browse/USERGRID-224
     <module>launcher</module>
     -->
-    <module>mongo-emulator</module>
+    <!-- rebuild after EM/RM refactor -->
+    <!--<module>mongo-emulator</module>-->
     <!--
     Re-enable when query-validator updated to work with Core Persistence.
     https://issues.apache.org/jira/browse/USERGRID-221

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/query-validator/src/test/resources/usergrid-test-context.xml
----------------------------------------------------------------------
diff --git a/stack/query-validator/src/test/resources/usergrid-test-context.xml b/stack/query-validator/src/test/resources/usergrid-test-context.xml
index 03bb2e5..b34d1a3 100644
--- a/stack/query-validator/src/test/resources/usergrid-test-context.xml
+++ b/stack/query-validator/src/test/resources/usergrid-test-context.xml
@@ -46,8 +46,7 @@
 
     <bean id="binaryStore" class="org.apache.usergrid.services.assets.data.LocalFileBinaryStore"/>
 
-    <bean id="setup" class="org.apache.usergrid.corepersistence.HybridSetup">
-        <constructor-arg ref="properties"/>
+    <bean id="setup" class="org.apache.usergrid.corepersistence.CpSetup">
         <constructor-arg ref="entityManagerFactory"/>
         <constructor-arg ref="cassandraService"/>
     </bean>


[05/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/SetupImpl.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/SetupImpl.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/SetupImpl.java
deleted file mode 100644
index 3716958..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/SetupImpl.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.util.UUID;
-import me.prettyprint.hector.api.ddl.ComparatorType;
-import static me.prettyprint.hector.api.factory.HFactory.createColumnFamilyDefinition;
-import org.apache.usergrid.mq.cassandra.QueuesCF;
-import org.apache.usergrid.persistence.EntityManagerFactory;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.getCfDefs;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.APPLICATIONS_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.DEFAULT_APPLICATION;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.DEFAULT_ORGANIZATION;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.MANAGEMENT_APPLICATION;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.PRINCIPAL_TOKEN_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.PROPERTIES_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.STATIC_APPLICATION_KEYSPACE;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.SYSTEM_KEYSPACE;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.TOKENS_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.USE_VIRTUAL_KEYSPACES;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.keyspaceForApplication;
-import org.apache.usergrid.persistence.entities.Application;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Cassandra-specific setup utilities.
- *
- * @author edanuff
- */
-public class SetupImpl implements Setup {
-
-    private static final Logger logger = LoggerFactory.getLogger( SetupImpl.class );
-
-    private final org.apache.usergrid.persistence.EntityManagerFactory emf;
-    private final CassandraService cass;
-
-
-    public SetupImpl( EntityManagerFactory emf, CassandraService cass ) {
-        this.emf = emf;
-        this.cass = cass;
-    }
-
-
-    public synchronized void init() throws Exception {
-        cass.init();
-        setupSystemKeyspace();
-        setupStaticKeyspace();
-        createDefaultApplications();
-    }
-
-
-    public void createDefaultApplications() throws Exception {
-        // TODO unique check?
-        ( ( EntityManagerFactory ) emf ).initializeApplication( 
-                DEFAULT_ORGANIZATION, emf.getDefaultAppId(), DEFAULT_APPLICATION, null );
-
-        ( ( EntityManagerFactory ) emf ).initializeApplication( 
-                DEFAULT_ORGANIZATION, emf.getManagementAppId(), MANAGEMENT_APPLICATION, null );
-    }
-
-
-    /**
-     * Initialize system keyspace.
-     *
-     * @throws Exception the exception
-     */
-    public void setupSystemKeyspace() throws Exception {
-
-        logger.info( "Initialize system keyspace" );
-
-        cass.createColumnFamily( SYSTEM_KEYSPACE, createColumnFamilyDefinition( 
-                SYSTEM_KEYSPACE, APPLICATIONS_CF, ComparatorType.BYTESTYPE ) );
-
-        cass.createColumnFamily( SYSTEM_KEYSPACE, createColumnFamilyDefinition( 
-                SYSTEM_KEYSPACE, PROPERTIES_CF, ComparatorType.BYTESTYPE ) );
-
-        cass.createColumnFamily( SYSTEM_KEYSPACE, createColumnFamilyDefinition( 
-                SYSTEM_KEYSPACE, TOKENS_CF, ComparatorType.BYTESTYPE ) );
-
-        cass.createColumnFamily( SYSTEM_KEYSPACE, createColumnFamilyDefinition( 
-                SYSTEM_KEYSPACE, PRINCIPAL_TOKEN_CF, ComparatorType.UUIDTYPE ) );
-
-        logger.info( "System keyspace initialized" );
-    }
-
-
-    /**
-     * Initialize application keyspace.
-     *
-     * @param applicationId the application id
-     * @param applicationName the application name
-     *
-     * @throws Exception the exception
-     */
-    @Override
-    public void setupApplicationKeyspace( 
-            final UUID applicationId, String applicationName ) throws Exception {
-
-        if ( !USE_VIRTUAL_KEYSPACES ) {
-            String app_keyspace = keyspaceForApplication( applicationId );
-
-            logger.info( "Creating application keyspace " + app_keyspace + " for " 
-                    + applicationName + " application" );
-
-            cass.createColumnFamily( app_keyspace, createColumnFamilyDefinition( 
-                    SYSTEM_KEYSPACE, APPLICATIONS_CF, ComparatorType.BYTESTYPE ) );
-
-            cass.createColumnFamilies( app_keyspace, getCfDefs( ApplicationCF.class, app_keyspace));
-            cass.createColumnFamilies( app_keyspace, getCfDefs( QueuesCF.class, app_keyspace ) );
-        }
-    }
-
-
-    public void setupStaticKeyspace() throws Exception {
-
-        if ( USE_VIRTUAL_KEYSPACES ) {
-
-            logger.info( "Creating static application keyspace " + STATIC_APPLICATION_KEYSPACE );
-
-            cass.createColumnFamily( STATIC_APPLICATION_KEYSPACE,
-                    createColumnFamilyDefinition( STATIC_APPLICATION_KEYSPACE, APPLICATIONS_CF,
-                            ComparatorType.BYTESTYPE ) );
-
-            cass.createColumnFamilies( STATIC_APPLICATION_KEYSPACE,
-                    getCfDefs( ApplicationCF.class, STATIC_APPLICATION_KEYSPACE ) );
-            cass.createColumnFamilies( STATIC_APPLICATION_KEYSPACE,
-                    getCfDefs( QueuesCF.class, STATIC_APPLICATION_KEYSPACE ) );
-        }
-    }
-
-
-    public boolean keyspacesExist() {
-        return cass.checkKeyspacesExist();
-    }
-
-
-    public static void logCFPermissions() {
-        System.out.println( SYSTEM_KEYSPACE + "." + APPLICATIONS_CF + ".<rw>=usergrid" );
-        System.out.println( SYSTEM_KEYSPACE + "." + PROPERTIES_CF + ".<rw>=usergrid" );
-        for ( CFEnum cf : ApplicationCF.values() ) {
-            System.out.println( STATIC_APPLICATION_KEYSPACE + "." + cf + ".<rw>=usergrid" );
-        }
-        for ( CFEnum cf : QueuesCF.values() ) {
-            System.out.println( STATIC_APPLICATION_KEYSPACE + "." + cf + ".<rw>=usergrid" );
-        }
-    }
-
-
-    /** @return staticly constructed reference to the management application */
-    public static Application getManagementApp() {
-        return SystemDefaults.managementApp;
-    }
-
-
-    /** @return statically constructed reference to the default application */
-    public static Application getDefaultApp() {
-        return SystemDefaults.defaultApp;
-    }
-
-
-    static class SystemDefaults {
-
-        private static final Application managementApp = 
-                new Application( EntityManagerFactoryImpl.MANAGEMENT_APPLICATION_ID);
-
-        private static final Application defaultApp = 
-                new Application( EntityManagerFactoryImpl.DEFAULT_APPLICATION_ID);
-
-        static {
-            managementApp.setName( MANAGEMENT_APPLICATION );
-            defaultApp.setName( DEFAULT_APPLICATION );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/ConnectedIndexScanner.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/ConnectedIndexScanner.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/ConnectedIndexScanner.java
deleted file mode 100644
index b412df8..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/ConnectedIndexScanner.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.index;
-
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.springframework.util.Assert;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-
-import com.yammer.metrics.annotation.Metered;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_COMPOSITE_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-
-
-/**
- * @author tnine
- */
-public class ConnectedIndexScanner implements IndexScanner {
-
-    private final CassandraService cass;
-    private final UUID applicationId;
-    private final boolean reversed;
-    private final int pageSize;
-    private final String dictionaryType;
-    private final UUID entityId;
-    private final Iterator<String> connectionTypes;
-    private final boolean skipFirst;
-
-
-    /**
-     * Pointer to our next start read
-     */
-    private ByteBuffer start;
-
-    /**
-     * Set to the original value to start scanning from
-     */
-    private ByteBuffer scanStart;
-
-    /**
-     * Iterator for our results from the last page load
-     */
-    private LinkedHashSet<HColumn<ByteBuffer, ByteBuffer>> lastResults;
-
-    /**
-     * True if our last load loaded a full page size.
-     */
-    private boolean hasMore = true;
-
-    private String currentConnectionType;
-
-
-    public ConnectedIndexScanner( CassandraService cass, String dictionaryType, UUID applicationId, UUID entityId,
-                                  Iterator<String> connectionTypes, ByteBuffer start, boolean reversed, int pageSize,
-                                  boolean skipFirst ) {
-
-        Assert.notNull( entityId, "Entity id for row key construction must be specified when searching graph indexes" );
-        // create our start and end ranges
-        this.scanStart = start;
-        this.cass = cass;
-        this.applicationId = applicationId;
-        this.entityId = entityId;
-        this.start = scanStart;
-        this.reversed = reversed;
-        this.pageSize = pageSize;
-        this.dictionaryType = dictionaryType;
-        this.connectionTypes = connectionTypes;
-        this.skipFirst = skipFirst;
-
-
-        if ( connectionTypes.hasNext() ) {
-            currentConnectionType = connectionTypes.next();
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#reset()
-     */
-    @Override
-    public void reset() {
-        hasMore = true;
-        start = scanStart;
-    }
-
-
-    /**
-     * Search the collection index using all the buckets for the given collection. Load the next page. Return false if
-     * nothing was loaded, true otherwise
-     */
-
-    public boolean load() throws Exception {
-
-        // nothing left to load
-        if ( !hasMore ) {
-            return false;
-        }
-
-        boolean skipFirst = this.skipFirst && start == scanStart;
-
-        int totalSelectSize = pageSize + 1;
-
-        //we're discarding the first, so increase our total size by 1 since this value will be inclusive in the seek
-        if ( skipFirst ) {
-            totalSelectSize++;
-        }
-
-
-        lastResults = new LinkedHashSet<HColumn<ByteBuffer, ByteBuffer>>();
-
-
-        //cleanup columns for later logic
-        //pointer to the first col we load
-        HColumn<ByteBuffer, ByteBuffer> first = null;
-
-        //pointer to the last column we load
-        HColumn<ByteBuffer, ByteBuffer> last = null;
-
-        //go through each connection type until we exhaust the result sets
-        while ( currentConnectionType != null ) {
-
-            //only load a delta size to get this next page
-            int selectSize = totalSelectSize - lastResults.size();
-
-
-            Object key = key( entityId, dictionaryType, currentConnectionType );
-
-
-            List<HColumn<ByteBuffer, ByteBuffer>> results =
-                    cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_COMPOSITE_DICTIONARIES, key,
-                            start, null, selectSize, reversed );
-
-            final int resultSize = results.size();
-
-            if(resultSize > 0){
-
-                last = results.get( resultSize -1 );
-
-                if(first == null ){
-                    first = results.get( 0 );
-                }
-            }
-
-            lastResults.addAll( results );
-
-
-            // we loaded a full page, there might be more
-            if ( resultSize == selectSize ) {
-                hasMore = true;
-
-                //we've loaded a full page
-                break;
-            }
-            else {
-
-                //we're done, there's no more connection types and we've loaded all cols for this type.
-                if ( !connectionTypes.hasNext() ) {
-                    hasMore = false;
-                    currentConnectionType = null;
-                    break;
-                }
-
-                //we have more connection types, but we've reached the end of this type,
-                // keep going in the loop to load the next page
-
-                currentConnectionType = connectionTypes.next();
-            }
-        }
-
-        //remove the first element, we need to skip it
-        if ( skipFirst && first != null) {
-            lastResults.remove( first  );
-        }
-
-        if ( hasMore && last != null ) {
-            // set the bytebuffer for the next pass
-            start = last.getName();
-            lastResults.remove( last );
-        }
-
-        return lastResults != null && lastResults.size() > 0;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<HColumn<ByteBuffer, ByteBuffer>>> iterator() {
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-
-        // We've either 1) paged everything we should and have 1 left from our
-        // "next page" pointer
-        // Our currently buffered results don't exist or don't have a next. Try to
-        // load them again if they're less than the page size
-        if ( lastResults == null && hasMore ) {
-            try {
-                return load();
-            }
-            catch ( Exception e ) {
-                throw new RuntimeException( "Error loading next page of indexbucket scanner", e );
-            }
-        }
-
-        return false;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    @Metered( group = "core", name = "IndexBucketScanner_load" )
-    public Set<HColumn<ByteBuffer, ByteBuffer>> next() {
-        Set<HColumn<ByteBuffer, ByteBuffer>> returnVal = lastResults;
-
-        lastResults = null;
-
-        return returnVal;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "You can't remove from a result set, only advance" );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#getPageSize()
-     */
-    @Override
-    public int getPageSize() {
-        return pageSize;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexBucketScanner.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexBucketScanner.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexBucketScanner.java
deleted file mode 100644
index b2ca591..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexBucketScanner.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.index;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.persistence.cassandra.ApplicationCF;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-
-import com.yammer.metrics.annotation.Metered;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-
-
-/**
- * A simple class to make working with index buckets easier. Scans all buckets and merges the results into a single
- * column list to allow easy backwards compatibility with existing code
- *
- * @author tnine
- */
-public class IndexBucketScanner implements IndexScanner {
-
-    private final CassandraService cass;
-    private final IndexBucketLocator indexBucketLocator;
-    private final UUID applicationId;
-    private final Object keyPrefix;
-    private final ApplicationCF columnFamily;
-    private final Object finish;
-    private final boolean reversed;
-    private final int pageSize;
-    private final String[] indexPath;
-    private final IndexType indexType;
-    private final boolean skipFirst;
-
-    /** Pointer to our next start read */
-    private Object start;
-
-    /** Set to the original value to start scanning from */
-    private Object scanStart;
-
-    /** Iterator for our results from the last page load */
-    private TreeSet<HColumn<ByteBuffer, ByteBuffer>> lastResults;
-
-    /** True if our last load loaded a full page size. */
-    private boolean hasMore = true;
-
-
-
-    public IndexBucketScanner( CassandraService cass, IndexBucketLocator locator, ApplicationCF columnFamily,
-                               UUID applicationId, IndexType indexType, Object keyPrefix, Object start, Object finish,
-                               boolean reversed, int pageSize, boolean skipFirst, String... indexPath) {
-        this.cass = cass;
-        this.indexBucketLocator = locator;
-        this.applicationId = applicationId;
-        this.keyPrefix = keyPrefix;
-        this.columnFamily = columnFamily;
-        this.start = start;
-        this.finish = finish;
-        this.reversed = reversed;
-        this.skipFirst = skipFirst;
-
-        //we always add 1 to the page size.  This is because we pop the last column for the next page of results
-        this.pageSize = pageSize+1;
-        this.indexPath = indexPath;
-        this.indexType = indexType;
-        this.scanStart = start;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#reset()
-     */
-    @Override
-    public void reset() {
-        hasMore = true;
-        start = scanStart;
-    }
-
-
-    /**
-     * Search the collection index using all the buckets for the given collection. Load the next page. Return false if
-     * nothing was loaded, true otherwise
-     *
-     * @return True if the data could be loaded
-     */
-
-    public boolean load() throws Exception {
-
-        // nothing left to load
-        if ( !hasMore ) {
-            return false;
-        }
-
-        List<String> keys = indexBucketLocator.getBuckets( applicationId, indexType, indexPath );
-
-        List<Object> cassKeys = new ArrayList<Object>( keys.size() );
-
-        for ( String bucket : keys ) {
-            cassKeys.add( key( keyPrefix, bucket ) );
-        }
-
-        //if we skip the first we need to set the load to page size +2, since we'll discard the first
-        //and start paging at the next entity, otherwise we'll just load the page size we need
-        int selectSize = pageSize;
-
-        //we purposefully use instance equality.  If it's a pointer to the same value, we need to increase by 1
-        //since we'll be skipping the first value
-
-        final boolean firstPageSkipFirst = this.skipFirst &&  start == scanStart;
-
-        if(firstPageSkipFirst){
-            selectSize++;
-        }
-
-        TreeSet<HColumn<ByteBuffer, ByteBuffer>> resultsTree = IndexMultiBucketSetLoader
-                .load( cass, columnFamily, applicationId, cassKeys, start, finish, selectSize, reversed );
-
-        //remove the first element, it's from a cursor value and we don't want to retain it
-
-
-        // we loaded a full page, there might be more
-        if ( resultsTree.size() == selectSize ) {
-            hasMore = true;
-
-
-            // set the bytebuffer for the next pass
-            start = resultsTree.pollLast().getName();
-        }
-        else {
-            hasMore = false;
-        }
-
-        //remove the first element since it needs to be skipped AFTER the size check. Otherwise it will fail
-        if ( firstPageSkipFirst ) {
-            resultsTree.pollFirst();
-        }
-
-        lastResults = resultsTree;
-
-        return lastResults != null && lastResults.size() > 0;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<HColumn<ByteBuffer, ByteBuffer>>> iterator() {
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-
-        // We've either 1) paged everything we should and have 1 left from our
-        // "next page" pointer
-        // Our currently buffered results don't exist or don't have a next. Try to
-        // load them again if they're less than the page size
-        if ( lastResults == null && hasMore ) {
-            try {
-                return load();
-            }
-            catch ( Exception e ) {
-                throw new RuntimeException( "Error loading next page of indexbucket scanner", e );
-            }
-        }
-
-        return false;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    @Metered(group = "core", name = "IndexBucketScanner_load")
-    public NavigableSet<HColumn<ByteBuffer, ByteBuffer>> next() {
-        NavigableSet<HColumn<ByteBuffer, ByteBuffer>> returnVal = lastResults;
-
-        lastResults = null;
-
-        return returnVal;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "You can't remove from a result set, only advance" );
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#getPageSize()
-     */
-    @Override
-    public int getPageSize() {
-        return pageSize;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexMultiBucketSetLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexMultiBucketSetLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexMultiBucketSetLoader.java
deleted file mode 100644
index 30b54ba..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexMultiBucketSetLoader.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.index;
-
-
-import java.nio.ByteBuffer;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.ApplicationCF;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.TypeParser;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-
-/** @author tnine */
-public class IndexMultiBucketSetLoader {
-
-
-    /**
-     *
-     */
-    private static final long serialVersionUID = 1L;
-
-
-    /**
-     * Loads and sorts columns from each bucket in memory.  This will return a contiguous set of columns as if they'd
-     * been
-     * read from a single row
-     */
-    public static TreeSet<HColumn<ByteBuffer, ByteBuffer>> load( CassandraService cass, ApplicationCF columnFamily,
-                                                                 UUID applicationId, List<Object> rowKeys, Object start,
-                                                                 Object finish, int resultSize, boolean reversed )
-            throws Exception {
-        Map<ByteBuffer, List<HColumn<ByteBuffer, ByteBuffer>>> results =
-                cass.multiGetColumns( cass.getApplicationKeyspace( applicationId ), columnFamily, rowKeys, start,
-                        finish, resultSize, reversed );
-
-        final Comparator<ByteBuffer> comparator = reversed ? new DynamicCompositeReverseComparator( columnFamily ) :
-                                                  new DynamicCompositeForwardComparator( columnFamily );
-
-        TreeSet<HColumn<ByteBuffer, ByteBuffer>> resultsTree =
-                new TreeSet<HColumn<ByteBuffer, ByteBuffer>>( new Comparator<HColumn<ByteBuffer, ByteBuffer>>() {
-
-                    @Override
-                    public int compare( HColumn<ByteBuffer, ByteBuffer> first,
-                                        HColumn<ByteBuffer, ByteBuffer> second ) {
-
-                        return comparator.compare( first.getName(), second.getName() );
-                    }
-                } );
-
-        for ( List<HColumn<ByteBuffer, ByteBuffer>> cols : results.values() ) {
-
-            for ( HColumn<ByteBuffer, ByteBuffer> col : cols ) {
-                resultsTree.add( col );
-
-                // trim if we're over size
-                if ( resultsTree.size() > resultSize ) {
-                    resultsTree.pollLast();
-                }
-            }
-        }
-
-        return resultsTree;
-    }
-
-
-    private static abstract class DynamicCompositeComparator implements Comparator<ByteBuffer> {
-        @SuppressWarnings("rawtypes")
-        protected final AbstractType dynamicComposite;
-
-
-        protected DynamicCompositeComparator( ApplicationCF cf ) {
-            // should never happen, this will blow up during development if this fails
-            try {
-                dynamicComposite = TypeParser.parse( cf.getComparator() );
-            }
-            catch ( Exception e ) {
-                throw new RuntimeException( e );
-            }
-        }
-    }
-
-
-    private static class DynamicCompositeForwardComparator extends DynamicCompositeComparator {
-
-        /**
-         * @param cf
-         */
-        protected DynamicCompositeForwardComparator( ApplicationCF cf ) {
-            super( cf );
-        }
-
-
-        @SuppressWarnings("unchecked")
-        @Override
-        public int compare( ByteBuffer o1, ByteBuffer o2 ) {
-            return dynamicComposite.compare( o1, o2 );
-        }
-    }
-
-
-    private static class DynamicCompositeReverseComparator extends DynamicCompositeComparator {
-        /**
-         * @param cf
-         */
-        protected DynamicCompositeReverseComparator( ApplicationCF cf ) {
-            super( cf );
-        }
-
-
-        @SuppressWarnings("unchecked")
-        @Override
-        public int compare( ByteBuffer o1, ByteBuffer o2 ) {
-            return dynamicComposite.compare( o2, o1 );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexScanner.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexScanner.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexScanner.java
deleted file mode 100644
index a938ca3..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/IndexScanner.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.index;
-
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.Set;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-
-/**
- * Interface for scanning all index buckets.
- *
- * @author tnine
- */
-public interface IndexScanner
-        extends Iterable<Set<HColumn<ByteBuffer, ByteBuffer>>>, Iterator<Set<HColumn<ByteBuffer, ByteBuffer>>> {
-
-    /** Reset the scanner back to the start */
-    public void reset();
-
-    public int getPageSize();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/NoOpIndexScanner.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/NoOpIndexScanner.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/NoOpIndexScanner.java
deleted file mode 100644
index 3d1b9d7..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/index/NoOpIndexScanner.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.index;
-
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.NavigableSet;
-import java.util.Set;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-
-/**
- * Index scanner that doesn't return anything.  This is used if our cursor has advanced beyond the end of all scannable
- * ranges
- *
- * @author tnine
- */
-public class NoOpIndexScanner implements IndexScanner {
-
-    /**
-     *
-     */
-    public NoOpIndexScanner() {
-    }
-
-
-    /* (non-Javadoc)
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<HColumn<ByteBuffer, ByteBuffer>>> iterator() {
-        return this;
-    }
-
-
-    /* (non-Javadoc)
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-        return false;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#reset()
-     */
-    @Override
-    public void reset() {
-        //no op
-    }
-
-
-    /* (non-Javadoc)
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public NavigableSet<HColumn<ByteBuffer, ByteBuffer>> next() {
-        return null;
-    }
-
-
-    /* (non-Javadoc)
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "Remove is not supported" );
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.cassandra.index.IndexScanner#getPageSize()
-     */
-    @Override
-    public int getPageSize() {
-        return 0;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/CollectionGeoSearch.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/CollectionGeoSearch.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/CollectionGeoSearch.java
deleted file mode 100644
index c823e20..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/CollectionGeoSearch.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.geo.model.Point;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-
-
-/**
- * Class for loading collection search data
- *
- * @author tnine
- */
-public class CollectionGeoSearch extends GeoIndexSearcher {
-
-    private final String collectionName;
-    private final EntityRef headEntity;
-
-
-    public CollectionGeoSearch( EntityManager entityManager, IndexBucketLocator locator, CassandraService cass,
-                                EntityRef headEntity, String collectionName ) {
-        super( entityManager, locator, cass );
-        this.collectionName = collectionName;
-        this.headEntity = headEntity;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.GeoIterator.GeoIndexSearcher
-     * #doSearch()
-     */
-    @Override
-    protected TreeSet<HColumn<ByteBuffer, ByteBuffer>> doSearch( List<String> geoCells, UUID startId, Point searchPoint,
-                                                                 String propertyName, int pageSize ) throws Exception {
-
-        return query( key( headEntity.getUuid(), collectionName, propertyName ), geoCells, searchPoint, startId,
-                pageSize );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/ConnectionGeoSearch.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/ConnectionGeoSearch.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/ConnectionGeoSearch.java
deleted file mode 100644
index a1ad71e..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/ConnectionGeoSearch.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.geo.model.Point;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.Schema.INDEX_CONNECTIONS;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-
-
-/**
- * Class for loading connection data
- *
- * @author tnine
- */
-public class ConnectionGeoSearch extends GeoIndexSearcher {
-
-    private final UUID connectionId;
-
-
-    public ConnectionGeoSearch( EntityManager entityManager, IndexBucketLocator locator, CassandraService cass,
-                                UUID connectionId ) {
-        super( entityManager, locator, cass );
-
-        this.connectionId = connectionId;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.result.GeoIterator.GeoIndexSearcher
-     * #doSearch()
-     */
-    @Override
-    protected TreeSet<HColumn<ByteBuffer, ByteBuffer>> doSearch( List<String> geoCells, UUID startId, Point searchPoint,
-                                                                 String propertyName, int pageSize ) throws Exception {
-
-        return query( key( connectionId, INDEX_CONNECTIONS, propertyName ), geoCells, searchPoint, startId, pageSize );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRef.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRef.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRef.java
deleted file mode 100644
index 59db1d9..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRef.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import me.prettyprint.hector.api.beans.DynamicComposite;
-
-import static org.apache.commons.lang.math.NumberUtils.toDouble;
-import static org.apache.usergrid.utils.StringUtils.stringOrSubstringAfterLast;
-import static org.apache.usergrid.utils.StringUtils.stringOrSubstringBeforeFirst;
-
-
-public class EntityLocationRef implements EntityRef {
-
-    private UUID uuid;
-
-    private String type;
-
-    private UUID timestampUuid = UUIDUtils.newTimeUUID();
-
-    private double latitude;
-
-    private double longitude;
-
-    private double distance;
-
-
-    public EntityLocationRef() {
-    }
-
-
-    public EntityLocationRef( EntityRef entity, double latitude, double longitude ) {
-        this( entity.getType(), entity.getUuid(), latitude, longitude );
-    }
-
-
-    public EntityLocationRef( String type, UUID uuid, double latitude, double longitude ) {
-        this.type = type;
-        this.uuid = uuid;
-        this.latitude = latitude;
-        this.longitude = longitude;
-    }
-
-
-    public EntityLocationRef( EntityRef entity, UUID timestampUuid, double latitude, double longitude ) {
-        this( entity.getType(), entity.getUuid(), timestampUuid, latitude, longitude );
-    }
-
-
-    public EntityLocationRef( String type, UUID uuid, UUID timestampUuid, double latitude, double longitude ) {
-        this.type = type;
-        this.uuid = uuid;
-        this.timestampUuid = timestampUuid;
-        this.latitude = latitude;
-        this.longitude = longitude;
-    }
-
-
-    public EntityLocationRef( EntityRef entity, UUID timestampUuid, String coord ) {
-        this.type = entity.getType();
-        this.uuid = entity.getUuid();
-        this.timestampUuid = timestampUuid;
-        this.latitude = toDouble( stringOrSubstringBeforeFirst( coord, ',' ) );
-        this.longitude = toDouble( stringOrSubstringAfterLast( coord, ',' ) );
-    }
-
-
-    @Override
-    public UUID getUuid() {
-        return uuid;
-    }
-
-
-    public void setUuid( UUID uuid ) {
-        this.uuid = uuid;
-    }
-
-
-    @Override
-    public String getType() {
-        return type;
-    }
-
-
-    public void setType( String type ) {
-        this.type = type;
-    }
-
-
-    public UUID getTimestampUuid() {
-        return timestampUuid;
-    }
-
-
-    public void setTimestampUuid( UUID timestampUuid ) {
-        this.timestampUuid = timestampUuid;
-    }
-
-
-    public double getLatitude() {
-        return latitude;
-    }
-
-
-    public void setLatitude( double latitude ) {
-        this.latitude = latitude;
-    }
-
-
-    public double getLongitude() {
-        return longitude;
-    }
-
-
-    public void setLongitude( double longitude ) {
-        this.longitude = longitude;
-    }
-
-
-    public Point getPoint() {
-        return new Point( latitude, longitude );
-    }
-
-
-    public DynamicComposite getColumnName() {
-        return new DynamicComposite( uuid, type, timestampUuid );
-    }
-
-
-    public DynamicComposite getColumnValue() {
-        return new DynamicComposite( latitude, longitude );
-    }
-
-
-    public long getTimestampInMicros() {
-        return UUIDUtils.getTimestampInMicros( timestampUuid );
-    }
-
-
-    public long getTimestampInMillis() {
-        return UUIDUtils.getTimestampInMillis( timestampUuid );
-    }
-
-
-    public double getDistance() {
-        return distance;
-    }
-
-
-    /** Calculate, set and return the distance from this location to the point specified */
-    public double calcDistance( Point point ) {
-        distance = GeocellUtils.distance( getPoint(), point );
-        return distance;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Object#hashCode()
-     */
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + ( ( type == null ) ? 0 : type.hashCode() );
-        result = prime * result + ( ( uuid == null ) ? 0 : uuid.hashCode() );
-        return result;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Object#equals(java.lang.Object)
-     */
-    @Override
-    public boolean equals( Object obj ) {
-        if ( this == obj ) {
-            return true;
-        }
-        if ( obj == null ) {
-            return false;
-        }
-        if ( getClass() != obj.getClass() ) {
-            return false;
-        }
-        EntityLocationRef other = ( EntityLocationRef ) obj;
-        if ( type == null ) {
-            if ( other.type != null ) {
-                return false;
-            }
-        }
-        else if ( !type.equals( other.type ) ) {
-            return false;
-        }
-        if ( uuid == null ) {
-            if ( other.uuid != null ) {
-                return false;
-            }
-        }
-        else if ( !uuid.equals( other.uuid ) ) {
-            return false;
-        }
-        return true;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparator.java
deleted file mode 100644
index f002b9d..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparator.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.util.Comparator;
-
-import org.apache.usergrid.utils.UUIDUtils;
-
-
-/**
- * Compares 2 entity location refs by distance.  The one with the larger distance is considered greater than one with a
- * smaller distance.  If the distances are the same they time uuids are compared based on the UUIDUtils.compare for time
- * uuids.  The one with a larger time is considered greater
- *
- * @author tnine
- */
-public class EntityLocationRefDistanceComparator implements Comparator<EntityLocationRef> {
-
-    /**
-     *
-     */
-    public EntityLocationRefDistanceComparator() {
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
-     */
-    @Override
-    public int compare( EntityLocationRef o1, EntityLocationRef o2 ) {
-
-        if ( o1 == null ) {
-
-            //second is not null
-            if ( o2 != null ) {
-                return 1;
-            }
-            //both null
-            return 0;
-        }
-        //second is null, first isn't
-        else if ( o2 == null ) {
-            return -1;
-        }
-
-        double o1Distance = o1.getDistance();
-        double o2Distance = o2.getDistance();
-
-
-        int doubleCompare = Double.compare( o1Distance, o2Distance );
-
-
-        //    int doubleCompare = Double.compare(o1.getDistance(), o2.getDistance());
-
-        if ( doubleCompare != 0 ) {
-            return doubleCompare;
-        }
-
-        return UUIDUtils.compare( o1.getUuid(), o2.getUuid() );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeoIndexSearcher.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeoIndexSearcher.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeoIndexSearcher.java
deleted file mode 100644
index 4bc160d..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeoIndexSearcher.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.cassandra.GeoIndexManager;
-import org.apache.usergrid.persistence.cassandra.index.IndexMultiBucketSetLoader;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.persistence.geo.model.Tuple;
-
-import org.apache.commons.lang.StringUtils;
-
-import me.prettyprint.hector.api.beans.AbstractComposite.ComponentEquality;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_GEOCELL;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.utils.CompositeUtils.setEqualityFlag;
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-public abstract class GeoIndexSearcher {
-
-    private static final Logger logger = LoggerFactory.getLogger( GeoIndexSearcher.class );
-
-    private static final EntityLocationRefDistanceComparator COMP = new EntityLocationRefDistanceComparator();
-
-    // The maximum *practical* geocell resolution.
-    private static final int MAX_GEOCELL_RESOLUTION = GeoIndexManager.MAX_RESOLUTION;
-
-    /** Max number of records to read+parse from cass per tile */
-    private static final int MAX_FETCH_SIZE = 1000;
-
-    protected final EntityManager em;
-    protected final IndexBucketLocator locator;
-    protected final CassandraService cass;
-
-    public GeoIndexSearcher( EntityManager entityManager, IndexBucketLocator locator, CassandraService cass ) {
-        this.em = entityManager;
-        this.locator = locator;
-        this.cass = cass;
-    }
-
-
-    /**
-     * Perform a search from the center. The corresponding entities returned must be >= minDistance(inclusive) and <
-     * maxDistance (exclusive)
-     *
-     * @param maxResults The maximum number of results to include
-     * @param minDistance The minimum distance (inclusive)
-     * @param maxDistance The maximum distance (exclusive)
-     * @param entityClass The entity class
-     * @param baseQuery The base query
-     * @param queryEngine The query engine to use
-     * @param maxGeocellResolution The max resolution to use when searching
-     */
-    public final SearchResults proximitySearch( final EntityLocationRef minMatch, final List<String> geoCells,
-                                                Point searchPoint, String propertyName, double minDistance,
-                                                double maxDistance, final int maxResults ) throws Exception {
-
-        List<EntityLocationRef> entityLocations = new ArrayList<EntityLocationRef>( maxResults );
-
-        List<String> curGeocells = new ArrayList<String>();
-        String curContainingGeocell = null;
-
-        // we have some cells used from last time, re-use them
-        if ( geoCells != null && geoCells.size() > 0 ) {
-            curGeocells.addAll( geoCells );
-            curContainingGeocell = geoCells.get( 0 );
-        }
-        // start at the bottom
-        else {
-
-      /*
-       * The currently-being-searched geocells. NOTES: Start with max possible.
-       * Must always be of the same resolution. Must always form a rectangular
-       * region. One of these must be equal to the cur_containing_geocell.
-       */
-            curContainingGeocell = GeocellUtils.compute( searchPoint, MAX_GEOCELL_RESOLUTION );
-            curGeocells.add( curContainingGeocell );
-        }
-
-        if ( minMatch != null ) {
-            minMatch.calcDistance( searchPoint );
-        }
-        // Set of already searched cells
-        Set<String> searchedCells = new HashSet<String>();
-
-        List<String> curGeocellsUnique = null;
-
-        double closestPossibleNextResultDist = 0;
-
-    /*
-     * Assumes both a and b are lists of (entity, dist) tuples, *sorted by
-     * dist*. NOTE: This is an in-place merge, and there are guaranteed no
-     * duplicates in the resulting list.
-     */
-
-        int noDirection[] = { 0, 0 };
-        List<Tuple<int[], Double>> sortedEdgesDistances = Arrays.asList( new Tuple<int[], Double>( noDirection, 0d ) );
-        boolean done = false;
-        UUID lastReturned = null;
-
-        while ( !curGeocells.isEmpty() && entityLocations.size() < maxResults ) {
-            closestPossibleNextResultDist = sortedEdgesDistances.get( 0 ).getSecond();
-            if ( maxDistance > 0 && closestPossibleNextResultDist > maxDistance ) {
-                break;
-            }
-
-            Set<String> curTempUnique = new HashSet<String>( curGeocells );
-            curTempUnique.removeAll( searchedCells );
-            curGeocellsUnique = new ArrayList<String>( curTempUnique );
-
-            Set<HColumn<ByteBuffer, ByteBuffer>> queryResults = null;
-
-            lastReturned = null;
-
-            // we need to keep searching everything in our tiles until we don't get
-            // any more results, then we'll have the closest points and can move on
-            // do the next tiles
-            do {
-                queryResults = doSearch( curGeocellsUnique, lastReturned, searchPoint, propertyName, MAX_FETCH_SIZE );
-
-                if ( logger.isDebugEnabled() ) {
-                    logger.debug( "fetch complete for: {}", StringUtils.join( curGeocellsUnique, ", " ) );
-                }
-
-                searchedCells.addAll( curGeocells );
-
-                // Begin storing distance from the search result entity to the
-                // search center along with the search result itself, in a tuple.
-
-                // Merge new_results into results
-                for ( HColumn<ByteBuffer, ByteBuffer> column : queryResults ) {
-
-                    DynamicComposite composite = DynamicComposite.fromByteBuffer( column.getName() );
-
-                    UUID uuid = composite.get( 0, ue );
-
-                    lastReturned = uuid;
-
-                    String type = composite.get( 1, se );
-                    UUID timestampUuid = composite.get( 2, ue );
-                    composite = DynamicComposite.fromByteBuffer( column.getValue() );
-                    Double latitude = composite.get( 0, de );
-                    Double longitude = composite.get( 1, de );
-
-                    EntityLocationRef entityLocation =
-                            new EntityLocationRef( type, uuid, timestampUuid, latitude, longitude );
-
-                    double distance = entityLocation.calcDistance( searchPoint );
-
-                    // discard, it's too close or too far, of closer than the minimum we
-                    // should match, skip it
-                    if ( distance < minDistance || ( maxDistance != 0 && distance > maxDistance ) || ( minMatch != null
-                            && COMP.compare( entityLocation, minMatch ) <= 0 ) ) {
-                        continue;
-                    }
-
-                    int index = Collections.binarySearch( entityLocations, entityLocation, COMP );
-
-                    // already in the index
-                    if ( index > -1 ) {
-                        continue;
-                    }
-
-                    // set the insert index
-                    index = ( index + 1 ) * -1;
-
-                    // no point in adding it
-                    if ( index >= maxResults ) {
-                        continue;
-                    }
-
-                    // results.add(index, entity);
-                    // distances.add(index, distance);
-                    entityLocations.add( index, entityLocation );
-
-                    /**
-                     * Discard an additional entries as we iterate to avoid holding them
-                     * all in ram
-                     */
-                    while ( entityLocations.size() > maxResults ) {
-                        entityLocations.remove( entityLocations.size() - 1 );
-                    }
-                }
-            }
-            while ( queryResults != null && queryResults.size() == MAX_FETCH_SIZE );
-
-            /**
-             * We've searched everything and have a full set, we want to return the
-             * "current" tiles to search next time for the cursor, since cass could
-             * contain more results
-             */
-            if ( done || entityLocations.size() == maxResults ) {
-                break;
-            }
-
-            sortedEdgesDistances = GeocellUtils.distanceSortedEdges( curGeocells, searchPoint );
-
-            if ( queryResults.size() == 0 || curGeocells.size() == 4 ) {
-        /*
-         * Either no results (in which case we optimize by not looking at
-         * adjacents, go straight to the parent) or we've searched 4 adjacent
-         * geocells, in which case we should now search the parents of those
-         * geocells.
-         */
-                curContainingGeocell =
-                        curContainingGeocell.substring( 0, Math.max( curContainingGeocell.length() - 1, 0 ) );
-                if ( curContainingGeocell.length() == 0 ) {
-                    // final check - top level tiles
-                    curGeocells.clear();
-                    String[] items = "0123456789abcdef".split( "(?!^)" );
-                    Collections.addAll(curGeocells, items);
-                    done = true;
-                }
-                else {
-                    List<String> oldCurGeocells = new ArrayList<String>( curGeocells );
-                    curGeocells.clear();
-                    for ( String cell : oldCurGeocells ) {
-                        if ( cell.length() > 0 ) {
-                            String newCell = cell.substring( 0, cell.length() - 1 );
-                            if ( !curGeocells.contains( newCell ) ) {
-                                curGeocells.add( newCell );
-                            }
-                        }
-                    }
-                }
-            }
-            else if ( curGeocells.size() == 1 ) {
-                // Get adjacent in one direction.
-                // TODO(romannurik): Watch for +/- 90 degree latitude edge case
-                // geocells.
-                for (Tuple<int[], Double> sortedEdgesDistance : sortedEdgesDistances) {
-
-                    int nearestEdge[] = sortedEdgesDistance.getFirst();
-                    String edge = GeocellUtils.adjacent(curGeocells.get(0), nearestEdge);
-
-                    // we're at the edge of the world, search in a different direction
-                    if (edge == null) {
-                        continue;
-                    }
-
-                    curGeocells.add(edge);
-                    break;
-                }
-            }
-            else if ( curGeocells.size() == 2 ) {
-                // Get adjacents in perpendicular direction.
-                int nearestEdge[] =
-                        GeocellUtils.distanceSortedEdges( Arrays.asList( curContainingGeocell ), searchPoint ).get( 0 )
-                                    .getFirst();
-                int[] perpendicularNearestEdge = { 0, 0 };
-                if ( nearestEdge[0] == 0 ) {
-                    // Was vertical, perpendicular is horizontal.
-                    for ( Tuple<int[], Double> edgeDistance : sortedEdgesDistances ) {
-                        if ( edgeDistance.getFirst()[0] != 0 ) {
-                            perpendicularNearestEdge = edgeDistance.getFirst();
-                            break;
-                        }
-                    }
-                }
-                else {
-                    // Was horizontal, perpendicular is vertical.
-                    for ( Tuple<int[], Double> edgeDistance : sortedEdgesDistances ) {
-                        if ( edgeDistance.getFirst()[0] == 0 ) {
-                            perpendicularNearestEdge = edgeDistance.getFirst();
-                            break;
-                        }
-                    }
-                }
-                List<String> tempCells = new ArrayList<String>();
-                for ( String cell : curGeocells ) {
-                    tempCells.add( GeocellUtils.adjacent( cell, perpendicularNearestEdge ) );
-                }
-                curGeocells.addAll( tempCells );
-            }
-
-            logger.debug( "{} results found.", entityLocations.size() );
-        }
-
-        // now we have our final sets, construct the results
-
-        return new SearchResults( entityLocations, curGeocells );
-    }
-
-
-    protected TreeSet<HColumn<ByteBuffer, ByteBuffer>> query( Object key, List<String> curGeocellsUnique,
-                                                              Point searchPoint, UUID startId, int count )
-            throws Exception {
-
-        List<Object> keys = new ArrayList<Object>();
-
-        UUID appId = em.getApplicationRef().getUuid();
-
-        for ( String geoCell : curGeocellsUnique ) {
-
-            // add buckets for each geoCell
-
-            for ( String indexBucket : locator.getBuckets( appId, IndexType.GEO, geoCell ) ) {
-                keys.add( key( key, DICTIONARY_GEOCELL, geoCell, indexBucket ) );
-            }
-        }
-
-        DynamicComposite start = null;
-
-        if ( startId != null ) {
-            start = new DynamicComposite( startId );
-            setEqualityFlag( start, ComponentEquality.GREATER_THAN_EQUAL );
-        }
-
-        TreeSet<HColumn<ByteBuffer, ByteBuffer>> columns =
-                IndexMultiBucketSetLoader.load( cass, ENTITY_INDEX, appId, keys, start, null, count, false );
-
-        return columns;
-    }
-
-
-    protected abstract TreeSet<HColumn<ByteBuffer, ByteBuffer>> doSearch( List<String> geoCells, UUID startId,
-                                                                          Point searchPoint, String propertyName,
-                                                                          int pageSize ) throws Exception;
-
-
-    public static class SearchResults {
-
-        public final List<EntityLocationRef> entityLocations;
-        public final List<String> lastSearchedGeoCells;
-
-
-        /**
-         * @param entityLocations
-         * @param lastSearchedGeoCells
-         */
-        public SearchResults( List<EntityLocationRef> entityLocations, List<String> lastSearchedGeoCells ) {
-            this.entityLocations = entityLocations;
-            this.lastSearchedGeoCells = lastSearchedGeoCells;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellManager.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellManager.java
deleted file mode 100644
index 074f731..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellManager.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.usergrid.persistence.cassandra.GeoIndexManager;
-import org.apache.usergrid.persistence.geo.model.BoundingBox;
-import org.apache.usergrid.persistence.geo.model.CostFunction;
-import org.apache.usergrid.persistence.geo.model.DefaultCostFunction;
-import org.apache.usergrid.persistence.geo.model.Point;
-
-
-/**
- #
- # Copyright 2010 Alexandre Gellibert
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- */
-
-
-/**
- * Ported java version of python geocell: http://code.google.com/p/geomodel/source/browse/trunk/geo/geocell.py
- * <p/>
- * Defines the notion of 'geocells' and exposes methods to operate on them.
- * <p/>
- * A geocell is a hexadecimal string that defines a two dimensional rectangular region inside the [-90,90] x [-180,180]
- * latitude/longitude space. A geocell's 'resolution' is its length. For most practical purposes, at high resolutions,
- * geocells can be treated as single points.
- * <p/>
- * Much like geohashes (see http://en.wikipedia.org/wiki/Geohash), geocells are hierarchical, in that any prefix of a
- * geocell is considered its ancestor, with geocell[:-1] being geocell's immediate parent cell.
- * <p/>
- * To calculate the rectangle of a given geocell string, first divide the [-90,90] x [-180,180] latitude/longitude space
- * evenly into a 4x4 grid like so:
- * <p/>
- * +---+---+---+---+ (90, 180) | a | b | e | f | +---+---+---+---+ | 8 | 9 | c | d | +---+---+---+---+ | 2 | 3 | 6 | 7 |
- * +---+---+---+---+ | 0 | 1 | 4 | 5 | (-90,-180) +---+---+---+---+
- * <p/>
- * NOTE: The point (0, 0) is at the intersection of grid cells 3, 6, 9 and c. And, for example, cell 7 should be the
- * sub-rectangle from (-45, 90) to (0, 180).
- * <p/>
- * Calculate the sub-rectangle for the first character of the geocell string and re-divide this sub-rectangle into
- * another 4x4 grid. For example, if the geocell string is '78a', we will re-divide the sub-rectangle like so:
- * <p/>
- * .                   . .                   . . . +----+----+----+----+ (0, 180) | 7a | 7b | 7e | 7f |
- * +----+----+----+----+ | 78 | 79 | 7c | 7d | +----+----+----+----+ | 72 | 73 | 76 | 77 | +----+----+----+----+ | 70 |
- * 71 | 74 | 75 | . . (-45,90) +----+----+----+----+ .                   . .                   .
- * <p/>
- * Continue to re-divide into sub-rectangles and 4x4 grids until the entire geocell string has been exhausted. The final
- * sub-rectangle is the rectangular region for the geocell.
- *
- * @author api.roman.public@gmail.com (Roman Nurik)
- * @author (java portage) Alexandre Gellibert
- */
-
-public class GeocellManager {
-
-    // The maximum *practical* geocell resolution.
-    public static final int MAX_GEOCELL_RESOLUTION = GeoIndexManager.MAX_RESOLUTION;
-
-    // The maximum number of geocells to consider for a bounding box search.
-    private static final int MAX_FEASIBLE_BBOX_SEARCH_CELLS = 300;
-
-    // Function used if no custom function is used in bestBboxSearchCells method
-    private static final CostFunction DEFAULT_COST_FUNCTION = new DefaultCostFunction();
-
-    //    private static final Logger logger = GeocellLogger.get();
-
-
-    /**
-     * Returns the list of geocells (all resolutions) that are containing the point
-     *
-     * @return Returns the list of geocells (all resolutions) that are containing the point
-     */
-    public static List<String> generateGeoCell( Point point ) {
-        List<String> geocells = new ArrayList<String>();
-        String geocellMax = GeocellUtils.compute( point, GeocellManager.MAX_GEOCELL_RESOLUTION );
-        for ( int i = 1; i < GeocellManager.MAX_GEOCELL_RESOLUTION; i++ ) {
-            geocells.add( GeocellUtils.compute( point, i ) );
-        }
-        geocells.add( geocellMax );
-        return geocells;
-    }
-
-
-    /**
-     * Returns an efficient set of geocells to search in a bounding box query.
-     * <p/>
-     * This method is guaranteed to return a set of geocells having the same resolution (except in the case of
-     * antimeridian search i.e when east < west).
-     *
-     * @param bbox: A geotypes.Box indicating the bounding box being searched.
-     * @param costFunction: A function that accepts two arguments: numCells: the number of cells to search resolution:
-     * the resolution of each cell to search and returns the 'cost' of querying against this number of cells at the
-     * given resolution.)
-     *
-     * @return A list of geocell strings that contain the given box.
-     */
-    public static List<String> bestBboxSearchCells( BoundingBox bbox, CostFunction costFunction ) {
-        if ( bbox.getEast() < bbox.getWest() ) {
-            BoundingBox bboxAntimeridian1 =
-                    new BoundingBox( bbox.getNorth(), bbox.getEast(), bbox.getSouth(), GeocellUtils.MIN_LONGITUDE );
-            BoundingBox bboxAntimeridian2 =
-                    new BoundingBox( bbox.getNorth(), GeocellUtils.MAX_LONGITUDE, bbox.getSouth(), bbox.getWest() );
-            List<String> antimeridianList = bestBboxSearchCells( bboxAntimeridian1, costFunction );
-            antimeridianList.addAll( bestBboxSearchCells( bboxAntimeridian2, costFunction ) );
-            return antimeridianList;
-        }
-
-        String cellNE = GeocellUtils.compute( bbox.getNorthEast(), GeocellManager.MAX_GEOCELL_RESOLUTION );
-        String cellSW = GeocellUtils.compute( bbox.getSouthWest(), GeocellManager.MAX_GEOCELL_RESOLUTION );
-
-        // The current lowest BBOX-search cost found; start with practical infinity.
-        double minCost = Double.MAX_VALUE;
-
-        // The set of cells having the lowest calculated BBOX-search cost.
-        List<String> minCostCellSet = new ArrayList<String>();
-
-        // First find the common prefix, if there is one.. this will be the base
-        // resolution.. i.e. we don't have to look at any higher resolution cells.
-        int minResolution = 0;
-        int maxResoltuion = Math.min( cellNE.length(), cellSW.length() );
-        while ( minResolution < maxResoltuion && cellNE.substring( 0, minResolution + 1 )
-                                                       .startsWith( cellSW.substring( 0, minResolution + 1 ) ) ) {
-            minResolution++;
-        }
-
-        // Iteravely calculate all possible sets of cells that wholely contain
-        // the requested bounding box.
-        for ( int curResolution = minResolution; curResolution < GeocellManager.MAX_GEOCELL_RESOLUTION + 1;
-              curResolution++ ) {
-            String curNE = cellNE.substring( 0, curResolution );
-            String curSW = cellSW.substring( 0, curResolution );
-
-            int numCells = GeocellUtils.interpolationCount( curNE, curSW );
-            if ( numCells > MAX_FEASIBLE_BBOX_SEARCH_CELLS ) {
-                continue;
-            }
-
-            List<String> cellSet = GeocellUtils.interpolate( curNE, curSW );
-            Collections.sort( cellSet );
-
-            double cost;
-            if ( costFunction == null ) {
-                cost = DEFAULT_COST_FUNCTION.defaultCostFunction( cellSet.size(), curResolution );
-            }
-            else {
-                cost = costFunction.defaultCostFunction( cellSet.size(), curResolution );
-            }
-
-            if ( cost <= minCost ) {
-                minCost = cost;
-                minCostCellSet = cellSet;
-            }
-            else {
-                if ( minCostCellSet.size() == 0 ) {
-                    minCostCellSet = cellSet;
-                }
-                // Once the cost starts rising, we won't be able to do better, so abort.
-                break;
-            }
-        }
-        //        logger.log(Level.INFO, "Calculate cells "+StringUtils.join(minCostCellSet, ",
-        // ")+" in box ("+bbox.getSouth()+","+bbox.getWest()+") ("+bbox.getNorth()+","+bbox.getEast()+")");
-        return minCostCellSet;
-    }
-}


[02/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/StaticIdIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/StaticIdIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/StaticIdIterator.java
deleted file mode 100644
index e04ac6c..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/StaticIdIterator.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-
-/** Simple iterator that just returns UUIDs that are set into it */
-public class StaticIdIterator implements ResultIterator {
-
-    private final Set<ScanColumn> ids;
-
-    private boolean returnedOnce = false;
-
-
-    /**
-     *
-     */
-    public StaticIdIterator( UUID id ) {
-        final ScanColumn col = new UUIDIndexSliceParser.UUIDColumn( id, ByteBuffer.allocate( 0 ) );
-
-        ids = Collections.singleton( col );
-    }
-
-
-    @Override
-    public void reset() {
-        //no op
-    }
-
-
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastValue ) {
-        //no cursor, it's a static list
-    }
-
-
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        return this;
-    }
-
-
-    @Override
-    public boolean hasNext() {
-        return !returnedOnce;
-    }
-
-
-    @Override
-    public Set<ScanColumn> next() {
-        returnedOnce = true;
-        return ids;
-    }
-
-
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "This iterator does not support remove" );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIterator.java
deleted file mode 100644
index 9cfcf0a..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SubtractionIterator.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.LinkedHashSet;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-import com.google.common.collect.Sets;
-
-
-/**
- * Simple iterator to perform Unions
- *
- * @author tnine
- */
-public class SubtractionIterator extends MergeIterator {
-
-    private ResultIterator keepIterator;
-    private ResultIterator subtractIterator;
-
-
-    public SubtractionIterator( int pageSize ) {
-        super( pageSize );
-    }
-
-
-    /** @param subtractIterator the subtractIterator to set */
-    public void setSubtractIterator( ResultIterator subtractIterator ) {
-        this.subtractIterator = subtractIterator;
-    }
-
-
-    /** @param keepIterator the keepIterator to set */
-    public void setKeepIterator( ResultIterator keepIterator ) {
-        this.keepIterator = keepIterator;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void doReset() {
-        keepIterator.reset();
-        subtractIterator.reset();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.MergeIterator#advance()
-     */
-    @Override
-    protected Set<ScanColumn> advance() {
-        if ( !keepIterator.hasNext() ) {
-            return null;
-        }
-
-        Set<ScanColumn> results = new LinkedHashSet<ScanColumn>( pageSize );
-
-        /**
-         * The order here is important.  We don't want to check the advance unless we're less than our result size
-         * Otherwise we have issues with side effects of cursor construction.
-         */
-        while (results.size() < pageSize && keepIterator.hasNext() ) {
-
-            Set<ScanColumn> keepPage = keepIterator.next();
-
-            while ( subtractIterator.hasNext() && keepPage.size() > 0 ) {
-                keepPage.removeAll( subtractIterator.next() );
-            }
-
-            subtractIterator.reset();
-
-            results.addAll( keepPage );
-        }
-
-        return results;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor(org.apache.usergrid.persistence.cassandra
-     * .CursorCache)
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastLoaded ) {
-        //we can only keep a cursor on our keep result set, we must subtract from every page of keep when loading
-        // results
-        keepIterator.finalizeCursor( cache, lastLoaded );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UUIDIndexSliceParser.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UUIDIndexSliceParser.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UUIDIndexSliceParser.java
deleted file mode 100644
index 4b98cc7..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UUIDIndexSliceParser.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-/**
- * Parser for reading and writing secondary index composites
- *
- * @author tnine
- */
-public class UUIDIndexSliceParser implements SliceParser {
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.SliceParser#parse(java.nio.ByteBuffer)
-     */
-    @Override
-    public ScanColumn parse( ByteBuffer buff ) {
-        return new UUIDColumn( ue.fromByteBuffer( buff.duplicate() ), buff );
-    }
-
-
-    public static class UUIDColumn extends AbstractScanColumn {
-
-        public UUIDColumn( UUID uuid, ByteBuffer buffer ) {
-            super( uuid, buffer );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UnionIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UnionIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UnionIterator.java
deleted file mode 100644
index ea7b3f6..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/UnionIterator.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-/**
- * Simple iterator to perform Unions
- *
- * @author tnine
- */
-public class UnionIterator extends MultiIterator {
-
-    private static final ScanColumnComparator COMP = new ScanColumnComparator();
-
-    private SortedColumnList list;
-
-    private final int id;
-
-
-    /**
-     * @param pageSize The page size to return
-     * @param id The id assigned to this node
-     * @param minUuid The minimum UUID to return
-     */
-    public UnionIterator( int pageSize, int id, ByteBuffer minUuid ) {
-        super( pageSize );
-
-        this.id = id;
-
-        UUID parseMinUuid = null;
-
-        if(minUuid != null)      {
-            parseMinUuid = ue.fromByteBuffer( minUuid );
-        }
-
-        list = new SortedColumnList( pageSize, parseMinUuid );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.MergeIterator#advance()
-     */
-    @Override
-    protected Set<ScanColumn> advance() {
-
-        int size = iterators.size();
-
-        if ( size == 0 ) {
-            return null;
-        }
-
-
-        list.clear();
-
-        for ( ResultIterator itr : iterators ) {
-
-            while ( itr.hasNext() ) {
-                list.addAll( itr.next() );
-            }
-
-            itr.reset();
-        }
-
-        //mark us for the next page
-        list.mark();
-
-
-        return list.asSet();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor(
-     * org.apache.usergrid.persistence.cassandra.CursorCache)
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastLoaded ) {
-
-        ByteBuffer buff = ue.toByteBuffer( lastLoaded );
-        cache.setNextCursor( id, buff );
-        //get our scan column and put them in the cache
-        //we finalize the cursor of the min
-    }
-
-
-    @Override
-    public void doReset() {
-        //reset sub iterators if we need to
-        super.doReset();
-
-        list.reset();
-
-    }
-
-
-    /**
-     * A Sorted Set with a max size. When a new entry is added, the max is removed.  You can mark the next "min" by
-     * calling the mark method.  Values > min are accepted.  Values > min and that are over size are discarded
-     */
-    public static final class SortedColumnList {
-
-        private static final ScanColumnComparator COMP = new ScanColumnComparator();
-
-        private final int maxSize;
-
-        private final List<ScanColumn> list;
-
-
-        private ScanColumn min;
-
-
-        public SortedColumnList( final int maxSize, final UUID minUuid ) {
-            //we need to allocate the extra space if required
-            this.list = new ArrayList<ScanColumn>( maxSize );
-            this.maxSize = maxSize;
-
-            if ( minUuid != null ) {
-                min = new AbstractScanColumn( minUuid, null ) {};
-            }
-        }
-
-
-        /**
-         * Add the column to this list
-         */
-        public void add( ScanColumn col ) {
-            //less than our min, don't add
-            if ( COMP.compare( min, col ) >= 0 ) {
-                return;
-            }
-
-            int index = Collections.binarySearch( this.list, col, COMP );
-
-            //already present
-            if ( index > -1 ) {
-                return;
-            }
-
-            index = ( index * -1 ) - 1;
-
-            //outside the range
-            if ( index >= maxSize ) {
-                return;
-            }
-
-            this.list.add( index, col );
-
-            final int size = this.list.size();
-
-            if ( size > maxSize ) {
-                this.list.subList( maxSize, size ).clear();
-            }
-        }
-
-
-        /**
-         * Add all the elements to this list
-         */
-        public void addAll( final Collection<? extends ScanColumn> cols ) {
-            for ( ScanColumn col : cols ) {
-                add( col );
-            }
-        }
-
-
-        /**
-         * Returns a new list.  If no elements are present, returns null
-         */
-        public Set<ScanColumn> asSet() {
-            if ( this.list.size() == 0 ) {
-                return null;
-            }
-
-            return new LinkedHashSet<ScanColumn>( this.list );
-        }
-
-
-        /**
-         * Mark our last element in the tree as the max
-         */
-        public void mark() {
-
-            final int size = this.list.size();
-
-            //we don't have any elements in the list, and we've never set a min
-            if ( size == 0 ) {
-                return;
-            }
-
-            min = this.list.get( size - 1 );
-        }
-
-
-        /**
-         * Clear the list
-         */
-        public void clear() {
-            this.list.clear();
-        }
-
-        public void reset(){
-            clear();
-            this.min = null;
-        }
-    }
-
-
-    /**
-     * Simple comparator for comparing scan columns.  Orders them by time uuid
-     */
-    private static class ScanColumnComparator implements Comparator<ScanColumn> {
-
-        @Override
-        public int compare( final ScanColumn o1, final ScanColumn o2 ) {
-            if ( o1 == null ) {
-                if ( o2 == null ) {
-                    return 0;
-                }
-
-                return -1;
-            }
-
-            else if ( o2 == null ) {
-                return 1;
-            }
-
-            return UUIDUtils.compare( o1.getUUID(), o2.getUUID() );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/resources/usergrid-core-context.xml
----------------------------------------------------------------------
diff --git a/stack/core/src/main/resources/usergrid-core-context.xml b/stack/core/src/main/resources/usergrid-core-context.xml
index dda99f5..2c44fa2 100644
--- a/stack/core/src/main/resources/usergrid-core-context.xml
+++ b/stack/core/src/main/resources/usergrid-core-context.xml
@@ -31,7 +31,7 @@
     <aop:config proxy-target-class="true"/>
 
 
-	
+
 	<bean id="propertyPlaceholderConfigurer"
 		class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
 		<property name="properties" ref="properties" />
@@ -40,9 +40,9 @@
 		</property>
 	</bean>
 
-	
+
 	<!-- The Time Resolution used for the cluster -->
-	<bean id="microsecondsTimeResolution" class="me.prettyprint.cassandra.service.clock.MicrosecondsClockResolution" />
+    <bean id="microsecondsTimeResolution" class="me.prettyprint.cassandra.service.clock.MicrosecondsClockResolution" />
   <bean id="traceTagManager" class="org.apache.usergrid.persistence.cassandra.util.TraceTagManager"/>
   <bean id="traceTagReporter" class="org.apache.usergrid.persistence.cassandra.util.Slf4jTraceTagReporter"/>
 
@@ -54,7 +54,6 @@
 		<constructor-arg value="${cassandra.url}" />
         <!-- set the pool size if it's available.  If not go with 50 -->
         <property name="maxActive" value="${cassandra.connections:50}"/>
-        <property name="clockResolution" ref="microsecondsTimeResolution" />
         <property name="opTimer" ref="taggedOpTimer"/>
         <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
 	</bean>
@@ -69,21 +68,21 @@
 
     <bean id="loadBalancingPolicy" class="me.prettyprint.cassandra.connection.DynamicLoadBalancingPolicy"/>
 
-	<!--  locking for a single node -->	
-<!--	<bean name="lockManager" 
+	<!--  locking for a single node -->
+<!--	<bean name="lockManager"
         class="org.apache.usergrid.locking.singlenode.SingleNodeLockManagerImpl" />-->
-	
+
 	<!--  hector based locks -->
-	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace 
-    MUST be updated to use an odd number for it's replication Factor.  Even numbers can 
+	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace
+    MUST be updated to use an odd number for it's replication Factor.  Even numbers can
     potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification-->
-	
+
 	<bean name="lockManager" class="org.apache.usergrid.locking.cassandra.HectorLockManagerImpl" >
 		<property name="cluster" ref="cassandraCluster"/>
 		<property name="keyspaceName" value="${cassandra.lock.keyspace}"/>
 		<property name="consistencyLevelPolicy" ref="consistencyLevelPolicy"/>
 	</bean>
-	
+
 	<!--  zookeeper locks -->
 	<!--
 	<bean name="lockManager" class="org.apache.usergrid.locking.zookeeper.ZooKeeperLockManagerImpl" >
@@ -91,7 +90,7 @@
 		<property name="sessionTimeout" value="2000"/>
 		<property name="maxAttempts" value="10"/>
 	</bean>  -->
-	
+
 	<bean id="cassandraService"
 		class="org.apache.usergrid.persistence.cassandra.CassandraService" init-method="init" destroy-method="destroy">
 		<constructor-arg ref="properties" />
@@ -100,17 +99,16 @@
 		<constructor-arg ref="lockManager" />
 		<property name="consistencyLevelPolicy" ref="consistencyLevelPolicy"/>
 	</bean>
-	
+
 	<bean name="consistencyLevelPolicy" class="me.prettyprint.cassandra.model.ConfigurableConsistencyLevel">
         <property name="defaultReadConsistencyLevel" value="${cassandra.readcl}"/>
         <property name="defaultWriteConsistencyLevel" value="${cassandra.writecl}"/>
     </bean>
-	
+
     <bean id="entityManagerFactory"
-		class="org.apache.usergrid.corepersistence.HybridEntityManagerFactory" scope="singleton">
+		class="org.apache.usergrid.corepersistence.CpEntityManagerFactory" scope="singleton">
 		<constructor-arg ref="cassandraService" />
         <constructor-arg ref="counterUtils"/>
-        <constructor-arg value="${usergrid.counter.skipAggregate}"/>
     </bean>
 
     <bean id="queueManagerFactory"
@@ -148,12 +146,12 @@
         <constructor-arg ref="cassandraCluster"/>
         <constructor-arg ref="properties"/>
     </bean>
-    
-        
+
+
    <bean id="indexBucketLocator" class="org.apache.usergrid.persistence.cassandra.SimpleIndexBucketLocatorImpl">
     	<constructor-arg value="${usergrid.index.defaultbucketsize}"/>
     </bean>
-    
+
     <bean id="mailUtils" class="org.apache.usergrid.utils.MailUtils" />
 
     <bean id="traceTagAspect" class="org.apache.usergrid.persistence.cassandra.util.TraceTagAspect"/>
@@ -169,7 +167,7 @@
            method="applyTrace"/>
       </aop:aspect>
     </aop:config>
-   
+
     <!-- ============================================================== -->
     <!-- Scheduler Settings from removed Scheduler Module's app context -->
     <!-- ============================================================== -->

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/IndexIT.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/IndexIT.java b/stack/core/src/test/java/org/apache/usergrid/persistence/IndexIT.java
index dc6593b..9e8b62f 100644
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/IndexIT.java
+++ b/stack/core/src/test/java/org/apache/usergrid/persistence/IndexIT.java
@@ -33,7 +33,6 @@ import org.apache.usergrid.cassandra.Concurrent;
 import org.apache.usergrid.persistence.cassandra.CassandraService;
 import org.apache.usergrid.persistence.cassandra.IndexUpdate;
 import org.apache.usergrid.persistence.cassandra.IndexUpdate.IndexEntry;
-import org.apache.usergrid.persistence.cassandra.RelationManagerImpl;
 import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.utils.JsonUtils;
 import org.apache.usergrid.utils.UUIDUtils;
@@ -53,8 +52,8 @@ public class IndexIT extends AbstractCoreIT {
     private static final Logger LOG = LoggerFactory.getLogger( IndexIT.class );
 
     public static final String[] alphabet = {
-        "Alpha", "Bravo", "Charlie", "Delta", "Echo", "Foxtrot", "Golf", "Hotel", "India", 
-        "Juliet", "Kilo", "Lima", "Mike", "November", "Oscar", "Papa", "Quebec", "Romeo", "Sierra", 
+        "Alpha", "Bravo", "Charlie", "Delta", "Echo", "Foxtrot", "Golf", "Hotel", "India",
+        "Juliet", "Kilo", "Lima", "Mike", "November", "Oscar", "Papa", "Quebec", "Romeo", "Sierra",
         "Tango", "Uniform", "Victor", "Whiskey", "X-ray", "Yankee", "Zulu"
     };
 
@@ -440,43 +439,5 @@ public class IndexIT extends AbstractCoreIT {
 
 
 
-        //now read the index and see what properties are there
-
-        RelationManager rm = em.getRelationManager( entity2Ref );
-
-        if ( rm instanceof RelationManagerImpl ) { // only relevant for old-school EntityManagers
-
-            RelationManagerImpl impl = (RelationManagerImpl)rm;
-
-            CassandraService cass = cassandraResource.getBean( CassandraService.class );
-
-            ByteBufferSerializer buf = ByteBufferSerializer.get();
-
-            Keyspace ko = cass.getApplicationKeyspace( applicationId );
-            Mutator<ByteBuffer> m = createMutator( ko, buf );
-
-            IndexUpdate update = impl.batchStartIndexUpdate( m, entity1Ref, 
-                    "status", "ignore", UUIDUtils.newTimeUUID(), false, false, true, false );
-
-            int count = 0;
-
-            IndexEntry lastMatch = null;
-
-            for ( IndexEntry entry : update.getPrevEntries() ) {
-                if ( "status".equals( entry.getPath() ) ) {
-                    count++;
-                    lastMatch = entry;
-                }
-            }
-
-            assertEquals( 1, count );
-
-            if ( lastMatch != null ) {
-                assertEquals( "herring", lastMatch.getValue() );
-            }
-            else {
-                fail( "The last match was null but should have been herring!" );
-            }
-        }
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImplIT.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImplIT.java b/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImplIT.java
deleted file mode 100644
index 8841e56..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImplIT.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.usergrid.AbstractCoreIT;
-import org.apache.usergrid.cassandra.CassandraResource;
-import org.apache.usergrid.cassandra.Concurrent;
-import org.apache.usergrid.persistence.*;
-import org.apache.usergrid.persistence.cassandra.util.TraceTag;
-import org.apache.usergrid.persistence.cassandra.util.TraceTagManager;
-import org.apache.usergrid.persistence.cassandra.util.TraceTagReporter;
-import org.apache.usergrid.persistence.index.impl.ElasticSearchResource;
-import org.junit.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-import static org.junit.Assert.*;
-
-
-@Concurrent()
-public class EntityManagerFactoryImplIT extends AbstractCoreIT {
-
-    @SuppressWarnings("PointlessBooleanExpression")
-    public static final boolean USE_DEFAULT_DOMAIN = !CassandraService.USE_VIRTUAL_KEYSPACES;
-
-    private static final Logger logger = LoggerFactory.getLogger( EntityManagerFactoryImplIT.class );
-
-
-    @ClassRule
-    public static CassandraResource cassandraResource = CassandraResource.newWithAvailablePorts();
-
-    @ClassRule
-    public static ElasticSearchResource elasticSearchResource = new ElasticSearchResource();
-
-
-    public EntityManagerFactoryImplIT() {
-        emf = cassandraResource.getBean( EntityManagerFactory.class );
-    }
-
-
-    @BeforeClass
-    public static void setup() throws Exception {
-        logger.info( "setup" );
-    }
-
-
-    @AfterClass
-    public static void teardown() throws Exception {
-        logger.info( "teardown" );
-    }
-
-
-    EntityManagerFactory emf;
-    TraceTagManager traceTagManager;
-    TraceTagReporter traceTagReporter;
-
-
-    public UUID createApplication( String organizationName, String applicationName ) throws Exception {
-        if ( USE_DEFAULT_DOMAIN ) {
-            return emf.getDefaultAppId();
-        }
-        return emf.createApplication( organizationName, applicationName );
-    }
-
-
-    @Before
-    public void initTracing() {
-        traceTagManager = cassandraResource.getBean(
-                "traceTagManager", TraceTagManager.class );
-        traceTagReporter = cassandraResource.getBean(
-                "traceTagReporter", TraceTagReporter.class );
-    }
-
-
-    @Test
-    public void testDeleteApplication() throws Exception {
-
-        String rand = RandomStringUtils.randomAlphabetic(20);
-
-        // create an application with a collection and an entity
-
-        UUID applicationId = setup.createApplication( "test-org-" + rand, "test-app-" + rand );
-
-        EntityManager em = setup.getEmf().getEntityManager( applicationId );
-
-        Map<String, Object> properties1 = new LinkedHashMap<String, Object>();
-        properties1.put( "Name", "12 Angry Men" );
-        properties1.put( "Year", 1957 );
-        Entity film1 = em.create("film", properties1);
-
-        Map<String, Object> properties2 = new LinkedHashMap<String, Object>();
-        properties2.put( "Name", "Reservoir Dogs" );
-        properties2.put( "Year", 1992 );
-        Entity film2 = em.create( "film", properties2 );
-
-        em.refreshIndex();
-
-        // delete the application
-
-        setup.getEmf().deleteApplication( applicationId );
-
-        // attempt to get entities in application's collections in various ways should all fail
-
-        assertNull( setup.getEmf().lookupApplication("test-app-" + rand) );
-
-        Map<String, UUID> appMap = setup.getEmf().getApplications();
-        for ( String appName : appMap.keySet() ) {
-            UUID appId = appMap.get( appName );
-            assertNotEquals( appId, applicationId );
-            assertNotEquals( appName, "test-app-" + rand );
-        }
-
-    }
-
-
-    @Test
-    public void testCreateAndGet() throws Exception {
-        TraceTag traceTag = traceTagManager.create( "testCreateAndGet" );
-        traceTagManager.attach( traceTag );
-        logger.info( "EntityDaoTest.testCreateAndGet" );
-
-        UUID applicationId = createApplication( "EntityManagerFactoryImplIT", "testCreateAndGet"
-                + RandomStringUtils.randomAlphabetic(20)  );
-        logger.info( "Application id " + applicationId );
-
-        EntityManager em = emf.getEntityManager( applicationId );
-
-        int i;
-        List<Entity> things = new ArrayList<Entity>();
-        for ( i = 0; i < 10; i++ ) {
-            Map<String, Object> properties = new LinkedHashMap<String, Object>();
-            properties.put( "name", "thing" + i );
-
-            Entity thing = em.create( "thing", properties );
-            assertNotNull( "thing should not be null", thing );
-            assertFalse( "thing id not valid", thing.getUuid().equals( new UUID( 0, 0 ) ) );
-            assertEquals( "name not expected value", "thing" + i, thing.getProperty( "name" ) );
-
-            things.add( thing );
-        }
-        assertEquals( "should be ten entities", 10, things.size() );
-
-        i = 0;
-        for ( Entity entity : things ) {
-
-            Entity thing = em.get( new SimpleEntityRef("thing", entity.getUuid()));
-            assertNotNull( "thing should not be null", thing );
-            assertFalse( "thing id not valid", thing.getUuid().equals( new UUID( 0, 0 ) ) );
-            assertEquals( "name not expected value", "thing" + i, thing.getProperty( "name" ) );
-
-            i++;
-        }
-
-        List<UUID> ids = new ArrayList<UUID>();
-        for ( Entity entity : things ) {
-            ids.add( entity.getUuid() );
-
-            Entity en = em.get( new SimpleEntityRef("thing", entity.getUuid()));
-            String type = en.getType();
-            assertEquals( "type not expected value", "thing", type );
-
-            Object property = en.getProperty( "name" );
-            assertNotNull( "thing name property should not be null", property );
-            assertTrue( "thing name should start with \"thing\"", property.toString().startsWith( "thing" ) );
-
-            Map<String, Object> properties = en.getProperties();
-            assertEquals( "number of properties wrong", 5, properties.size() );
-        }
-
-        i = 0;
-        Results results = em.getEntities( ids, "thing" );
-        for ( Entity thing : results ) {
-            assertNotNull( "thing should not be null", thing );
-
-            assertFalse( "thing id not valid", thing.getUuid().equals( new UUID( 0, 0 ) ) );
-
-            assertEquals( "wrong type", "thing", thing.getType() );
-
-            assertNotNull( "thing name should not be null", thing.getProperty( "name" ) );
-            String name = thing.getProperty( "name" ).toString();
-            assertEquals( "unexpected name", "thing" + i, name );
-
-            i++;
-        }
-
-        assertEquals( "entities unfound entity name count incorrect", 10, i );
-
-		/*
-         * List<UUID> entities = emf.findEntityIds(applicationId, "thing", null,
-		 * null, 100); assertNotNull("entities list should not be null",
-		 * entities); assertEquals("entities count incorrect", 10,
-		 * entities.size());
-		 */
-        traceTagReporter.report( traceTagManager.detach() );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/QueryProcessorTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/QueryProcessorTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/QueryProcessorTest.java
deleted file mode 100644
index fe04d23..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/QueryProcessorTest.java
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.math.BigInteger;
-import java.util.Iterator;
-import java.util.UUID;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.TokenRewriteStream;
-import org.junit.Test;
-
-import org.apache.usergrid.cassandra.Concurrent;
-import org.apache.usergrid.mq.QueryFilterLexer;
-import org.apache.usergrid.mq.QueryFilterParser;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.exceptions.PersistenceException;
-import org.apache.usergrid.persistence.index.query.tree.CpQueryFilterLexer;
-import org.apache.usergrid.persistence.index.query.tree.CpQueryFilterParser;
-import org.apache.usergrid.persistence.query.ir.AndNode;
-import org.apache.usergrid.persistence.query.ir.NotNode;
-import org.apache.usergrid.persistence.query.ir.OrNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SliceNode;
-import org.apache.usergrid.persistence.query.ir.WithinNode;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import org.junit.Ignore;
-
-
-/**
- * @author tnine
- */
-@Concurrent()
-public class QueryProcessorTest {
-
-    @Test 
-    public void equality() throws Exception {
-        String queryString = "select * where a = 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 5 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test
-    public void lessThan() throws Exception {
-        String queryString = "select * where a < 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertNull( slice.getStart() );
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getFinish().getValue() );
-        assertFalse( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test
-    public void lessThanEquals() throws Exception {
-        String queryString = "select * where a <= 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertNull( slice.getStart() );
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void greaterThan() throws Exception {
-        String queryString = "select * where a > 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertNull( slice.getFinish() );
-    }
-
-
-    @Test 
-    public void greaterThanEquals() throws Exception {
-        String queryString = "select * where a >= 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertNull( slice.getFinish() );
-    }
-
-
-    @Test 
-    public void contains() throws Exception {
-        String queryString = "select * where a contains 'foo'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a.keywords", slice.getPropertyName() );
-
-        assertEquals( "foo", slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertEquals( "foo", slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void containsLower() throws Exception {
-        String queryString = "select * where a contains 'FOO'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a.keywords", slice.getPropertyName() );
-
-        assertEquals( "foo", slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertEquals( "foo", slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void containsRange() throws Exception, PersistenceException {
-
-        String queryString = "select * where a contains 'foo*'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        if ( !(processor.getEntityManager() instanceof EntityManagerImpl) ) {
-            return; // only relevant for old entity manager
-        }
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a.keywords", slice.getPropertyName() );
-
-        assertEquals( "foo", slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertEquals( "foo\uffff", slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void within() throws Exception {
-        String queryString = "select * where a within .5 of 157.00, 0.00";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        WithinNode node = ( WithinNode ) processor.getFirstNode();
-
-        assertEquals( "a.coordinates", node.getPropertyName() );
-        assertEquals( .5f, node.getDistance(), 0 );
-        assertEquals( 157f, node.getLattitude(), 0 );
-        assertEquals( 0f, node.getLongitude(), 0 );
-    }
-
-
-    @Test 
-    public void andEquality() throws Exception {
-        assertAndQuery( "select * where a = 1 and b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 AND b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 AnD b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 ANd b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 anD b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 ANd b = 2 and c = 3" );
-        assertAndQuery( "select * where a = 1 && b = 2 && c = 3" );
-    }
-
-
-    private void assertAndQuery( String queryString ) throws Exception {
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-
-        assertEquals( "a", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-
-        slice = slices.next();
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-
-        slice = slices.next();
-
-        assertEquals( "c", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 3 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 3 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void orEquality() throws Exception {
-        assertOrQuery( "select * where a = 1 or b = 2" );
-        assertOrQuery( "select * where a = 1 OR b = 2" );
-        assertOrQuery( "select * where a = 1 oR b = 2" );
-        assertOrQuery( "select * where a = 1 Or b = 2" );
-        assertOrQuery( "select * where a = 1 || b = 2" );
-    }
-
-
-    private void assertOrQuery( String queryString ) throws Exception {
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        OrNode node = ( OrNode ) processor.getFirstNode();
-
-        SliceNode sliceNode = ( SliceNode ) node.getLeft();
-
-        Iterator<QuerySlice> slices = sliceNode.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-
-        sliceNode = ( SliceNode ) node.getRight();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    /** Tests that when properties are not siblings, they are properly assigned to a SliceNode */
-    @Test 
-    public void nestedCompression() throws Exception {
-        String queryString =
-                "select * where (a > 1 and b > 10 and a < 10 and b < 20 ) or ( c >= 20 and d >= 30 and c <= 30 and d "
-                        + "<= 40)";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        OrNode node = ( OrNode ) processor.getFirstNode();
-
-        SliceNode sliceNode = ( SliceNode ) node.getLeft();
-
-        Iterator<QuerySlice> slices = sliceNode.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-
-        assertEquals( "a", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertEquals( BigInteger.valueOf( 10 ), slice.getFinish().getValue() );
-        assertFalse( slice.getFinish().isInclusive() );
-
-
-        slice = slices.next();
-
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 10 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertEquals( BigInteger.valueOf( 20 ), slice.getFinish().getValue() );
-        assertFalse( slice.getFinish().isInclusive() );
-
-
-        sliceNode = ( SliceNode ) node.getRight();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-
-        assertEquals( "c", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 20 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 30 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-
-        slice = slices.next();
-
-        assertEquals( "d", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 30 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 40 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    /** Tests that when there are multiple or with and clauses, the tree is constructed correctly */
-    @Test 
-    public void nestedOrCompression() throws Exception {
-        String queryString =
-                "select * where ((a > 1 and  a < 10) or (b > 10 and b < 20 )) or (( c >= 20 and c <= 30 ) or (d >= 30"
-                        + "  and d <= 40))";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        OrNode rootNode = ( OrNode ) processor.getFirstNode();
-
-        OrNode node = ( OrNode ) rootNode.getLeft();
-
-        // get the left node of the or
-
-        SliceNode sliceNode = ( SliceNode ) node.getLeft();
-
-        Iterator<QuerySlice> slices = sliceNode.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertEquals( BigInteger.valueOf( 10 ), slice.getFinish().getValue() );
-        assertFalse( slice.getFinish().isInclusive() );
-
-        // get our right node
-        sliceNode = ( SliceNode ) node.getRight();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 10 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertEquals( BigInteger.valueOf( 20 ), slice.getFinish().getValue() );
-        assertFalse( slice.getFinish().isInclusive() );
-
-        node = ( OrNode ) rootNode.getRight();
-
-        sliceNode = ( SliceNode ) node.getLeft();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-        assertEquals( "c", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 20 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 30 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-
-        sliceNode = ( SliceNode ) node.getRight();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-        assertEquals( "d", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 30 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 40 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    /** Tests that when NOT is not the root operand the tree has a different root */
-    @Test 
-    public void andNot() throws Exception {
-        String queryString = "select * where a > 1 and not b = 2";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        AndNode rootNode = ( AndNode ) processor.getFirstNode();
-
-        SliceNode sliceNode = ( SliceNode ) rootNode.getLeft();
-
-        Iterator<QuerySlice> slices = sliceNode.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 1 ), slice.getStart().getValue() );
-        assertFalse( slice.getStart().isInclusive() );
-
-        assertNull( slice.getFinish() );
-
-        NotNode notNode = ( NotNode ) rootNode.getRight();
-
-        // now get the child of the not node
-        sliceNode = ( SliceNode ) notNode.getSubtractNode();
-
-        slices = sliceNode.getAllSlices().iterator();
-
-        slice = slices.next();
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    /** Tests that when NOT is the root operand, a full scan range is performed. */
-    @Test 
-    public void notRootOperand() throws Exception {
-        String queryString = "select * where not b = 2";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        NotNode rootNode = ( NotNode ) processor.getFirstNode();
-
-        SliceNode sliceNode = ( SliceNode ) rootNode.getSubtractNode();
-
-        Iterator<QuerySlice> slices = sliceNode.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "b", slice.getPropertyName() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 2 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void stringWithSpaces() throws Exception {
-        String queryString = "select * where a = 'foo with bar'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a", slice.getPropertyName() );
-
-        assertEquals( "foo with bar", slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertEquals( "foo with bar", slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void fieldWithDash() throws Exception {
-        String queryString = "select * where a-foo = 5";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a-foo", slice.getPropertyName() );
-
-        assertEquals( BigInteger.valueOf( 5 ), slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( BigInteger.valueOf( 5 ), slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void stringWithDash() throws Exception {
-        String queryString = "select * where a = 'foo-bar'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "a", slice.getPropertyName() );
-
-        assertEquals( "foo-bar", slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-
-        assertEquals( "foo-bar", slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test 
-    public void uuidParse() throws Exception {
-
-        //    UUID value = UUID.fromString("4b91a9c2-86a1-11e2-b7fa-68a86d52fa56");
-        //
-        //
-        //    String queryString = "select * where uuid = 4b91a9c2-86a1-11e2-b7fa-68a86d52fa56";
-
-        UUID value = UUID.fromString( "c6ee8a1c-3ef4-11e2-8861-02e81adcf3d0" );
-        String queryString = "select * where uuid = c6ee8a1c-3ef4-11e2-8861-02e81adcf3d0";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        Query query = parser.ql().query;
-
-        QueryProcessor processor = new QueryProcessorImpl( query, null, null, null );
-
-        SliceNode node = ( SliceNode ) processor.getFirstNode();
-
-        Iterator<QuerySlice> slices = node.getAllSlices().iterator();
-
-        QuerySlice slice = slices.next();
-
-        assertEquals( "uuid", slice.getPropertyName() );
-
-        assertEquals( value, slice.getStart().getValue() );
-        assertTrue( slice.getStart().isInclusive() );
-        assertEquals( value, slice.getFinish().getValue() );
-        assertTrue( slice.getFinish().isInclusive() );
-    }
-
-
-    @Test
-    @Ignore("no longer relevant for two-dot-o")
-    public void validateHintSizeForOrder() throws Exception {
-        String queryString = "order by name desc";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        QueryFilterLexer lexer = new QueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        QueryFilterParser parser = new QueryFilterParser( tokens );
-
-        /**
-         * Test set limit
-         */
-
-        final int limit = 105;
-
-//        Query query = parser.ql().query;
-//        query.setLimit( limit );
-//
-//        QueryProcessor processor = new QueryProcessor( query, null, null, null );
-//
-//        OrderByNode node = ( OrderByNode ) processor.getFirstNode();
-//
-//        assertEquals( limit, processor.getPageSizeHint( node ) );
-    }
-
-
-    @Test
-    @Ignore("no longer relevant for two-dot-o")
-    public void validateHintSizeForEquality() throws Exception {
-        String queryString = "select * where X = 'Foo'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        /**
-         * Test set limit
-         */
-
-        final int limit = 105;
-
-        Query query = parser.ql().query;
-        query.setLimit( limit );
-
-//        QueryProcessor processor = new QueryProcessor( query, null, null, null );
-//
-//        SliceNode node = ( SliceNode ) processor.getFirstNode();
-//
-//        assertEquals( limit, processor.getPageSizeHint( node ) );
-    }
-
-
-    @Test
-    @Ignore("no longer relevant for two-dot-o")
-    public void validateHintSizeForComplexQueries() throws Exception {
-        //        String queryString = "select * where y = 'Foo' AND z = 'Bar'";
-
-        String queryString = "select * where y = 'Foo' AND z = 'Bar'";
-
-        ANTLRStringStream in = new ANTLRStringStream( queryString );
-        CpQueryFilterLexer lexer = new CpQueryFilterLexer( in );
-        TokenRewriteStream tokens = new TokenRewriteStream( lexer );
-        CpQueryFilterParser parser = new CpQueryFilterParser( tokens );
-
-        /**
-         * Test set limit
-         */
-
-        final int limit = 105;
-
-        Query query = parser.ql().query;
-        query.setLimit( limit );
-
-//        QueryProcessor processor = new QueryProcessor( query, null, null, null );
-//
-//        QueryNode slice =  processor.getFirstNode();
-//
-//        assertEquals( 1000, processor.getPageSizeHint( slice ) );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/SimpleIndexShardLocatorImplTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/SimpleIndexShardLocatorImplTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/SimpleIndexShardLocatorImplTest.java
deleted file mode 100644
index 0c9d1f3..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/SimpleIndexShardLocatorImplTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.Test;
-import org.apache.usergrid.cassandra.Concurrent;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.Timer;
-import com.yammer.metrics.core.TimerContext;
-
-import static org.junit.Assert.assertEquals;
-
-
-/** @author tnine */
-@Concurrent()
-public class SimpleIndexShardLocatorImplTest {
-    @Test
-    public void oneBucket() {
-
-        UUID appId = UUIDUtils.newTimeUUID();
-        String entityType = "user";
-        String propName = "firstName";
-
-        SimpleIndexBucketLocatorImpl locator = new SimpleIndexBucketLocatorImpl( 1 );
-
-        List<String> buckets = locator.getBuckets( appId, IndexType.COLLECTION, entityType, propName );
-
-        assertEquals( 1, buckets.size() );
-
-        UUID testId1 = UUIDUtils.minTimeUUID( 0l );
-
-        UUID testId2 = UUIDUtils.minTimeUUID( Long.MAX_VALUE / 2 );
-
-        UUID testId3 = UUIDUtils.minTimeUUID( Long.MAX_VALUE );
-
-        String bucket1 = locator.getBucket( appId, IndexType.COLLECTION, testId1, entityType, propName );
-
-        String bucket2 = locator.getBucket( appId, IndexType.COLLECTION, testId2, entityType, propName );
-
-        String bucket3 = locator.getBucket( appId, IndexType.COLLECTION, testId3, entityType, propName );
-
-        assertEquals( bucket1, "000000000000000000000000000000000000000" );
-        assertEquals( bucket2, "000000000000000000000000000000000000000" );
-        assertEquals( bucket3, "000000000000000000000000000000000000000" );
-    }
-
-
-    @Test
-    public void twoBuckets() {
-
-        UUID appId = UUIDUtils.newTimeUUID();
-        String entityType = "user";
-        String propName = "firstName";
-
-        SimpleIndexBucketLocatorImpl locator = new SimpleIndexBucketLocatorImpl( 2 );
-
-        List<String> buckets = locator.getBuckets( appId, IndexType.COLLECTION, entityType, propName );
-
-        assertEquals( 2, buckets.size() );
-
-        UUID testId1 = UUIDUtils.minTimeUUID( 0l );
-
-        UUID testId2 = UUIDUtils.maxTimeUUID( Long.MAX_VALUE / 2 );
-
-        UUID testId3 = UUIDUtils.minTimeUUID( Long.MAX_VALUE );
-
-        String bucket1 = locator.getBucket( appId, IndexType.COLLECTION, testId1, entityType, propName );
-
-        String bucket2 = locator.getBucket( appId, IndexType.COLLECTION, testId2, entityType, propName );
-
-        String bucket3 = locator.getBucket( appId, IndexType.COLLECTION, testId3, entityType, propName );
-
-        assertEquals( bucket1, "000000000000000000000000000000000000000" );
-        assertEquals( bucket2, "085070591730234615865843651857942052863" );
-        assertEquals( bucket3, "000000000000000000000000000000000000000" );
-    }
-
-
-    @Test
-    public void evenDistribution() {
-
-        UUID appId = UUIDUtils.newTimeUUID();
-        String entityType = "user";
-        String propName = "firstName";
-
-        int bucketSize = 20;
-        float distributionPercentage = .05f;
-
-        // test 100 elements
-        SimpleIndexBucketLocatorImpl locator = new SimpleIndexBucketLocatorImpl( bucketSize );
-
-        List<String> buckets = locator.getBuckets( appId, IndexType.COLLECTION, entityType, propName );
-
-        assertEquals( bucketSize, buckets.size() );
-
-        int testSize = 2000000;
-
-        Map<String, Float> counts = new HashMap<String, Float>();
-
-        final Timer hashes =
-                Metrics.newTimer( SimpleIndexShardLocatorImplTest.class, "responses", TimeUnit.MILLISECONDS,
-                        TimeUnit.SECONDS );
-
-        // ConsoleReporter.enable(1, TimeUnit.SECONDS);
-
-        /**
-         * Loop through each new UUID and add it's hash to our map
-         */
-        for ( int i = 0; i < testSize; i++ ) {
-            UUID id = UUIDUtils.newTimeUUID();
-
-            final TimerContext context = hashes.time();
-
-            String bucket = locator.getBucket( appId, IndexType.COLLECTION, id, entityType, propName );
-
-            context.stop();
-
-            Float count = counts.get( bucket );
-
-            if ( count == null ) {
-                count = 0f;
-            }
-
-            counts.put( bucket, ++count );
-        }
-
-        /**
-         * Check each entry is within +- 5% of every subsequent entry
-         */
-        List<String> keys = new ArrayList<String>( counts.keySet() );
-        int keySize = keys.size();
-
-        assertEquals( bucketSize, keySize );
-
-        for ( int i = 0; i < keySize; i++ ) {
-
-            float sourceCount = counts.get( keys.get( i ) );
-
-            for ( int j = i + 1; j < keySize; j++ ) {
-                float destCount = counts.get( keys.get( j ) );
-
-                // find the maximum allowed value for the assert based on the
-                // largest value in the pair
-                float maxDelta = Math.max( sourceCount, destCount ) * distributionPercentage;
-
-                assertEquals(
-                        String.format( "Not within %f as percentage for keys '%s' and '%s'", distributionPercentage,
-                                keys.get( i ), keys.get( j ) ), sourceCount, destCount, maxDelta );
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/util/TraceTagUnitTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/util/TraceTagUnitTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/util/TraceTagUnitTest.java
deleted file mode 100644
index 2a9e740..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/cassandra/util/TraceTagUnitTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra.util;
-
-
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-
-/** @author zznate */
-public class TraceTagUnitTest {
-
-    private TraceTagManager traceTagManager;
-    private Slf4jTraceTagReporter traceTagReporter;
-    private TaggedOpTimer taggedOpTimer;
-
-
-    @Before
-    public void setup() {
-        traceTagManager = new TraceTagManager();
-        traceTagReporter = new Slf4jTraceTagReporter();
-        taggedOpTimer = new TaggedOpTimer( traceTagManager );
-    }
-
-
-    @Test
-    public void createAttachDetach() throws Exception {
-        TraceTag traceTag = traceTagManager.create( "testtag1" );
-        traceTagManager.attach( traceTag );
-        TimedOpTag timedOpTag = ( TimedOpTag ) taggedOpTimer.start( "op-tag-name" );
-        Thread.currentThread().sleep( 500 );
-        taggedOpTimer.stop( timedOpTag, "op-tag-name", true );
-        assertTrue( timedOpTag.getElapsed() >= 500 );
-        assertEquals( timedOpTag, traceTag.iterator().next() );
-        traceTagManager.detach();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparatorTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparatorTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparatorTest.java
deleted file mode 100644
index 9c7ec90..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/geo/EntityLocationRefDistanceComparatorTest.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.util.UUID;
-
-import org.junit.Test;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-
-/** @author tnine */
-public class EntityLocationRefDistanceComparatorTest {
-
-
-    @Test
-    public void locationDistanceComparator() {
-        EntityLocationRefDistanceComparator comp = new EntityLocationRefDistanceComparator();
-
-        UUID firstId = UUIDUtils.newTimeUUID();
-        UUID matchId = UUID.fromString( firstId.toString() );
-
-
-        Point zero = new Point( 0, 0 );
-
-        EntityLocationRef first = new EntityLocationRef( ( String ) null, firstId, 0, 0 );
-        first.calcDistance( zero );
-
-        EntityLocationRef second = new EntityLocationRef( ( String ) null, matchId, 0, 0 );
-        second.calcDistance( zero );
-
-        assertEquals( 0, comp.compare( first, second ) );
-
-        //now increase the distance on the second one
-
-        second = new EntityLocationRef( ( String ) null, matchId, 1, 1 );
-        second.calcDistance( zero );
-
-        assertTrue( comp.compare( first, second ) < 0 );
-
-        //set the first one to be farther
-        first = new EntityLocationRef( ( String ) null, firstId, 1, 1 );
-        first.calcDistance( zero );
-
-        second = new EntityLocationRef( ( String ) null, matchId, 0, 0 );
-        second.calcDistance( zero );
-
-        assertTrue( comp.compare( first, second ) > 0 );
-
-        //now compare by UUID.
-
-        UUID secondId = UUIDUtils.newTimeUUID();
-
-        first = new EntityLocationRef( ( String ) null, firstId, 0, 0 );
-        first.calcDistance( zero );
-
-        second = new EntityLocationRef( ( String ) null, secondId, 0, 0 );
-        second.calcDistance( zero );
-
-        assertTrue( comp.compare( first, second ) < 0 );
-
-        first = new EntityLocationRef( ( String ) null, secondId, 0, 0 );
-        first.calcDistance( zero );
-
-        second = new EntityLocationRef( ( String ) null, firstId, 0, 0 );
-        second.calcDistance( zero );
-
-        assertTrue( comp.compare( first, second ) > 0 );
-
-        //compare nulls
-
-        assertTrue( comp.compare( null, first ) > 0 );
-        assertTrue( comp.compare( first, null ) < 0 );
-
-        assertEquals( 0, comp.compare( null, null ) );
-
-
-        double less = 0;
-        double more = 1000;
-
-        int compare = Double.compare( less, more );
-
-        assertTrue( compare < 1 );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/IntersectionUnionPagingIT.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/IntersectionUnionPagingIT.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/IntersectionUnionPagingIT.java
index 4dc4170..0d4372e 100644
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/IntersectionUnionPagingIT.java
+++ b/stack/core/src/test/java/org/apache/usergrid/persistence/query/IntersectionUnionPagingIT.java
@@ -26,10 +26,10 @@ import java.util.Set;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.cassandra.QueryProcessor;
+import org.apache.usergrid.persistence.index.query.Query;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -80,7 +80,7 @@ public class IntersectionUnionPagingIT extends AbstractIteratingQueryIT {
     private Set<String> performSetup( final IoHelper io ) throws Exception {
         io.doSetup();
 
-        int size = ( int ) ( QueryProcessor.PAGE_SIZE*2.5);
+        int size = ( int ) ( PAGE_SIZE*2.5);
 
         long start = System.currentTimeMillis();
 
@@ -130,12 +130,12 @@ public class IntersectionUnionPagingIT extends AbstractIteratingQueryIT {
     }
 
 
-    private void testUnionPaging( final IoHelper io, final String queryString, 
+    private void testUnionPaging( final IoHelper io, final String queryString,
             final Set<String> expectedResults ) throws Exception {
 
         Set<String> newSets = new HashSet<String>( expectedResults );
 
-        //our field1Or has a result size < our page size, so it shouldn't blow up when the 
+        //our field1Or has a result size < our page size, so it shouldn't blow up when the
         // cursor is getting created the leaf iterator should insert it's own "no value left" i
         // not the cursor
         Query query = Query.fromQL( queryString );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/NotSubPropertyIT.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/NotSubPropertyIT.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/NotSubPropertyIT.java
index 13f7b22..0295fb9 100644
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/NotSubPropertyIT.java
+++ b/stack/core/src/test/java/org/apache/usergrid/persistence/query/NotSubPropertyIT.java
@@ -27,9 +27,8 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.cassandra.QueryProcessor;
+import org.apache.usergrid.persistence.index.query.Query;
 
 import static org.junit.Assert.assertEquals;
 
@@ -82,7 +81,7 @@ public class NotSubPropertyIT extends AbstractIteratingQueryIT {
         private List<UUID> performSetup( final IoHelper io ) throws Exception {
             io.doSetup();
 
-            int size = ( int ) ( QueryProcessor.PAGE_SIZE*2.5);
+            int size = ( int ) (PAGE_SIZE*2.5);
 
             long start = System.currentTimeMillis();
 

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumnTest.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumnTest.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumnTest.java
deleted file mode 100644
index d478942..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumnTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import org.junit.Test;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import static junit.framework.Assert.assertNull;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-
-
-/**
- * Simple test to test null value
- */
-public class AbstractScanColumnTest {
-
-    @Test
-    public void testValues() {
-        final UUID uuid = UUIDUtils.newTimeUUID();
-        final ByteBuffer buffer = ByteBuffer.allocate( 4 );
-        buffer.putInt( 1 );
-        buffer.rewind();
-
-        TestScanColumn col = new TestScanColumn( uuid, buffer );
-
-        assertSame( uuid, col.getUUID() );
-
-        assertEquals( 1, col.getCursorValue().getInt() );
-    }
-
-
-    @Test
-    public void nullUUID() {
-        final UUID uuid = null;
-        final ByteBuffer buffer = ByteBuffer.allocate( 4 );
-        buffer.putInt( 1 );
-        buffer.rewind();
-
-        TestScanColumn col = new TestScanColumn( uuid, buffer );
-
-        assertNull( col.getUUID() );
-
-        assertEquals( 1, col.getCursorValue().getInt() );
-    }
-
-
-    @Test
-    public void nullBuffer() {
-        final UUID uuid = UUIDUtils.newTimeUUID();
-        final ByteBuffer buffer = null;
-
-        TestScanColumn col = new TestScanColumn( uuid, buffer );
-
-        assertSame( uuid, col.getUUID() );
-
-        assertNull( col.getCursorValue() );
-    }
-
-
-    @Test
-    public void nullBoth() {
-        final UUID uuid = null;
-        final ByteBuffer buffer = null;
-
-        TestScanColumn col = new TestScanColumn( uuid, buffer );
-
-        assertNull( col.getUUID() );
-
-        assertNull( col.getCursorValue() );
-    }
-
-
-
-
-    private class TestScanColumn extends AbstractScanColumn {
-
-        protected TestScanColumn( final UUID uuid, final ByteBuffer buffer ) {
-            super( uuid, buffer );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/InOrderIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/InOrderIterator.java b/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/InOrderIterator.java
deleted file mode 100644
index e937162..0000000
--- a/stack/core/src/test/java/org/apache/usergrid/persistence/query/ir/result/InOrderIterator.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.junit.Ignore;
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-import com.google.common.collect.Iterables;
-
-
-/**
- * Simple iterator for testing that iterates UUIDs in the order returned
- *
- * @author tnine
- */
-@Ignore("not a test")
-public class InOrderIterator implements ResultIterator {
-
-    private LinkedHashSet<ScanColumn> uuids = new LinkedHashSet<ScanColumn>();
-    private Iterator<List<ScanColumn>> iterator;
-    private int pageSize = 1000;
-
-
-    public InOrderIterator( int pageSize ) {
-        this.pageSize = pageSize;
-    }
-
-
-    /** Add a uuid to the list */
-    public void add( UUID... ids ) {
-        for ( UUID current : ids ) {
-            uuids.add( new UUIDIndexSliceParser.UUIDColumn( current, ByteBuffer.allocate( 0 ) ) );
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        if ( iterator == null ) {
-            reset();
-        }
-
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-        if ( iterator == null ) {
-            reset();
-        }
-
-        return iterator.hasNext();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public Set<ScanColumn> next() {
-        if ( iterator == null ) {
-            reset();
-        }
-
-        return new LinkedHashSet<ScanColumn>( iterator.next() );
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void reset() {
-        this.iterator = Iterables.partition( uuids, pageSize ).iterator();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor(org.apache.usergrid.persistence.cassandra
-     * .CursorCache)
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastLoaded ) {
-
-    }
-}


[08/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerImpl.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerImpl.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerImpl.java
deleted file mode 100644
index 24be179..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerImpl.java
+++ /dev/null
@@ -1,2937 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import javax.annotation.Resource;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.context.ApplicationContext;
-import org.springframework.util.Assert;
-
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_ID_SETS;
-
-import org.apache.usergrid.locking.Lock;
-import org.apache.usergrid.mq.Message;
-import org.apache.usergrid.mq.QueueManager;
-import org.apache.usergrid.mq.cassandra.QueueManagerFactoryImpl;
-import org.apache.usergrid.persistence.AggregateCounter;
-import org.apache.usergrid.persistence.AggregateCounterSet;
-import org.apache.usergrid.persistence.CollectionRef;
-import org.apache.usergrid.persistence.ConnectedEntityRef;
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.DynamicEntity;
-import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.EntityFactory;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityManagerFactory;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.RoleRef;
-import org.apache.usergrid.persistence.Schema;
-import org.apache.usergrid.persistence.SimpleCollectionRef;
-import org.apache.usergrid.persistence.SimpleEntityRef;
-import org.apache.usergrid.persistence.SimpleRoleRef;
-import org.apache.usergrid.persistence.TypedEntity;
-import org.apache.usergrid.persistence.cassandra.CounterUtils.AggregateCounterSelection;
-import org.apache.usergrid.persistence.cassandra.util.TraceParticipant;
-import org.apache.usergrid.persistence.entities.Application;
-import org.apache.usergrid.persistence.entities.Event;
-import org.apache.usergrid.persistence.entities.Group;
-import org.apache.usergrid.persistence.entities.Role;
-import org.apache.usergrid.persistence.entities.User;
-import org.apache.usergrid.persistence.exceptions.DuplicateUniquePropertyExistsException;
-import org.apache.usergrid.persistence.exceptions.EntityNotFoundException;
-import org.apache.usergrid.persistence.exceptions.RequiredPropertyNotFoundException;
-import org.apache.usergrid.persistence.exceptions.UnexpectedEntityTypeException;
-import org.apache.usergrid.persistence.index.query.CounterResolution;
-import org.apache.usergrid.persistence.index.query.Identifier;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.CounterFilterPredicate;
-import org.apache.usergrid.persistence.index.query.Query.Level;
-import org.apache.usergrid.persistence.schema.CollectionInfo;
-import org.apache.usergrid.utils.ClassUtils;
-import org.apache.usergrid.utils.CompositeUtils;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import com.google.common.collect.BiMap;
-import com.google.common.collect.HashBiMap;
-import com.yammer.metrics.annotation.Metered;
-
-import me.prettyprint.hector.api.Keyspace;
-import me.prettyprint.hector.api.beans.ColumnSlice;
-import me.prettyprint.hector.api.beans.CounterRow;
-import me.prettyprint.hector.api.beans.CounterRows;
-import me.prettyprint.hector.api.beans.CounterSlice;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-import me.prettyprint.hector.api.beans.HColumn;
-import me.prettyprint.hector.api.beans.HCounterColumn;
-import me.prettyprint.hector.api.beans.Row;
-import me.prettyprint.hector.api.beans.Rows;
-import me.prettyprint.hector.api.factory.HFactory;
-import me.prettyprint.hector.api.mutation.Mutator;
-import me.prettyprint.hector.api.query.MultigetSliceCounterQuery;
-import me.prettyprint.hector.api.query.QueryResult;
-import me.prettyprint.hector.api.query.SliceCounterQuery;
-
-import static java.lang.String.CASE_INSENSITIVE_ORDER;
-import static java.util.Arrays.asList;
-
-import static me.prettyprint.hector.api.factory.HFactory.createCounterSliceQuery;
-
-import static org.apache.commons.lang.StringUtils.capitalize;
-import static org.apache.commons.lang.StringUtils.isBlank;
-import static org.apache.usergrid.locking.LockHelper.getUniqueUpdateLock;
-import static org.apache.usergrid.persistence.Results.fromEntities;
-import static org.apache.usergrid.persistence.Schema.COLLECTION_ROLES;
-import static org.apache.usergrid.persistence.Schema.COLLECTION_USERS;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_COLLECTIONS;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_PERMISSIONS;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_PROPERTIES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_ROLENAMES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_ROLETIMES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_SETS;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_ASSOCIATED;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_CREATED;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_INACTIVITY;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_MODIFIED;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_NAME;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_TIMESTAMP;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_TYPE;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_UUID;
-import static org.apache.usergrid.persistence.Schema.TYPE_APPLICATION;
-import static org.apache.usergrid.persistence.Schema.TYPE_CONNECTION;
-import static org.apache.usergrid.persistence.Schema.TYPE_ENTITY;
-import static org.apache.usergrid.persistence.Schema.TYPE_MEMBER;
-import static org.apache.usergrid.persistence.Schema.TYPE_ROLE;
-import static org.apache.usergrid.persistence.Schema.defaultCollectionName;
-import static org.apache.usergrid.persistence.Schema.deserializeEntityProperties;
-import static org.apache.usergrid.persistence.Schema.getDefaultSchema;
-import static org.apache.usergrid.persistence.SimpleEntityRef.getUuid;
-import static org.apache.usergrid.persistence.SimpleEntityRef.ref;
-import static org.apache.usergrid.persistence.SimpleRoleRef.getIdForGroupIdAndRoleName;
-import static org.apache.usergrid.persistence.SimpleRoleRef.getIdForRoleName;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.APPLICATION_AGGREGATE_COUNTERS;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_COMPOSITE_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_COUNTERS;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_PROPERTIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_UNIQUE;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addDeleteToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addInsertToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addPropertyToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.batchExecute;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.toStorableBinaryValue;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.ALL_COUNT;
-import static org.apache.usergrid.persistence.cassandra.Serializers.be;
-import static org.apache.usergrid.persistence.cassandra.Serializers.le;
-import static org.apache.usergrid.persistence.cassandra.Serializers.se;
-import static org.apache.usergrid.persistence.cassandra.Serializers.ue;
-import org.apache.usergrid.persistence.core.util.Health;
-import org.apache.usergrid.persistence.hector.CountingMutator;
-import static org.apache.usergrid.persistence.index.query.Query.Level.REFS;
-import static org.apache.usergrid.utils.ClassUtils.cast;
-import static org.apache.usergrid.utils.ConversionUtils.bytebuffer;
-import static org.apache.usergrid.utils.ConversionUtils.getLong;
-import static org.apache.usergrid.utils.ConversionUtils.object;
-import static org.apache.usergrid.utils.ConversionUtils.string;
-import static org.apache.usergrid.utils.ConversionUtils.uuid;
-import static org.apache.usergrid.utils.InflectionUtils.singularize;
-import static org.apache.usergrid.utils.UUIDUtils.getTimestampInMicros;
-import static org.apache.usergrid.utils.UUIDUtils.getTimestampInMillis;
-import static org.apache.usergrid.utils.UUIDUtils.isTimeBased;
-import static org.apache.usergrid.utils.UUIDUtils.newTimeUUID;
-
-
-/**
- * Cassandra-specific implementation of Datastore
- *
- * @author edanuff
- * @author tnine
- */
-public class EntityManagerImpl implements EntityManager {
-
-    /** The log4j logger. */
-    private static final Logger logger = LoggerFactory.getLogger( EntityManagerImpl.class );
-    public static final String APPLICATION_COLLECTION = "application.collection.";
-    public static final String APPLICATION_ENTITIES = "application.entities";
-    public static final long ONE_COUNT = 1L;
-    @Resource
-    private EntityManagerFactoryImpl emf;
-    @Resource
-    private QueueManagerFactoryImpl qmf;
-    @Resource
-    private IndexBucketLocator indexBucketLocator;
-
-    private UUID applicationId;
-
-    private Application application;
-    @Resource
-    private CassandraService cass;
-    @Resource
-    private CounterUtils counterUtils;
-
-    private boolean skipAggregateCounters;
-
-    public EntityManagerImpl() {
-    }
-
-
-    @Override
-    public void init(EntityManagerFactory emf, UUID applicationId) {
-        init( (EntityManagerFactoryImpl)emf, null, null, applicationId, false);
-    }
-
-
-    public EntityManager init(
-            EntityManagerFactoryImpl emf, CassandraService cass, CounterUtils counterUtils,
-            UUID applicationId, boolean skipAggregateCounters ) {
-
-        this.emf = emf;
-        this.cass = cass;
-        this.counterUtils = counterUtils;
-        this.applicationId = applicationId;
-        this.skipAggregateCounters = skipAggregateCounters;
-        qmf = ( QueueManagerFactoryImpl ) getApplicationContext().getBean( "queueManagerFactory" );
-        indexBucketLocator = ( IndexBucketLocator ) getApplicationContext().getBean( "indexBucketLocator" );
-        // prime the application entity for the EM
-        try {
-            getApplication();
-        }
-        catch ( Exception ex ) {
-            ex.printStackTrace();
-        }
-        return this;
-    }
-
-
-    public void setApplicationId( UUID applicationId ) {
-        this.applicationId = applicationId;
-    }
-
-
-    public ApplicationContext getApplicationContext() {
-        return emf.applicationContext;
-    }
-
-
-    @Override
-    public EntityRef getApplicationRef() {
-        return ref( TYPE_APPLICATION, applicationId );
-    }
-
-
-    @Override
-    public Application getApplication() throws Exception {
-        if ( application == null ) {
-            application = get( applicationId, Application.class );
-        }
-        return application;
-    }
-
-
-    @Override
-    public void updateApplication( Application app ) throws Exception {
-        update( app );
-        this.application = app;
-    }
-
-
-    @Override
-    public void updateApplication( Map<String, Object> properties ) throws Exception {
-        this.updateProperties( applicationId, Application.ENTITY_TYPE, properties );
-        this.application = get( applicationId, Application.class );
-    }
-
-
-    @Override
-    public RelationManagerImpl getRelationManager( EntityRef entityRef ) {
-        //RelationManagerImpl rmi = applicationContext.getBean(RelationManagerImpl.class);
-        RelationManagerImpl rmi = new RelationManagerImpl();
-        rmi.init( this, cass, applicationId, entityRef, indexBucketLocator );
-        return rmi;
-    }
-
-
-    /**
-     * Batch dictionary property.
-     *
-     * @param batch The batch to set the property into
-     * @param entity The entity that owns the property
-     * @param propertyName the property name
-     * @param propertyValue the property value
-     * @param timestampUuid The update timestamp as a uuid
-     *
-     * @return batch
-     *
-     * @throws Exception the exception
-     */
-    public Mutator<ByteBuffer> batchSetProperty( Mutator<ByteBuffer> batch, EntityRef entity, String propertyName,
-                                                 Object propertyValue, UUID timestampUuid ) throws Exception {
-        return this.batchSetProperty( batch, entity, propertyName, propertyValue, false, false, timestampUuid );
-    }
-
-
-    public Mutator<ByteBuffer> batchSetProperty( Mutator<ByteBuffer> batch, EntityRef entity, String propertyName,
-                                                 Object propertyValue, boolean force, boolean noRead,
-                                                 UUID timestampUuid ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        // propertyName = propertyName.toLowerCase();
-
-        boolean entitySchemaHasProperty = getDefaultSchema().hasProperty( entity.getType(), propertyName );
-
-        propertyValue = getDefaultSchema().validateEntityPropertyValue( entity.getType(), propertyName, propertyValue );
-
-        Schema defaultSchema = Schema.getDefaultSchema();
-
-        if ( PROPERTY_TYPE.equalsIgnoreCase( propertyName ) && ( propertyValue != null ) ) {
-            if ( "entity".equalsIgnoreCase( propertyValue.toString() ) || "dynamicentity"
-                    .equalsIgnoreCase( propertyValue.toString() ) ) {
-                String errorMsg =
-                        "Unable to dictionary entity type to " + propertyValue + " because that is not a valid type.";
-                logger.error( errorMsg );
-                throw new IllegalArgumentException( errorMsg );
-            }
-        }
-
-        if ( entitySchemaHasProperty ) {
-
-            if ( !force ) {
-                if ( !defaultSchema.isPropertyMutable( entity.getType(), propertyName ) ) {
-                    return batch;
-                }
-
-                // Passing null for propertyValue indicates delete the property
-                // so if required property, exit
-                if ( ( propertyValue == null ) && defaultSchema.isRequiredProperty( entity.getType(), propertyName ) ) {
-                    return batch;
-                }
-            }
-
-
-            /**
-             * Unique property, load the old value and remove it, check if it's not a duplicate
-             */
-            if ( defaultSchema.getEntityInfo( entity.getType() ).isPropertyUnique( propertyName ) ) {
-
-                Lock lock = getUniqueUpdateLock( cass.getLockManager(), applicationId, propertyValue, entity.getType(),
-                        propertyName );
-
-                try {
-                    lock.lock();
-
-                    if ( !isPropertyValueUniqueForEntity( entity.getUuid(), entity.getType(), propertyName,
-                            propertyValue ) ) {
-                        throw new DuplicateUniquePropertyExistsException( entity.getType(), propertyName,
-                                propertyValue );
-                    }
-
-
-                    String collectionName = Schema.defaultCollectionName( entity.getType() );
-
-                    uniquePropertyDelete( batch, collectionName, entity.getType(), propertyName, propertyValue,
-                            entity.getUuid(), timestamp - 1 );
-                    uniquePropertyWrite( batch, collectionName, propertyName, propertyValue, entity.getUuid(),
-                            timestamp );
-                }
-                finally {
-                    lock.unlock();
-                }
-            }
-        }
-
-        if ( getDefaultSchema().isPropertyIndexed( entity.getType(), propertyName ) ) {
-            //this call is incorrect.  The current entity is NOT the head entity
-            getRelationManager( entity )
-                    .batchUpdatePropertyIndexes( batch, propertyName, propertyValue, entitySchemaHasProperty, noRead,
-                            timestampUuid );
-        }
-
-
-        if ( propertyValue != null ) {
-            // Set the new value
-            addPropertyToMutator( batch, key( entity.getUuid() ), entity.getType(), propertyName, propertyValue,
-                    timestamp );
-
-            if ( !entitySchemaHasProperty ) {
-                // Make a list of all the properties ever dictionary on this
-                // entity
-                addInsertToMutator( batch, ENTITY_DICTIONARIES, key( entity.getUuid(), DICTIONARY_PROPERTIES ),
-                        propertyName, null, timestamp );
-            }
-        }
-        else {
-            addDeleteToMutator( batch, ENTITY_PROPERTIES, key( entity.getUuid() ), propertyName, timestamp );
-        }
-
-        return batch;
-    }
-
-
-    /**
-     * Batch update properties.
-     *
-     * @param batch the batch
-     * @param entity The owning entity reference
-     * @param properties the properties to set
-     * @param timestampUuid the timestamp of the update operation as a time uuid
-     *
-     * @return batch
-     *
-     * @throws Exception the exception
-     */
-    public Mutator<ByteBuffer> batchUpdateProperties( Mutator<ByteBuffer> batch, EntityRef entity,
-                                                      Map<String, Object> properties, UUID timestampUuid )
-            throws Exception {
-
-        for ( String propertyName : properties.keySet() ) {
-            Object propertyValue = properties.get( propertyName );
-
-            batch = batchSetProperty( batch, entity, propertyName, propertyValue, timestampUuid );
-        }
-
-        return batch;
-    }
-
-
-    /**
-     * Batch update set.
-     *
-     * @param batch the batch
-     * @param entity The owning entity
-     * @param dictionaryName the dictionary name
-     * @param elementValue the dictionary value
-     * @param removeFromDictionary True to delete from the dictionary
-     * @param timestampUuid the timestamp
-     *
-     * @return batch
-     *
-     * @throws Exception the exception
-     */
-    public Mutator<ByteBuffer> batchUpdateDictionary( Mutator<ByteBuffer> batch, EntityRef entity,
-                                                      String dictionaryName, Object elementValue,
-                                                      boolean removeFromDictionary, UUID timestampUuid )
-            throws Exception {
-        return batchUpdateDictionary( batch, entity, dictionaryName, elementValue, null, removeFromDictionary,
-                timestampUuid );
-    }
-
-
-    public Mutator<ByteBuffer> batchUpdateDictionary( Mutator<ByteBuffer> batch, EntityRef entity,
-                                                      String dictionaryName, Object elementValue, Object elementCoValue,
-                                                      boolean removeFromDictionary, UUID timestampUuid )
-            throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        // dictionaryName = dictionaryName.toLowerCase();
-        if ( elementCoValue == null ) {
-            elementCoValue = ByteBuffer.allocate( 0 );
-        }
-
-        boolean entityHasDictionary = getDefaultSchema().hasDictionary( entity.getType(), dictionaryName );
-
-        // Don't index dynamic dictionaries not defined by the schema
-        if ( entityHasDictionary ) {
-            getRelationManager( entity )
-                    .batchUpdateSetIndexes( batch, dictionaryName, elementValue, removeFromDictionary, timestampUuid );
-        }
-
-        ApplicationCF dictionary_cf = entityHasDictionary ? ENTITY_DICTIONARIES : ENTITY_COMPOSITE_DICTIONARIES;
-
-        if ( elementValue != null ) {
-            if ( !removeFromDictionary ) {
-                // Set the new value
-
-                elementCoValue = toStorableBinaryValue( elementCoValue, !entityHasDictionary );
-
-                addInsertToMutator( batch, dictionary_cf, key( entity.getUuid(), dictionaryName ),
-                        entityHasDictionary ? elementValue : asList( elementValue ), elementCoValue, timestamp );
-
-                if ( !entityHasDictionary ) {
-                    addInsertToMutator( batch, ENTITY_DICTIONARIES, key( entity.getUuid(), DICTIONARY_SETS ),
-                            dictionaryName, null, timestamp );
-                }
-            }
-            else {
-                addDeleteToMutator( batch, dictionary_cf, key( entity.getUuid(), dictionaryName ),
-                        entityHasDictionary ? elementValue : asList( elementValue ), timestamp );
-            }
-        }
-
-        return batch;
-    }
-
-
-    /**
-     * Returns true if the property is unique, and the entity can be saved.  If it's not unique, false is returned
-     *
-     * @return True if this entity can safely "own" this property name and value unique combination
-     */
-    @Metered( group = "core", name = "EntityManager_isPropertyValueUniqueForEntity" )
-    public boolean isPropertyValueUniqueForEntity( UUID ownerEntityId, String entityType, String propertyName,
-                                                   Object propertyValue ) throws Exception {
-
-        if ( !getDefaultSchema().isPropertyUnique( entityType, propertyName ) ) {
-            return true;
-        }
-
-        if ( propertyValue == null ) {
-            return true;
-        }
-
-        /**
-         * Doing this in a loop sucks, but we need to account for possibly having more than 1 entry in the index due
-         * to corruption.  We need to allow them to update, otherwise
-         * both entities will be unable to update and must be deleted
-         */
-
-        Set<UUID> ownerEntityIds = getUUIDsForUniqueProperty(
-            new SimpleEntityRef(Application.ENTITY_TYPE, applicationId), entityType, propertyName, propertyValue );
-
-        //if there are no entities for this property, we know it's unique.  If there are,
-        // we have to make sure the one we were passed is in the set.  otherwise it belongs
-        //to a different entity
-        return ownerEntityIds.size() == 0 || ownerEntityIds.contains( ownerEntityId );
-    }
-
-
-    /**
-     * Return all UUIDs that have this unique value
-     *
-     * @param ownerEntityId The entity id that owns this entity collection
-     * @param collectionName The entity collection name
-     * @param propertyName The name of the unique property
-     * @param propertyValue The value of the unique property
-     */
-    private Set<UUID> getUUIDsForUniqueProperty(
-            EntityRef ownerEntityRef, String collectionName, String propertyName,
-            Object propertyValue ) throws Exception {
-
-
-        String collectionNameInternal = defaultCollectionName( collectionName );
-
-        Object key = createUniqueIndexKey(
-            ownerEntityRef.getUuid(), collectionNameInternal, propertyName, propertyValue );
-
-        List<HColumn<ByteBuffer, ByteBuffer>> cols = cass.getColumns(
-            cass.getApplicationKeyspace( applicationId ), ENTITY_UNIQUE, key, null, null, 2, false );
-
-
-        //No columns at all, it's unique
-        if ( cols.size() == 0 ) {
-            return Collections.emptySet();
-        }
-
-        //shouldn't happen, but it's an error case
-        if ( cols.size() > 1 ) {
-            logger.error( "INDEX CORRUPTION: More than 1 unique value exists for entities in "
-                + "ownerId {} of type {} on property {} with value {}",
-                new Object[] { ownerEntityRef, collectionNameInternal, propertyName, propertyValue } );
-        }
-
-        /**
-         * Doing this in a loop sucks, but we need to account for possibly having more than
-         * 1 entry in the index due
-         * to corruption.  We need to allow them to update, otherwise
-         * both entities will be unable to update and must be deleted
-         */
-
-        Set<UUID> results = new HashSet<UUID>( cols.size() );
-
-        for ( HColumn<ByteBuffer, ByteBuffer> col : cols ) {
-            results.add( ue.fromByteBuffer( col.getName() ) );
-        }
-
-        return results;
-    }
-
-
-    /** Add this unique index to the delete */
-    private void uniquePropertyDelete( Mutator<ByteBuffer> m, String collectionName, String entityType,
-        String propertyName, Object propertyValue, UUID entityId, long timestamp ) throws Exception {
-
-        //read the old value and delete it
-
-        Object oldValue = getProperty( new SimpleEntityRef( entityType, entityId ), propertyName );
-
-        //we have an old value.  If the new value is empty, we want to delete the old value.  If the new value is
-        // different we want to delete, otherwise we don't issue the delete
-        if ( oldValue != null && ( propertyValue == null || !oldValue.equals( propertyValue ) ) ) {
-            Object key = createUniqueIndexKey( applicationId, collectionName, propertyName, oldValue );
-
-            addDeleteToMutator( m, ENTITY_UNIQUE, key, timestamp, entityId );
-        }
-    }
-
-
-    /** Add this unique index to the delete */
-    private void uniquePropertyWrite( Mutator<ByteBuffer> m, String collectionName, String propertyName,
-                                      Object propertyValue, UUID entityId, long timestamp ) throws Exception {
-        Object key = createUniqueIndexKey( applicationId, collectionName, propertyName, propertyValue );
-
-        addInsertToMutator( m, ENTITY_UNIQUE, key, entityId, null, timestamp );
-    }
-
-
-    /**
-     * Create a row key for the entity of the given type with the name and value in the property.
-     * Used for fast unique
-     * index lookups
-     */
-    private Object createUniqueIndexKey(
-            UUID ownerId, String collectionName, String propertyName, Object value ) {
-        return key( ownerId, collectionName, propertyName, value );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getAlias_single" )
-    public EntityRef getAlias( EntityRef ownerRef, String collectionType, String aliasValue )
-            throws Exception {
-
-        Assert.notNull( ownerRef, "ownerRef is required" );
-        Assert.notNull( collectionType, "collectionType is required" );
-        Assert.notNull( aliasValue, "aliasValue is required" );
-
-        Map<String, EntityRef> results = getAlias(
-                ownerRef, collectionType, Collections.singletonList( aliasValue ) );
-
-        if ( results == null || results.size() == 0 ) {
-            return null;
-        }
-
-        //add a warn statement so we can see if we have data migration issues.
-        //TODO When we get an event system, trigger a repair if this is detected
-        if ( results.size() > 1 ) {
-            logger.warn(
-                "More than 1 entity with Owner id '{}' of type '{}' and alias '{}' exists.  "
-              + "This is a duplicate alias, and needs audited",
-                    new Object[] { ownerRef, collectionType, aliasValue } );
-        }
-
-        return results.get( aliasValue );
-    }
-
-
-    @Override
-    public Map<String, EntityRef> getAlias( String aliasType, List<String> aliases ) throws Exception {
-        return getAlias( new SimpleEntityRef(Application.ENTITY_TYPE, applicationId), aliasType, aliases );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getAlias_multi" )
-    public Map<String, EntityRef> getAlias(
-            EntityRef ownerRef, String collectionName, List<String> aliases ) throws Exception {
-
-        Assert.notNull( ownerRef, "ownerRef is required" );
-        Assert.notNull( collectionName, "collectionName is required" );
-        Assert.notEmpty( aliases, "aliases are required" );
-
-        String propertyName = Schema.getDefaultSchema().aliasProperty( collectionName );
-        String entityType = Schema.getDefaultSchema().getCollectionType(ownerRef.getType(), collectionName);
-
-        Map<String, EntityRef> results = new HashMap<String, EntityRef>();
-
-        for ( String alias : aliases ) {
-            for ( UUID id : getUUIDsForUniqueProperty( ownerRef, collectionName, propertyName, alias ) ) {
-                results.put( alias, new SimpleEntityRef( entityType, id ) );
-            }
-        }
-
-        return results;
-    }
-
-
-    @SuppressWarnings( "unchecked" )
-    @Override
-    public <A extends Entity> A create(
-            String entityType, Class<A> entityClass, Map<String, Object> properties )
-            throws Exception {
-
-        if ( ( entityType != null ) && ( entityType.startsWith( TYPE_ENTITY ) || entityType
-                .startsWith( "entities" ) ) ) {
-            throw new IllegalArgumentException( "Invalid entity type" );
-        }
-        A e = null;
-        try {
-            e = ( A ) create( entityType, ( Class<Entity> ) entityClass, properties, null );
-        }
-        catch ( ClassCastException e1 ) {
-            logger.error( "Unable to create typed entity", e1 );
-        }
-        return e;
-    }
-
-
-    @Override
-    public Entity create( UUID importId, String entityType, Map<String, Object> properties )
-            throws Exception {
-        return create( entityType, null, properties, importId );
-    }
-
-
-    @SuppressWarnings( "unchecked" )
-    @Override
-    public <A extends TypedEntity> A create( A entity ) throws Exception {
-        return ( A ) create( entity.getType(), entity.getClass(), entity.getProperties() );
-    }
-
-
-    @Override
-    @TraceParticipant
-    public Entity create( String entityType, Map<String, Object> properties ) throws Exception {
-        return create( entityType, null, properties );
-    }
-
-
-    /**
-     * Creates a new entity.
-     *
-     * @param entityType the entity type
-     * @param entityClass the entity class
-     * @param properties the properties
-     * @param importId an existing external uuid to use as the id for the new entity
-     *
-     * @return new entity
-     *
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "EntityManager_create" )
-    @TraceParticipant
-    public <A extends Entity> A create(
-            String entityType, Class<A> entityClass, Map<String, Object> properties,
-            UUID importId ) throws Exception {
-
-        UUID timestampUuid = newTimeUUID();
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-        A entity = batchCreate( m, entityType, entityClass, properties, importId, timestampUuid );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-
-        return entity;
-    }
-
-
-    @SuppressWarnings( "unchecked" )
-    @Metered( group = "core", name = "EntityManager_batchCreate" )
-    public <A extends Entity> A batchCreate(
-            Mutator<ByteBuffer> m, String entityType, Class<A> entityClass,
-            Map<String, Object> properties, UUID importId, UUID timestampUuid )
-            throws Exception {
-
-        String eType = Schema.normalizeEntityType( entityType );
-
-        Schema schema = getDefaultSchema();
-
-        boolean is_application = TYPE_APPLICATION.equals( eType );
-
-        if ( ( ( applicationId == null ) || applicationId.equals( UUIDUtils.ZERO_UUID ) ) && !is_application ) {
-            return null;
-        }
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        UUID itemId = null;
-
-        if ( is_application ) {
-            itemId = applicationId;
-        }
-        if ( importId != null ) {
-            itemId = importId;
-        }
-        if (itemId == null) {
-            itemId = UUIDUtils.newTimeUUID();
-        }
-        boolean emptyPropertyMap = false;
-        if ( properties == null ) {
-            properties = new TreeMap<String, Object>( CASE_INSENSITIVE_ORDER );
-        }
-        if ( properties.isEmpty() ) {
-            emptyPropertyMap = true;
-        }
-
-        if ( importId != null ) {
-            if ( isTimeBased( importId ) ) {
-                timestamp = UUIDUtils.getTimestampInMicros( importId );
-            }
-            else if ( properties.get( PROPERTY_CREATED ) != null ) {
-                timestamp = getLong( properties.get( PROPERTY_CREATED ) ) * 1000;
-            }
-        }
-
-        if ( entityClass == null ) {
-            entityClass = ( Class<A> ) Schema.getDefaultSchema().getEntityClass( entityType );
-        }
-
-        Set<String> required = schema.getRequiredProperties( entityType );
-
-        if ( required != null ) {
-            for ( String p : required ) {
-                if ( !PROPERTY_UUID.equals( p ) && !PROPERTY_TYPE.equals( p ) && !PROPERTY_CREATED.equals( p )
-                        && !PROPERTY_MODIFIED.equals( p ) ) {
-                    Object v = properties.get( p );
-                    if ( schema.isPropertyTimestamp( entityType, p ) ) {
-                        if ( v == null ) {
-                            properties.put( p, timestamp / 1000 );
-                        }
-                        else {
-                            long ts = getLong( v );
-                            if ( ts <= 0 ) {
-                                properties.put( p, timestamp / 1000 );
-                            }
-                        }
-                        continue;
-                    }
-                    if ( v == null ) {
-                        throw new RequiredPropertyNotFoundException( entityType, p );
-                    }
-                    else if ( ( v instanceof String ) && isBlank( ( String ) v ) ) {
-                        throw new RequiredPropertyNotFoundException( entityType, p );
-                    }
-                }
-            }
-        }
-
-        // Create collection name based on entity: i.e. "users"
-        String collection_name = Schema.defaultCollectionName( eType );
-        // Create collection key based collection name
-        String bucketId = indexBucketLocator.getBucket( applicationId, IndexType.COLLECTION, itemId, collection_name );
-
-        Object collection_key = key( applicationId, Schema.DICTIONARY_COLLECTIONS, collection_name, bucketId );
-
-        CollectionInfo collection = null;
-
-        if ( !is_application ) {
-            // Add entity to collection
-
-
-            if ( !emptyPropertyMap ) {
-                addInsertToMutator( m, ENTITY_ID_SETS, collection_key, itemId, null, timestamp );
-            }
-
-            // Add name of collection to dictionary property
-            // Application.collections
-            addInsertToMutator( m, ENTITY_DICTIONARIES, key( applicationId, Schema.DICTIONARY_COLLECTIONS ),
-                    collection_name, null, timestamp );
-
-            addInsertToMutator( m, ENTITY_COMPOSITE_DICTIONARIES, key( itemId, Schema.DICTIONARY_CONTAINER_ENTITIES ),
-                    asList( TYPE_APPLICATION, collection_name, applicationId ), null, timestamp );
-        }
-
-        if ( emptyPropertyMap ) {
-            return null;
-        }
-        properties.put( PROPERTY_UUID, itemId );
-        properties.put( PROPERTY_TYPE, Schema.normalizeEntityType( entityType, false ) );
-
-        if ( importId != null ) {
-            if ( properties.get( PROPERTY_CREATED ) == null ) {
-                properties.put( PROPERTY_CREATED, timestamp / 1000 );
-            }
-
-            if ( properties.get( PROPERTY_MODIFIED ) == null ) {
-                properties.put( PROPERTY_MODIFIED, timestamp / 1000 );
-            }
-        }
-        else {
-            properties.put( PROPERTY_CREATED, timestamp / 1000 );
-            properties.put( PROPERTY_MODIFIED, timestamp / 1000 );
-        }
-
-        // special case timestamp and published properties
-        // and dictionary their timestamp values if not set
-        // this is sure to break something for someone someday
-
-        if ( properties.containsKey( PROPERTY_TIMESTAMP ) ) {
-            long ts = getLong( properties.get( PROPERTY_TIMESTAMP ) );
-            if ( ts <= 0 ) {
-                properties.put( PROPERTY_TIMESTAMP, timestamp / 1000 );
-            }
-        }
-
-        A entity = EntityFactory.newEntity( itemId, eType, entityClass );
-        logger.info( "Entity created of type {}", entity.getClass().getName() );
-
-        if ( Event.ENTITY_TYPE.equals( eType ) ) {
-            Event event = ( Event ) entity.toTypedEntity();
-            for ( String prop_name : properties.keySet() ) {
-                Object propertyValue = properties.get( prop_name );
-                if ( propertyValue != null ) {
-                    event.setProperty( prop_name, propertyValue );
-                }
-            }
-            Message message = storeEventAsMessage( m, event, timestamp );
-            incrementEntityCollection( "events", timestamp );
-
-            entity.setUuid( message.getUuid() );
-            return entity;
-        }
-
-        for ( String prop_name : properties.keySet() ) {
-
-            Object propertyValue = properties.get( prop_name );
-
-            if ( propertyValue == null ) {
-                continue;
-            }
-
-
-            if ( User.ENTITY_TYPE.equals( entityType ) && "me".equals( prop_name ) ) {
-                throw new DuplicateUniquePropertyExistsException( entityType, prop_name, propertyValue );
-            }
-
-            entity.setProperty( prop_name, propertyValue );
-
-            batchSetProperty( m, entity, prop_name, propertyValue, true, true, timestampUuid );
-        }
-
-        if ( !is_application ) {
-            incrementEntityCollection( collection_name, timestamp );
-        }
-
-        return entity;
-    }
-
-
-    private void incrementEntityCollection( String collection_name, long cassandraTimestamp ) {
-        try {
-            incrementAggregateCounters( null, null, null, APPLICATION_COLLECTION + collection_name,
-                    ONE_COUNT, cassandraTimestamp );
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to increment counter application.collection: {}.", new Object[]{ collection_name, e} );
-        }
-        try {
-            incrementAggregateCounters( null, null, null, APPLICATION_ENTITIES, ONE_COUNT, cassandraTimestamp );
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to increment counter application.entities for collection: {} with timestamp: {}", new Object[]{collection_name, cassandraTimestamp,e} );
-        }
-    }
-
-
-    public void decrementEntityCollection( String collection_name ) {
-
-        long cassandraTimestamp = cass.createTimestamp();
-        decrementEntityCollection( collection_name, cassandraTimestamp );
-    }
-
-
-    public void decrementEntityCollection( String collection_name, long cassandraTimestamp ) {
-        try {
-            incrementAggregateCounters( null, null, null, APPLICATION_COLLECTION + collection_name, -ONE_COUNT,
-                    cassandraTimestamp );
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to decrement counter application.collection: {}.", new Object[]{collection_name, e} );
-        }
-        try {
-            incrementAggregateCounters( null, null, null, APPLICATION_ENTITIES, -ONE_COUNT, cassandraTimestamp );
-        }
-        catch ( Exception e ) {
-        	logger.error( "Unable to decrement counter application.entities for collection: {} with timestamp: {}", new Object[]{collection_name, cassandraTimestamp,e} );
-        }
-    }
-
-
-    @Metered( group = "core", name = "EntityManager_insertEntity" )
-    public void insertEntity( EntityRef entityRef ) throws Exception {
-
-        String type = entityRef.getType();
-        UUID entityId = entityRef.getUuid();
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-
-        Object itemKey = key( entityId );
-
-        long timestamp = cass.createTimestamp();
-
-        addPropertyToMutator( m, itemKey, type, PROPERTY_UUID, entityId, timestamp );
-        addPropertyToMutator( m, itemKey, type, PROPERTY_TYPE, type, timestamp );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-
-
-    public Message storeEventAsMessage( Mutator<ByteBuffer> m, Event event, long timestamp ) {
-
-        counterUtils.addEventCounterMutations( m, applicationId, event, timestamp );
-
-        QueueManager q = qmf.getQueueManager( applicationId );
-
-        Message message = new Message();
-        message.setType( "event" );
-        message.setCategory( event.getCategory() );
-        message.setStringProperty( "message", event.getMessage() );
-        message.setTimestamp( timestamp );
-        q.postToQueue( "events", message );
-
-        return message;
-    }
-
-
-    /**
-     * Gets the type.
-     *
-     * @param entityId the entity id
-     *
-     * @return entity type
-     *
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "EntityManager_getEntityType" )
-    public String getEntityType( UUID entityId ) throws Exception {
-
-        HColumn<String, String> column =
-                cass.getColumn( cass.getApplicationKeyspace( applicationId ), ENTITY_PROPERTIES, key( entityId ),
-                        PROPERTY_TYPE, se, se );
-        if ( column != null ) {
-            return column.getValue();
-        }
-        return null;
-    }
-
-
-    /**
-     * Gets the entity info. If no propertyNames are passed it loads the ENTIRE entity!
-     *
-     * @param entityId the entity id
-     * @param propertyNames the property names
-     *
-     * @return DynamicEntity object holding properties
-     *
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "EntityManager_loadPartialEntity" )
-    public DynamicEntity loadPartialEntity( UUID entityId, String... propertyNames ) throws Exception {
-
-        List<HColumn<String, ByteBuffer>> results = null;
-        if ( ( propertyNames != null ) && ( propertyNames.length > 0 ) ) {
-            Set<String> column_names = new TreeSet<String>( CASE_INSENSITIVE_ORDER );
-
-            column_names.add( PROPERTY_TYPE );
-            column_names.add( PROPERTY_UUID );
-
-            Collections.addAll(column_names, propertyNames);
-
-            results = cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_PROPERTIES, key( entityId ),
-                    column_names, se, be );
-        }
-        else {
-            results = cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_PROPERTIES,
-                    key( entityId ) );
-        }
-
-        Map<String, Object> entityProperties = deserializeEntityProperties( results );
-        if ( entityProperties == null ) {
-            return null;
-        }
-
-        String entityType = ( String ) entityProperties.get( PROPERTY_TYPE );
-        UUID id = ( UUID ) entityProperties.get( PROPERTY_UUID );
-
-        return new DynamicEntity( entityType, id, entityProperties );
-    }
-
-
-    /**
-     * Gets the specified entity.
-     *
-     * @param entityId the entity id
-     * @param entityClass the entity class
-     *
-     * @return entity
-     *
-     * @throws Exception the exception
-     */
-    public <A extends Entity> A getEntity( UUID entityId, Class<A> entityClass ) throws Exception {
-
-        Object entity_key = key( entityId );
-        Map<String, Object> results = null;
-
-        // if (entityType == null) {
-        results = deserializeEntityProperties( cass.getAllColumns(
-                cass.getApplicationKeyspace( applicationId ), ENTITY_PROPERTIES, entity_key ) );
-        // } else {
-        // Set<String> columnNames = Schema.getPropertyNames(entityType);
-        // results = getColumns(getApplicationKeyspace(applicationId),
-        // EntityCF.PROPERTIES, entity_key, columnNames, se, be);
-        // }
-
-        if ( results == null ) {
-            logger.warn( "getEntity(): No properties found for entity {}, "
-                    + "probably doesn't exist...", entityId );
-            return null;
-        }
-
-        UUID id = uuid( results.get( PROPERTY_UUID ) );
-        String type = string( results.get( PROPERTY_TYPE ) );
-
-        if ( !entityId.equals( id ) ) {
-
-            logger.error( "Expected entity id {}, found {}. Returning null entity",
-                    new Object[]{entityId, id, new Throwable()} );
-            return null;
-        }
-
-        A entity = EntityFactory.newEntity( id, type, entityClass );
-        entity.setProperties( results );
-
-        return entity;
-    }
-
-
-    /**
-     * Gets the specified list of entities.
-     *
-     * @param entityIds the entity ids
-     * @param entityClass the entity class
-     *
-     * @return entity
-     *
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "EntityManager_getEntities" )
-    public <A extends Entity> List<A> getEntities(
-            Collection<UUID> entityIds, Class<A> entityClass ) throws Exception {
-
-        List<A> entities = new ArrayList<A>();
-
-        if ( ( entityIds == null ) || ( entityIds.size() == 0 ) ) {
-            return entities;
-        }
-
-        Map<UUID, A> resultSet = new LinkedHashMap<UUID, A>();
-
-        Rows<UUID, String, ByteBuffer> results = null;
-
-        // if (entityType == null) {
-        results = cass.getRows( cass.getApplicationKeyspace( applicationId ),
-                ENTITY_PROPERTIES, entityIds, ue, se, be );
-        // } else {
-        // Set<String> columnNames = Schema.getPropertyNames(entityType);
-        // results = getRows(getApplicationKeyspace(applicationId),
-        // EntityCF.PROPERTIES,
-        // entityIds, columnNames, ue, se, be);
-        // }
-
-        if ( results != null ) {
-            for ( UUID key : entityIds ) {
-                Map<String, Object> properties = deserializeEntityProperties( results.getByKey( key ) );
-
-                if ( properties == null ) {
-                    logger.error( "Error deserializing entity with key {} entity probaby "
-                            + "doesn't exist, where did this key come from?", key );
-                    continue;
-                }
-
-                UUID id = uuid( properties.get( PROPERTY_UUID ) );
-                String type = string( properties.get( PROPERTY_TYPE ) );
-
-                if ( ( id == null ) || ( type == null ) ) {
-                    logger.error( "Error retrieving entity with key {}, no type or id deseriazable, where did this key come from?", key );
-                    continue;
-                }
-                A entity = EntityFactory.newEntity( id, type, entityClass );
-                entity.setProperties( properties );
-
-                resultSet.put( id, entity );
-            }
-
-            for ( UUID entityId : entityIds ) {
-                A entity = resultSet.get( entityId );
-                if ( entity != null ) {
-                    entities.add( entity );
-                }
-            }
-        }
-
-        return entities;
-    }
-
-
-    @Metered( group = "core", name = "EntityManager_getPropertyNames" )
-    public Set<String> getPropertyNames( EntityRef entity ) throws Exception {
-
-        Set<String> propertyNames = new TreeSet<String>( CASE_INSENSITIVE_ORDER );
-        List<HColumn<String, ByteBuffer>> results =
-                cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_DICTIONARIES,
-                        key( entity.getUuid(), DICTIONARY_PROPERTIES ) );
-        for ( HColumn<String, ByteBuffer> result : results ) {
-            String str = string( result.getName() );
-            if ( str != null ) {
-                propertyNames.add( str );
-            }
-        }
-
-        Set<String> schemaProperties = getDefaultSchema().getPropertyNames( entity.getType() );
-        if ( ( schemaProperties != null ) && !schemaProperties.isEmpty() ) {
-            propertyNames.addAll( schemaProperties );
-        }
-
-        return propertyNames;
-    }
-
-
-    @Metered( group = "core", name = "EntityManager_getDictionaryNames" )
-    public Set<String> getDictionaryNames( EntityRef entity ) throws Exception {
-
-        Set<String> dictionaryNames = new TreeSet<String>( CASE_INSENSITIVE_ORDER );
-        List<HColumn<String, ByteBuffer>> results =
-                cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_DICTIONARIES,
-                        key( entity.getUuid(), DICTIONARY_SETS ) );
-        for ( HColumn<String, ByteBuffer> result : results ) {
-            String str = string( result.getName() );
-            if ( str != null ) {
-                dictionaryNames.add( str );
-            }
-        }
-
-        Set<String> schemaSets = getDefaultSchema().getDictionaryNames( entity.getType() );
-        if ( ( schemaSets != null ) && !schemaSets.isEmpty() ) {
-            dictionaryNames.addAll( schemaSets );
-        }
-
-        return dictionaryNames;
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getDictionaryElementValue" )
-    public Object getDictionaryElementValue( EntityRef entity, String dictionaryName, String elementName )
-            throws Exception {
-
-        Object value = null;
-
-        ApplicationCF dictionaryCf = null;
-
-        boolean entityHasDictionary = getDefaultSchema().hasDictionary( entity.getType(), dictionaryName );
-
-        if ( entityHasDictionary ) {
-            dictionaryCf = ENTITY_DICTIONARIES;
-        }
-        else {
-            dictionaryCf = ENTITY_COMPOSITE_DICTIONARIES;
-        }
-
-        Class<?> dictionaryCoType = getDefaultSchema().getDictionaryValueType( entity.getType(), dictionaryName );
-        boolean coTypeIsBasic = ClassUtils.isBasicType( dictionaryCoType );
-
-        HColumn<ByteBuffer, ByteBuffer> result =
-                cass.getColumn( cass.getApplicationKeyspace( applicationId ), dictionaryCf,
-                        key( entity.getUuid(), dictionaryName ),
-                        entityHasDictionary ? bytebuffer( elementName ) : DynamicComposite.toByteBuffer( elementName ),
-                        be, be );
-        if ( result != null ) {
-            if ( entityHasDictionary && coTypeIsBasic ) {
-                value = object( dictionaryCoType, result.getValue() );
-            }
-            else if ( result.getValue().remaining() > 0 ) {
-                value = Schema.deserializePropertyValueFromJsonBinary( result.getValue().slice(), dictionaryCoType );
-            }
-        }
-        else {
-            logger.info( "Results of EntityManagerImpl.getDictionaryElementValue is null" );
-        }
-
-        return value;
-    }
-
-
-    @Metered( group = "core", name = "EntityManager_getDictionaryElementValues" )
-    public Map<String, Object> getDictionaryElementValues( EntityRef entity, String dictionaryName,
-                                                           String... elementNames ) throws Exception {
-
-        Map<String, Object> values = null;
-
-        ApplicationCF dictionaryCf = null;
-
-        boolean entityHasDictionary = getDefaultSchema().hasDictionary( entity.getType(), dictionaryName );
-
-        if ( entityHasDictionary ) {
-            dictionaryCf = ENTITY_DICTIONARIES;
-        }
-        else {
-            dictionaryCf = ENTITY_COMPOSITE_DICTIONARIES;
-        }
-
-        Class<?> dictionaryCoType = getDefaultSchema().getDictionaryValueType( entity.getType(), dictionaryName );
-        boolean coTypeIsBasic = ClassUtils.isBasicType( dictionaryCoType );
-
-        ByteBuffer[] columnNames = new ByteBuffer[elementNames.length];
-        for ( int i = 0; i < elementNames.length; i++ ) {
-            columnNames[i] = entityHasDictionary ? bytebuffer( elementNames[i] ) :
-                             DynamicComposite.toByteBuffer( elementNames[i] );
-        }
-
-        ColumnSlice<ByteBuffer, ByteBuffer> results =
-                cass.getColumns( cass.getApplicationKeyspace( applicationId ), dictionaryCf,
-                        key( entity.getUuid(), dictionaryName ), columnNames, be, be );
-        if ( results != null ) {
-            values = new HashMap<String, Object>();
-            for ( HColumn<ByteBuffer, ByteBuffer> result : results.getColumns() ) {
-                String name = entityHasDictionary ? string( result.getName() ) :
-                              DynamicComposite.fromByteBuffer( result.getName() ).get( 0, se );
-                if ( entityHasDictionary && coTypeIsBasic ) {
-                    values.put( name, object( dictionaryCoType, result.getValue() ) );
-                }
-                else if ( result.getValue().remaining() > 0 ) {
-                    values.put( name, Schema.deserializePropertyValueFromJsonBinary( result.getValue().slice(),
-                            dictionaryCoType ) );
-                }
-            }
-        }
-        else {
-            logger.error( "Results of EntityManagerImpl.getDictionaryElementValues is null" );
-        }
-
-        return values;
-    }
-
-
-    /**
-     * Gets the set.
-     *
-     * @param entity The owning entity
-     * @param dictionaryName the dictionary name
-     *
-     * @return contents of dictionary property
-     *
-     * @throws Exception the exception
-     */
-    @Override
-    @Metered( group = "core", name = "EntityManager_getDictionaryAsMap" )
-    public Map<Object, Object> getDictionaryAsMap( EntityRef entity, String dictionaryName ) throws Exception {
-
-        entity = validate( entity );
-
-        Map<Object, Object> dictionary = new LinkedHashMap<Object, Object>();
-
-        ApplicationCF dictionaryCf = null;
-
-        boolean entityHasDictionary = getDefaultSchema().hasDictionary( entity.getType(), dictionaryName );
-
-        if ( entityHasDictionary ) {
-            dictionaryCf = ENTITY_DICTIONARIES;
-        }
-        else {
-            dictionaryCf = ENTITY_COMPOSITE_DICTIONARIES;
-        }
-
-        Class<?> setType = getDefaultSchema().getDictionaryKeyType( entity.getType(), dictionaryName );
-        Class<?> setCoType = getDefaultSchema().getDictionaryValueType( entity.getType(), dictionaryName );
-        boolean coTypeIsBasic = ClassUtils.isBasicType( setCoType );
-
-        List<HColumn<ByteBuffer, ByteBuffer>> results =
-                cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), dictionaryCf,
-                        key( entity.getUuid(), dictionaryName ), be, be );
-        for ( HColumn<ByteBuffer, ByteBuffer> result : results ) {
-            Object name = null;
-            if ( entityHasDictionary ) {
-                name = object( setType, result.getName() );
-            }
-            else {
-                name = CompositeUtils.deserialize( result.getName() );
-            }
-            Object value = null;
-            if ( entityHasDictionary && coTypeIsBasic ) {
-                value = object( setCoType, result.getValue() );
-            }
-            else if ( result.getValue().remaining() > 0 ) {
-                value = Schema.deserializePropertyValueFromJsonBinary( result.getValue().slice(), setCoType );
-            }
-            if ( name != null ) {
-                dictionary.put( name, value );
-            }
-        }
-
-        return dictionary;
-    }
-
-
-    @Override
-    public Set<Object> getDictionaryAsSet( EntityRef entity, String dictionaryName ) throws Exception {
-        return new LinkedHashSet<Object>( getDictionaryAsMap( entity, dictionaryName ).keySet() );
-    }
-
-
-    /**
-     * Update properties.
-     *
-     * @param entityId the entity id
-     * @param properties the properties
-     *
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "EntityManager_updateProperties" )
-    public void updateProperties(
-            UUID entityId, String type, Map<String, Object> properties ) throws Exception {
-
-        EntityRef entity = new SimpleEntityRef( type, entityId );
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-
-        UUID timestampUuid = newTimeUUID();
-        properties.put( PROPERTY_MODIFIED, getTimestampInMillis( timestampUuid ) );
-
-        batchUpdateProperties( m, entity, properties, timestampUuid );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Metered( group = "core", name = "EntityManager_deleteEntity" )
-    public void deleteEntity( UUID entityId, String type ) throws Exception {
-
-        logger.info( "deleteEntity {} of application {}", entityId, applicationId );
-
-        EntityRef entity = new SimpleEntityRef( type, entityId );
-
-        logger.info( "deleteEntity: {} is of type {}", entityId, entity.getType() );
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-
-        UUID timestampUuid = newTimeUUID();
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        // get all connections and disconnect them
-        getRelationManager( ref( type, entityId ) ).batchDisconnect( m, timestampUuid );
-
-        // delete all core properties and any dynamic property that's ever been
-        // dictionary for this entity
-        Set<String> properties = getPropertyNames( entity );
-        if ( properties != null ) {
-            for ( String propertyName : properties ) {
-                m = batchSetProperty( m, entity, propertyName, null, true, false, timestampUuid );
-            }
-        }
-
-        // delete any core dictionaries and dynamic dictionaries associated with
-        // this entity
-        Set<String> dictionaries = getDictionaryNames( entity );
-        if ( dictionaries != null ) {
-            for ( String dictionary : dictionaries ) {
-                Set<Object> values = getDictionaryAsSet( entity, dictionary );
-                if ( values != null ) {
-                    for ( Object value : values ) {
-                        batchUpdateDictionary( m, entity, dictionary, value, true, timestampUuid );
-                    }
-                }
-            }
-        }
-
-        // find all the containing collections
-        getRelationManager( entity ).batchRemoveFromContainers( m, timestampUuid );
-
-        //decrease entity count
-        if ( !TYPE_APPLICATION.equals( entity.getType() ) ) {
-            String collection_name = Schema.defaultCollectionName( entity.getType() );
-            decrementEntityCollection( collection_name );
-        }
-
-
-        timestamp += 1;
-
-        if ( dictionaries != null ) {
-            for ( String dictionary : dictionaries ) {
-
-                ApplicationCF cf =
-                        getDefaultSchema().hasDictionary( entity.getType(), dictionary ) ? ENTITY_DICTIONARIES :
-                        ENTITY_COMPOSITE_DICTIONARIES;
-
-                addDeleteToMutator( m, cf, key( entity.getUuid(), dictionary ), timestamp );
-            }
-        }
-
-        addDeleteToMutator( m, ENTITY_PROPERTIES, key( entityId ), timestamp );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public void delete( EntityRef entityRef ) throws Exception {
-        deleteEntity( entityRef.getUuid(), entityRef.getType() );
-    }
-
-
-    public void batchCreateRole( Mutator<ByteBuffer> batch, UUID groupId, String roleName, String roleTitle,
-                                 long inactivity, RoleRef roleRef, UUID timestampUuid ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        if ( roleRef == null ) {
-            roleRef = new SimpleRoleRef( groupId, roleName );
-        }
-        if ( roleTitle == null ) {
-            roleTitle = roleRef.getRoleName();
-        }
-
-        EntityRef ownerRef = null;
-        if ( roleRef.getGroupId() != null ) {
-            ownerRef = new SimpleEntityRef( Group.ENTITY_TYPE, roleRef.getGroupId() );
-        }
-        else {
-            ownerRef = new SimpleEntityRef( Application.ENTITY_TYPE, applicationId );
-        }
-
-        Map<String, Object> properties = new TreeMap<String, Object>( CASE_INSENSITIVE_ORDER );
-        properties.put( PROPERTY_TYPE, Role.ENTITY_TYPE );
-        properties.put( "group", roleRef.getGroupId() );
-        properties.put( PROPERTY_NAME, roleRef.getApplicationRoleName() );
-        properties.put( "roleName", roleRef.getRoleName() );
-        properties.put( "title", roleTitle );
-        properties.put( PROPERTY_INACTIVITY, inactivity );
-
-        Entity role = batchCreate( batch, Role.ENTITY_TYPE, null, properties, roleRef.getUuid(), timestampUuid );
-
-        addInsertToMutator( batch, ENTITY_DICTIONARIES, key( ownerRef.getUuid(), Schema.DICTIONARY_ROLENAMES ),
-                roleRef.getRoleName(), roleTitle, timestamp );
-
-        addInsertToMutator( batch, ENTITY_DICTIONARIES, key( ownerRef.getUuid(), Schema.DICTIONARY_ROLETIMES ),
-                roleRef.getRoleName(), inactivity, timestamp );
-
-        addInsertToMutator( batch, ENTITY_DICTIONARIES, key( ownerRef.getUuid(), DICTIONARY_SETS ),
-                Schema.DICTIONARY_ROLENAMES, null, timestamp );
-
-        if ( roleRef.getGroupId() != null ) {
-            getRelationManager( ownerRef ).batchAddToCollection( batch, COLLECTION_ROLES, role, timestampUuid );
-        }
-    }
-
-
-    @Override
-    public EntityRef getUserByIdentifier( Identifier identifier ) throws Exception {
-        if ( identifier == null ) {
-            return null;
-        }
-        if ( identifier.isUUID() ) {
-            return new SimpleEntityRef( "user", identifier.getUUID() );
-        }
-        if ( identifier.isName() ) {
-            return this.getAlias(
-                    new SimpleEntityRef(Application.ENTITY_TYPE, applicationId),
-                    "user", identifier.getName() );
-        }
-        if ( identifier.isEmail() ) {
-
-            Query query = new Query();
-            query.setEntityType( "user" );
-            query.addEqualityFilter( "email", identifier.getEmail() );
-            query.setLimit( 1 );
-            query.setResultsLevel( REFS );
-
-            Results r = getRelationManager(
-                ref( Application.ENTITY_TYPE, applicationId ) ).searchCollection( "users", query );
-            if ( r != null && r.getRef() != null ) {
-                return r.getRef();
-            }
-            else {
-                // look-aside as it might be an email in the name field
-                return this.getAlias(
-                        new SimpleEntityRef(Application.ENTITY_TYPE, applicationId),
-                        "user", identifier.getEmail() );
-            }
-        }
-        return null;
-    }
-
-
-    @Override
-    public EntityRef getGroupByIdentifier( Identifier identifier ) throws Exception {
-        if ( identifier == null ) {
-            return null;
-        }
-        if ( identifier.isUUID() ) {
-            return new SimpleEntityRef( "group", identifier.getUUID() );
-        }
-        if ( identifier.isName() ) {
-            return this.getAlias(
-                    new SimpleEntityRef(Application.ENTITY_TYPE, applicationId),
-                    "group", identifier.getName() );
-        }
-        return null;
-    }
-
-
-    @Override
-    public Results getAggregateCounters( UUID userId, UUID groupId, String category,
-        String counterName, CounterResolution resolution, long start, long finish, boolean pad ) {
-
-        return this.getAggregateCounters(
-                userId, groupId, null, category, counterName, resolution, start, finish, pad );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getAggregateCounters" )
-    public Results getAggregateCounters( UUID userId, UUID groupId, UUID queueId, String category,
-            String counterName, CounterResolution resolution, long start, long finish, boolean pad ) {
-
-        start = resolution.round( start );
-        finish = resolution.round( finish );
-        long expected_time = start;
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        SliceCounterQuery<String, Long> q = createCounterSliceQuery( ko, se, le );
-        q.setColumnFamily( APPLICATION_AGGREGATE_COUNTERS.toString() );
-        q.setRange( start, finish, false, ALL_COUNT );
-
-        QueryResult<CounterSlice<Long>> r = q.setKey(
-                counterUtils.getAggregateCounterRow(
-                        counterName, userId, groupId, queueId, category, resolution ) ).execute();
-
-        List<AggregateCounter> counters = new ArrayList<AggregateCounter>();
-        for ( HCounterColumn<Long> column : r.get().getColumns() ) {
-            AggregateCounter count = new AggregateCounter( column.getName(), column.getValue() );
-            if ( pad && !( resolution == CounterResolution.ALL ) ) {
-                while ( count.getTimestamp() != expected_time ) {
-                    counters.add( new AggregateCounter( expected_time, 0 ) );
-                    expected_time = resolution.next( expected_time );
-                }
-                expected_time = resolution.next( expected_time );
-            }
-            counters.add( count );
-        }
-        if ( pad && !( resolution == CounterResolution.ALL ) ) {
-            while ( expected_time <= finish ) {
-                counters.add( new AggregateCounter( expected_time, 0 ) );
-                expected_time = resolution.next( expected_time );
-            }
-        }
-        return Results.fromCounters( new AggregateCounterSet( counterName, userId, groupId, category, counters ) );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getAggregateCounters_fromQueryObj" )
-    public Results getAggregateCounters( Query query ) throws Exception {
-        CounterResolution resolution = query.getResolution();
-        if ( resolution == null ) {
-            resolution = CounterResolution.ALL;
-        }
-        long start = query.getStartTime() != null ? query.getStartTime() : 0;
-        long finish = query.getFinishTime() != null ? query.getFinishTime() : 0;
-        boolean pad = query.isPad();
-        if ( start <= 0 ) {
-            start = 0;
-        }
-        if ( ( finish <= 0 ) || ( finish < start ) ) {
-            finish = System.currentTimeMillis();
-        }
-        start = resolution.round( start );
-        finish = resolution.round( finish );
-        long expected_time = start;
-
-        if ( pad && ( resolution != CounterResolution.ALL ) ) {
-            long max_counters = ( finish - start ) / resolution.interval();
-            if ( max_counters > 1000 ) {
-                finish = resolution.round( start + ( resolution.interval() * 1000 ) );
-            }
-        }
-
-        List<CounterFilterPredicate> filters = query.getCounterFilters();
-        if ( filters == null ) {
-            return null;
-        }
-        Map<String, AggregateCounterSelection> selections = new HashMap<String, AggregateCounterSelection>();
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-
-        for ( CounterFilterPredicate filter : filters ) {
-            AggregateCounterSelection selection =
-                    new AggregateCounterSelection( filter.getName(), getUuid( getUserByIdentifier( filter.getUser() ) ),
-                            getUuid( getGroupByIdentifier( filter.getGroup() ) ),
-                            org.apache.usergrid.mq.Queue.getQueueId( filter.getQueue() ), filter.getCategory() );
-            selections.put( selection.getRow( resolution ), selection );
-        }
-
-        MultigetSliceCounterQuery<String, Long> q = HFactory.createMultigetSliceCounterQuery( ko, se, le );
-        q.setColumnFamily( APPLICATION_AGGREGATE_COUNTERS.toString() );
-        q.setRange( start, finish, false, ALL_COUNT );
-        QueryResult<CounterRows<String, Long>> rows = q.setKeys( selections.keySet() ).execute();
-
-        List<AggregateCounterSet> countSets = new ArrayList<AggregateCounterSet>();
-        for ( CounterRow<String, Long> r : rows.get() ) {
-            expected_time = start;
-            List<AggregateCounter> counters = new ArrayList<AggregateCounter>();
-            for ( HCounterColumn<Long> column : r.getColumnSlice().getColumns() ) {
-                AggregateCounter count = new AggregateCounter( column.getName(), column.getValue() );
-                if ( pad && ( resolution != CounterResolution.ALL ) ) {
-                    while ( count.getTimestamp() != expected_time ) {
-                        counters.add( new AggregateCounter( expected_time, 0 ) );
-                        expected_time = resolution.next( expected_time );
-                    }
-                    expected_time = resolution.next( expected_time );
-                }
-                counters.add( count );
-            }
-            if ( pad && ( resolution != CounterResolution.ALL ) ) {
-                while ( expected_time <= finish ) {
-                    counters.add( new AggregateCounter( expected_time, 0 ) );
-                    expected_time = resolution.next( expected_time );
-                }
-            }
-            AggregateCounterSelection selection = selections.get( r.getKey() );
-            countSets.add( new AggregateCounterSet( selection.getName(), selection.getUserId(), selection.getGroupId(),
-                    selection.getCategory(), counters ) );
-        }
-
-        Collections.sort( countSets, new Comparator<AggregateCounterSet>() {
-            @Override
-            public int compare( AggregateCounterSet o1, AggregateCounterSet o2 ) {
-                String s1 = o1.getName();
-                String s2 = o2.getName();
-                return s1.compareTo( s2 );
-            }
-        } );
-        return Results.fromCounters( countSets );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_getEntityCounters" )
-    public Map<String, Long> getEntityCounters( UUID entityId ) throws Exception {
-
-        Map<String, Long> counters = new HashMap<String, Long>();
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        SliceCounterQuery<UUID, String> q = createCounterSliceQuery( ko, ue, se );
-        q.setColumnFamily( ENTITY_COUNTERS.toString() );
-        q.setRange( null, null, false, ALL_COUNT );
-        QueryResult<CounterSlice<String>> r = q.setKey( entityId ).execute();
-        for ( HCounterColumn<String> column : r.get().getColumns() ) {
-            counters.put( column.getName(), column.getValue() );
-        }
-        return counters;
-    }
-
-
-    @Override
-    public Map<String, Long> getApplicationCounters() throws Exception {
-        return getEntityCounters( applicationId );
-    }
-
-
-    @Override
-    @Metered( group = "core", name = "EntityManager_createApplicationCollection" )
-    public void createApplicationCollection( String entityType ) throws Exception {
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-
-        long timestamp = cass.createTimestamp();
-
-        String collection_name = Schema.defaultCollectionName( entityType );
-        // Add name of collection to dictionary property Application.collections
-        addInsertToMutator( m, ENTITY_DICTIONARIES, key( applicationId, Schema.DICTIONARY_COLLECTIONS ),
-                collection_name, null, timestamp );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public EntityRef getAlias( String aliasType, String alias ) throws Exception {
-        return getAlias( new SimpleEntityRef(Application.ENTITY_TYPE, applicationId), aliasType, alias );
-    }
-
-
-    @Override
-    public EntityRef validate( EntityRef entityRef ) throws Exception {
-        return validate( entityRef, true );
-    }
-
-
-    public EntityRef validate( EntityRef entityRef, boolean verify ) throws Exception {
-        if ( ( entityRef == null ) || ( entityRef.getUuid() == null ) ) {
-            if(verify){
-                throw new EntityNotFoundException( "An unknown entity cannot be verified" );
-            }
-            return null;
-        }
-        if ( ( entityRef.getType() == null ) || verify ) {
-            UUID entityId = entityRef.getUuid();
-            String entityType = entityRef.getType();
-            try {
-                get( entityRef ).getType();
-            }
-            catch ( Exception e ) {
-                logger.error( "Unable to load entity: {}", new Object[] {entityRef.getUuid(), e} );
-            }
-            if ( entityRef == null ) {
-                throw new EntityNotFoundException( "Entity " + entityId.toString() + " cannot be verified" );
-            }
-            if ( ( entityType != null ) && !entityType.equalsIgnoreCase( entityRef.getType() ) ) {
-                throw new UnexpectedEntityTypeException(
-                        "Entity " + entityId + " is not the expected type, expected " + entityType + ", found "
-                                + entityRef.getType() );
-            }
-        }
-        return entityRef;
-    }
-
-
-    public String getType( EntityRef entity ) throws Exception {
-        if ( entity.getType() != null ) {
-            return entity.getType();
-        }
-        return getEntityType( entity.getUuid() );
-    }
-
-
-    @Override
-    public Entity get(UUID id) throws Exception {
-        return getEntity( id, null );
-    }
-
-
-    @Override
-    public Entity get( EntityRef entityRef ) throws Exception {
-        if ( entityRef == null ) {
-            return null;
-        }
-        return getEntity( entityRef.getUuid(), null );
-    }
-
-
-    @Override
-    public <A extends Entity> A get( EntityRef entityRef, Class<A> entityClass ) throws Exception {
-        if ( entityRef == null ) {
-            return null;
-        }
-        return get( entityRef.getUuid(), entityClass );
-    }
-
-
-    @SuppressWarnings( "unchecked" )
-    @Override
-    public <A extends Entity> A get( UUID entityId, Class<A> entityClass ) throws Exception {
-        A e = null;
-        try {
-            e = ( A ) getEntity( entityId, ( Class<Entity> ) entityClass );
-        }
-        catch ( ClassCastException e1 ) {
-            logger.error( "Unable to get typed entity: {} of class {}",
-                    new Object[] {entityId, entityClass.getCanonicalName(), e1} );
-        }
-        return e;
-    }
-
-
-    @Override
-    public Results get( Collection<UUID> entityIds, Class<? extends Entity> entityClass,
-            Level resultsLevel ) throws Exception {
-        return fromEntities( getEntities( entityIds, entityClass ) );
-    }
-
-
-    @Override
-    public Results get( Collection<UUID> entityIds, String entityType, Class<? extends Entity> entityClass,
-                        Level resultsLevel ) throws Exception {
-        return fromEntities( getEntities( entityIds, entityClass ) );
-    }
-
-
-    public Results loadEntities( Results results, Level resultsLevel, int count ) throws Exception {
-        return loadEntities( results, resultsLevel, null, count );
-    }
-
-
-    public Results loadEntities( Results results, Level resultsLevel,
-            Map<UUID, UUID> associatedMap, int count ) throws Exception {
-
-        results = results.trim( count );
-        if ( resultsLevel.ordinal() <= results.getLevel().ordinal() ) {
-            return results;
-        }
-
-        results.setEntities( getEntities( results.getIds(), (Class)null ) );
-
-        if ( resultsLevel == Level.LINKED_PROPERTIES ) {
-            List<Entity> entities = results.getEntities();
-            BiMap<UUID, UUID> associatedIds = null;
-
-            if ( associatedMap != null ) {
-                associatedIds = HashBiMap.create( associatedMap );
-            }
-            else {
-                associatedIds = HashBiMap.create( entities.size() );
-                for ( Entity entity : entities ) {
-                    Object id = entity.getMetadata( PROPERTY_ASSOCIATED );
-                    if ( id instanceof UUID ) {
-                        associatedIds.put( entity.getUuid(), ( UUID ) id );
-                    }
-                }
-            }
-            List<DynamicEntity> linked = getEntities(
-                    new ArrayList<UUID>( associatedIds.values() ), (Class)null );
-
-            for ( DynamicEntity l : linked ) {
-                Map<String, Object> p = l.getDynamicProperties();
-                if ( ( p != null ) && ( p.size() > 0 ) ) {
-                    Entity e = results.getEntitiesMap().get( associatedIds.inverse().get( l.getUuid() ) );
-                    if ( l.getType().endsWith( TYPE_MEMBER ) ) {
-                        e.setProperty( TYPE_MEMBER, p );
-                    }
-                    else if ( l.getType().endsWith( TYPE_CONNECTION ) ) {
-                        e.setProperty( TYPE_CONNECTION, p );
-                    }
-                }
-            }
-        }
-
-        return results;
-    }
-
-
-    @Override
-    public void update( Entity entity ) throws Exception {
-        updateProperties( entity.getUuid(), entity.getType(), entity.getProperties() );
-    }
-
-
-    @Override
-    public Object getProperty( EntityRef entityRef, String propertyName ) throws Exception {
-        Entity entity = loadPartialEntity( entityRef.getUuid(), propertyName );
-
-        if ( entity == null ) {
-            return null;
-        }
-
-        return entity.getProperty( propertyName );
-    }
-
-
-    @Override
-    public Map<String, Object> getProperties( EntityRef entityRef ) throws Exception {
-        Entity entity = loadPartialEntity( entityRef.getUuid() );
-        Map<String, Object> props = entity.getProperties();
-        return props;
-    }
-
-
-    @Override
-    public List<Entity> getPartialEntities( Collection<UUID> ids, Collection<String> fields ) throws Exception {
-
-        List<Entity> entities = new ArrayList<Entity>( ids.size() );
-
-        if ( ids == null || ids.size() == 0 ) {
-            return entities;
-        }
-
-        fields.add( PROPERTY_UUID );
-        fields.add( PROPERTY_TYPE );
-
-        Rows<UUID, String, ByteBuffer> results = null;
-
-        results = cass.getRows( cass.getApplicationKeyspace( applicationId ), ENTITY_PROPERTIES, ids, fields, ue, se,
-                be );
-
-        if ( results == null ) {
-            return entities;
-        }
-
-        for ( Row<UUID, String, ByteBuffer> row : results ) {
-
-
-            Map<String, Object> properties =
-                    deserializeEntityProperties( results.getByKey( row.getKey() ).getColumnSlice().getColumns(), true,
-                            false );
-
-            //Could get a tombstoned row if the index is behind, just ignore it
-            if ( properties == null ) {
-                logger.warn( "Received row key {} with no type or properties, skipping", row.getKey() );
-                continue;
-            }
-
-            UUID id = uuid( properties.get( PROPERTY_UUID ) );
-            String type = string( properties.get( PROPERTY_TYPE ) );
-
-            if ( id == null || type == null ) {
-                logger.warn( "Error retrieving entity with key {} no type or id deseriazable, skipping", row.getKey() );
-                continue;
-            }
-
-            Entity entity = EntityFactory.newEntity( id, type );
-            entity.setProperties( properties );
-
-            entities.add( entity );
-        }
-
-
-        return entities;
-    }
-
-
-    @Override
-    public void setProperty( EntityRef entityRef, String propertyName, Object propertyValue ) throws Exception {
-
-        setProperty( entityRef, propertyName, propertyValue, false );
-    }
-
-
-    @Override
-    public void setProperty( EntityRef entityRef, String propertyName, Object propertyValue, boolean override )
-            throws Exception {
-
-        if ( ( propertyValue instanceof String ) && ( ( String ) propertyValue ).equals( "" ) ) {
-            propertyValue = null;
-        }
-
-        // todo: would this ever need to load more?
-        DynamicEntity entity = loadPartialEntity( entityRef.getUuid(), propertyName );
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ),
-                be );
-
-        propertyValue = getDefaultSchema().validateEntityPropertyValue( entity.getType(), propertyName, propertyValue );
-
-        entity.setProperty( propertyName, propertyValue );
-        batch = batchSetProperty( batch, entity, propertyName, propertyValue, override, false, timestampUuid );
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public void updateProperties( EntityRef entityRef, Map<String, Object> properties ) throws Exception {
-        entityRef = validate( entityRef );
-        properties = getDefaultSchema().cleanUpdatedProperties( entityRef.getType(), properties, false );
-        updateProperties( entityRef.getUuid(), entityRef.getType(), properties );
-    }
-
-
-    @Override
-    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementValue ) throws Exception {
-        addToDictionary( entityRef, dictionaryName, elementValue, null );
-    }
-
-
-    @Override
-    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementValue,
-                                 Object elementCoValue ) throws Exception {
-
-        if ( elementValue == null ) {
-            return;
-        }
-
-        EntityRef entity = get( entityRef );
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ),
-                be );
-
-        batch = batchUpdateDictionary( batch, entity, dictionaryName, elementValue, elementCoValue, false,
-                timestampUuid );
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public void addSetToDictionary( EntityRef entityRef, String dictionaryName, Set<?> elementValues )
-            throws Exception {
-
-        if ( ( elementValues == null ) || elementValues.isEmpty() ) {
-            return;
-        }
-
-        EntityRef entity = get( entityRef );
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ),
-                be );
-
-        for ( Object elementValue : elementValues ) {
-            batch = batchUpdateDictionary( batch, entity, dictionaryName, elementValue, null, false, timestampUuid );
-        }
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public void addMapToDictionary( EntityRef entityRef, String dictionaryName, Map<?, ?> elementValues )
-            throws Exception {
-
-        if ( ( elementValues == null ) || elementValues.isEmpty() || entityRef == null ) {
-            return;
-        }
-
-        EntityRef entity = get( entityRef );
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ),
-                be );
-
-        for ( Map.Entry<?, ?> elementValue : elementValues.entrySet() ) {
-            batch = batchUpdateDictionary( batch, entity, dictionaryName, elementValue.getKey(),
-                    elementValue.getValue(), false, timestampUuid );
-        }
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public void removeFromDictionary( EntityRef entityRef, String dictionaryName, Object elementValue )
-            throws Exception {
-
-        if ( elementValue == null ) {
-            return;
-        }
-
-        EntityRef entity = get( entityRef );
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ),
-                be );
-
-        batch = batchUpdateDictionary( batch, entity, dictionaryName, elementValue, true, timestampUuid );
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Override
-    public Set<String> getDictionaries( EntityRef entity ) throws Exception {
-        return getDictionaryNames( entity );
-    }
-
-
-    @Override
-    public void deleteProperty( EntityRef entityRef, String propertyName ) throws Exception {
-        setProperty( entityRef, propertyName, null );
-    }
-
-
-    @Override
-    public Set<String> getApplicationCollections() throws Exception {
-        Set<String> collections = new TreeSet<String>( CASE_INSENSITIVE_ORDER );
-        Set<String> dynamic_collections = cast( getDictionaryAsSet( getApplicationRef(), DICTIONARY_COLLECTIONS ) );
-        if ( dynamic_collections != null ) {
-            for ( String collection : dynamic_collections ) {
-                if ( !Schema.isAssociatedEntityType( collection ) ) {
-                    collections.add( collection );
-                }
-            }
-        }
-        Set<String> system_collections = getDefaultSchema().getCollectionNames( Application.ENTITY_TYPE );
-        if ( system_collections != null ) {
-            for ( String collection : system_collections ) {
-                if ( !Schema.isAssociatedEntityType( collection ) ) {
-                    collections.add( collection );
-                }
-            }
-        }
-        return collections;
-    }
-
-
-    @Override
-    public long getApplicationCollectionSize( String collectionName ) throws Exception {
-        Long count = null;
-        if ( !Schema.isAssociatedEntityType( collectionName ) ) {
-            Map<String, Long> counts = getApplicationCounters();
-            count = counts.get( new String( APPLICATION_COLLECTION + collectionName ) );
-        }
-        return count != null ? count : 0;
-    }
-
-
-    @Override
-    public Map<String, Object> getApplicationCollectionMetadata() throws Exception {
-        Set<String> collections = getApplicationCollections();
-        Map<String, Long> counts = getApplicationCounters();
-        Map<String, Object> metadata = new HashMap<String, Object>();
-        if ( collections != null ) {
-            for ( String collectionName : collections ) {
-                if ( !Schema.isAssociatedEntityType( collectionName ) ) {
-                    Long count = counts.get( APPLICATION_COLLE

<TRUNCATED>

[07/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/GeoIndexManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/GeoIndexManager.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/GeoIndexManager.java
deleted file mode 100644
index 4d696ae..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/GeoIndexManager.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.persistence.geo.EntityLocationRef;
-import org.apache.usergrid.persistence.geo.GeocellManager;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.persistence.hector.CountingMutator;
-
-import me.prettyprint.cassandra.serializers.ByteBufferSerializer;
-import me.prettyprint.hector.api.Keyspace;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-import me.prettyprint.hector.api.beans.HColumn;
-import me.prettyprint.hector.api.mutation.Mutator;
-
-import static me.prettyprint.hector.api.factory.HFactory.createColumn;
-import static me.prettyprint.hector.api.factory.HFactory.createMutator;
-import org.apache.usergrid.persistence.EntityManager;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_GEOCELL;
-import static org.apache.usergrid.persistence.Schema.INDEX_CONNECTIONS;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addInsertToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.batchExecute;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.logBatchOperation;
-import static org.apache.usergrid.utils.ConversionUtils.bytebuffer;
-
-
-public class GeoIndexManager {
-
-    private static final Logger logger = LoggerFactory.getLogger( GeoIndexManager.class );
-
-    /**
-     * We only ever go down to max resolution of 9 because we use bucket hashing. Every level divides the region by
-     * 1/16. Our original "box" is 90 degrees by 45 degrees. We therefore have 90 * (1/16)^(r-1) and 45 * (1/16)^(r-1)
-     * for our size where r is the largest bucket resolution. This gives us a size of 90 deg => 0.0000000209547 deg =
-     * .2cm and 45 deg => 0.00000001047735 deg = .1 cm
-     */
-    public static final int MAX_RESOLUTION = 9;
-
-
-    EntityManager em;
-    CassandraService cass;
-
-
-    public GeoIndexManager() {
-    }
-
-
-    public GeoIndexManager init( EntityManager em ) {
-        this.em = em;
-        this.cass = em.getCass();
-        return this;
-    }
-
-
-    public static Mutator<ByteBuffer> addLocationEntryInsertionToMutator( Mutator<ByteBuffer> m, Object key,
-                                                                          EntityLocationRef entry ) {
-
-        DynamicComposite columnName = entry.getColumnName();
-        DynamicComposite columnValue = entry.getColumnValue();
-        long ts = entry.getTimestampInMicros();
-
-        logBatchOperation( "Insert", ENTITY_INDEX, key, columnName, columnValue, ts );
-
-        HColumn<ByteBuffer, ByteBuffer> column =
-                createColumn( columnName.serialize(), columnValue.serialize(), ts, ByteBufferSerializer.get(),
-                        ByteBufferSerializer.get() );
-        m.addInsertion( bytebuffer( key ), ENTITY_INDEX.toString(), column );
-
-        return m;
-    }
-
-
-    private static Mutator<ByteBuffer> batchAddConnectionIndexEntries( Mutator<ByteBuffer> m,
-                                                                       IndexBucketLocator locator, UUID appId,
-                                                                       String propertyName, String geoCell,
-                                                                       UUID[] index_keys, ByteBuffer columnName,
-                                                                       ByteBuffer columnValue, long timestamp ) {
-
-        // entity_id,prop_name
-        Object property_index_key =
-                key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, propertyName, DICTIONARY_GEOCELL, geoCell,
-                        locator.getBucket( appId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL], geoCell ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, propertyName, DICTIONARY_GEOCELL,
-                        geoCell,
-                        locator.getBucket( appId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.BY_ENTITY_TYPE],
-                                geoCell ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, propertyName,
-                        DICTIONARY_GEOCELL, geoCell, locator.getBucket( appId, IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], geoCell ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, propertyName,
-                        DICTIONARY_GEOCELL, geoCell, locator.getBucket( appId, IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], geoCell ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        addInsertToMutator( m, ENTITY_INDEX, property_index_key, columnName, columnValue, timestamp );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        addInsertToMutator( m, ENTITY_INDEX, entity_type_prop_index_key, columnName, columnValue, timestamp );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        addInsertToMutator( m, ENTITY_INDEX, connection_type_prop_index_key, columnName, columnValue, timestamp );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        addInsertToMutator( m, ENTITY_INDEX, connection_type_and_entity_type_prop_index_key, columnName, columnValue,
-                timestamp );
-
-        return m;
-    }
-
-
-    public static void batchStoreLocationInConnectionsIndex( Mutator<ByteBuffer> m, IndexBucketLocator locator,
-                                                             UUID appId, UUID[] index_keys, String propertyName,
-                                                             EntityLocationRef location ) {
-
-        logger.debug("batchStoreLocationInConnectionsIndex");
-
-        Point p = location.getPoint();
-        List<String> cells = GeocellManager.generateGeoCell( p );
-
-        ByteBuffer columnName = location.getColumnName().serialize();
-        ByteBuffer columnValue = location.getColumnValue().serialize();
-        long ts = location.getTimestampInMicros();
-        for ( String cell : cells ) {
-            batchAddConnectionIndexEntries( m, locator, appId, propertyName, cell, index_keys, columnName, columnValue,
-                    ts );
-        }
-
-        logger.info( "Geocells to be saved for Point({} , {} ) are: {}", new Object[] {
-                location.getLatitude(), location.getLongitude(), cells
-        } );
-    }
-
-
-    private static Mutator<ByteBuffer> addLocationEntryDeletionToMutator( Mutator<ByteBuffer> m, Object key,
-                                                                          EntityLocationRef entry ) {
-
-        DynamicComposite columnName = entry.getColumnName();
-        long ts = entry.getTimestampInMicros();
-
-        logBatchOperation( "Delete", ENTITY_INDEX, key, columnName, null, ts );
-
-        m.addDeletion( bytebuffer( key ), ENTITY_INDEX.toString(), columnName.serialize(), ByteBufferSerializer.get(),
-                ts + 1 );
-
-        return m;
-    }
-
-
-    private static Mutator<ByteBuffer> batchDeleteConnectionIndexEntries( Mutator<ByteBuffer> m,
-                                                                          IndexBucketLocator locator, UUID appId,
-                                                                          String propertyName, String geoCell,
-                                                                          UUID[] index_keys, ByteBuffer columnName,
-                                                                          long timestamp ) {
-
-        // entity_id,prop_name
-        Object property_index_key =
-                key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, propertyName, DICTIONARY_GEOCELL, geoCell,
-                        locator.getBucket( appId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL], geoCell ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, propertyName, DICTIONARY_GEOCELL,
-                        geoCell,
-                        locator.getBucket( appId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.BY_ENTITY_TYPE],
-                                geoCell ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, propertyName,
-                        DICTIONARY_GEOCELL, geoCell, locator.getBucket( appId, IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], geoCell ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, propertyName,
-                        DICTIONARY_GEOCELL, geoCell, locator.getBucket( appId, IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], geoCell ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        m.addDeletion( bytebuffer( property_index_key ), ENTITY_INDEX.toString(), columnName,
-                ByteBufferSerializer.get(), timestamp );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        m.addDeletion( bytebuffer( entity_type_prop_index_key ), ENTITY_INDEX.toString(), columnName,
-                ByteBufferSerializer.get(), timestamp );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        m.addDeletion( bytebuffer( connection_type_prop_index_key ), ENTITY_INDEX.toString(), columnName,
-                ByteBufferSerializer.get(), timestamp );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        m.addDeletion( bytebuffer( connection_type_and_entity_type_prop_index_key ), ENTITY_INDEX.toString(),
-                columnName, ByteBufferSerializer.get(), timestamp );
-
-        return m;
-    }
-
-
-    public static void batchDeleteLocationInConnectionsIndex( Mutator<ByteBuffer> m, IndexBucketLocator locator,
-                                                              UUID appId, UUID[] index_keys, String propertyName,
-                                                              EntityLocationRef location ) {
-
-        logger.debug("batchDeleteLocationInConnectionsIndex");
-
-        Point p = location.getPoint();
-        List<String> cells = GeocellManager.generateGeoCell( p );
-
-        ByteBuffer columnName = location.getColumnName().serialize();
-
-        long ts = location.getTimestampInMicros();
-
-        for ( String cell : cells ) {
-
-            batchDeleteConnectionIndexEntries( m, locator, appId, propertyName, cell, index_keys, columnName, ts );
-        }
-
-        logger.info( "Geocells to be saved for Point({} , {} ) are: {}", new Object[] {
-                location.getLatitude(), location.getLongitude(), cells
-        } );
-    }
-
-
-    public static void batchStoreLocationInCollectionIndex( Mutator<ByteBuffer> m, IndexBucketLocator locator,
-                                                            UUID appId, Object key, UUID entityId,
-                                                            EntityLocationRef location ) {
-
-        Point p = location.getPoint();
-        List<String> cells = GeocellManager.generateGeoCell( p );
-
-        for ( int i = 0; i < MAX_RESOLUTION; i++ ) {
-            String cell = cells.get( i );
-
-            String indexBucket = locator.getBucket( appId, IndexType.GEO, entityId, cell );
-
-            addLocationEntryInsertionToMutator( m, key( key, DICTIONARY_GEOCELL, cell, indexBucket ), location );
-        }
-
-        if ( logger.isInfoEnabled() ) {
-            logger.info( "Geocells to be saved for Point({},{}) are: {}", new Object[] {
-                    location.getLatitude(), location.getLongitude(), cells
-            } );
-        }
-    }
-
-
-    public void storeLocationInCollectionIndex( EntityRef owner, String collectionName, UUID entityId,
-                                                String propertyName, EntityLocationRef location ) {
-
-        Keyspace ko = cass.getApplicationKeyspace( em.getApplicationId() );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, ByteBufferSerializer.get() );
-
-        batchStoreLocationInCollectionIndex( m, em.getIndexBucketLocator(), em.getApplicationId(),
-                key( owner.getUuid(), collectionName, propertyName ), owner.getUuid(), location );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-
-
-    public static void batchRemoveLocationFromCollectionIndex( Mutator<ByteBuffer> m, IndexBucketLocator locator,
-                                                               UUID appId, Object key, EntityLocationRef location ) {
-
-        Point p = location.getPoint();
-        List<String> cells = GeocellManager.generateGeoCell( p );
-
-        // delete for every bucket in every resolution
-        for ( int i = 0; i < MAX_RESOLUTION; i++ ) {
-
-            String cell = cells.get( i );
-
-            for ( String indexBucket : locator.getBuckets( appId, IndexType.GEO, cell ) ) {
-
-                addLocationEntryDeletionToMutator( m, key( key, DICTIONARY_GEOCELL, cell, indexBucket ), location );
-            }
-        }
-
-        if ( logger.isInfoEnabled() ) {
-            logger.info( "Geocells to be deleted for Point({},{}) are: {}", new Object[] {
-                    location.getLatitude(), location.getLongitude(), cells
-            } );
-        }
-    }
-
-
-    public void removeLocationFromCollectionIndex( EntityRef owner, String collectionName, String propertyName,
-                                                   EntityLocationRef location ) {
-
-        Keyspace ko = cass.getApplicationKeyspace( em.getApplicationId() );
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, ByteBufferSerializer.get() );
-
-        batchRemoveLocationFromCollectionIndex( m, em.getIndexBucketLocator(), em.getApplicationId(),
-                key( owner.getUuid(), collectionName, propertyName ), location );
-
-        batchExecute( m, CassandraService.RETRY_COUNT );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/IndexUpdate.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/IndexUpdate.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/IndexUpdate.java
index 2634b5d..882170d 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/IndexUpdate.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/IndexUpdate.java
@@ -45,6 +45,8 @@ import static org.apache.usergrid.utils.JsonUtils.toJsonNode;
 import static org.apache.usergrid.utils.UUIDUtils.getTimestampInMicros;
 
 
+//"Once queues are removed, remove this"
+@Deprecated()
 public class IndexUpdate {
 
     private static final Logger logger = LoggerFactory.getLogger( IndexUpdate.class );

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessor.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessor.java
deleted file mode 100644
index edd9fc5..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessor.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2014 The Apache Software Foundation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.usergrid.persistence.cassandra;
-
-import java.nio.ByteBuffer;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.query.ir.QueryNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
-import org.apache.usergrid.persistence.schema.CollectionInfo;
-
-import me.prettyprint.cassandra.serializers.UUIDSerializer;
-
-public interface QueryProcessor {
-    int PAGE_SIZE = 1000;
-
-    /**
-     * Apply cursor position and sort order to this slice. This should only be invoke 
-     * at evaluation time to ensure that the IR tree has already been fully constructed
-     */
-    void applyCursorAndSort(QuerySlice slice);
-
-    CollectionInfo getCollectionInfo();
-
-    /**
-     * Return the node id from the cursor cache
-     */
-    ByteBuffer getCursorCache(int nodeId);
-
-    EntityManager getEntityManager();
-
-    QueryNode getFirstNode();
-
-    /** @return the pageSizeHint */
-    int getPageSizeHint(QueryNode node);
-
-    Query getQuery();
-
-    /** Return the iterator results, ordered if required */
-    Results getResults(SearchVisitor visitor) throws Exception;
-
-    void setQuery(Query query);
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessorImpl.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessorImpl.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessorImpl.java
deleted file mode 100644
index 874ff88..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/QueryProcessorImpl.java
+++ /dev/null
@@ -1,727 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Stack;
-import java.util.UUID;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.Schema;
-import static org.apache.usergrid.persistence.Schema.getDefaultSchema;
-import org.apache.usergrid.persistence.entities.User;
-import org.apache.usergrid.persistence.exceptions.PersistenceException;
-import org.apache.usergrid.persistence.index.exceptions.IndexException;
-import org.apache.usergrid.persistence.index.exceptions.NoFullTextIndexException;
-import org.apache.usergrid.persistence.index.exceptions.NoIndexException;
-import org.apache.usergrid.persistence.index.query.Identifier;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.SortDirection;
-import org.apache.usergrid.persistence.index.query.Query.SortPredicate;
-import org.apache.usergrid.persistence.index.query.tree.AndOperand;
-import org.apache.usergrid.persistence.index.query.tree.ContainsOperand;
-import org.apache.usergrid.persistence.index.query.tree.Equal;
-import org.apache.usergrid.persistence.index.query.tree.EqualityOperand;
-import org.apache.usergrid.persistence.index.query.tree.GreaterThan;
-import org.apache.usergrid.persistence.index.query.tree.GreaterThanEqual;
-import org.apache.usergrid.persistence.index.query.tree.LessThan;
-import org.apache.usergrid.persistence.index.query.tree.LessThanEqual;
-import org.apache.usergrid.persistence.index.query.tree.Literal;
-import org.apache.usergrid.persistence.index.query.tree.NotOperand;
-import org.apache.usergrid.persistence.index.query.tree.Operand;
-import org.apache.usergrid.persistence.index.query.tree.OrOperand;
-import org.apache.usergrid.persistence.index.query.tree.QueryVisitor;
-import org.apache.usergrid.persistence.index.query.tree.StringLiteral;
-import org.apache.usergrid.persistence.index.query.tree.WithinOperand;
-import org.apache.usergrid.persistence.query.ir.AllNode;
-import org.apache.usergrid.persistence.query.ir.AndNode;
-import org.apache.usergrid.persistence.query.ir.EmailIdentifierNode;
-import org.apache.usergrid.persistence.query.ir.NameIdentifierNode;
-import org.apache.usergrid.persistence.query.ir.NotNode;
-import org.apache.usergrid.persistence.query.ir.OrNode;
-import org.apache.usergrid.persistence.query.ir.OrderByNode;
-import org.apache.usergrid.persistence.query.ir.QueryNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
-import org.apache.usergrid.persistence.query.ir.SliceNode;
-import org.apache.usergrid.persistence.query.ir.UuidIdentifierNode;
-import org.apache.usergrid.persistence.query.ir.WithinNode;
-import org.apache.usergrid.persistence.query.ir.result.ResultIterator;
-import org.apache.usergrid.persistence.query.ir.result.ResultsLoader;
-import org.apache.usergrid.persistence.query.ir.result.ResultsLoaderFactory;
-import org.apache.usergrid.persistence.query.ir.result.ScanColumn;
-import org.apache.usergrid.persistence.schema.CollectionInfo;
-import org.elasticsearch.index.query.FilterBuilder;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class QueryProcessorImpl implements QueryProcessor {
-
-    public static final int PAGE_SIZE = 1000;
-    private static final Logger logger = LoggerFactory.getLogger( QueryProcessor.class );
-
-    private static final Schema SCHEMA = getDefaultSchema();
-
-    private final CollectionInfo collectionInfo;
-    private final EntityManager em;
-    private final ResultsLoaderFactory loaderFactory;
-
-    private Operand rootOperand;
-    private List<SortPredicate> sorts;
-    private CursorCache cursorCache;
-    private QueryNode rootNode;
-    private String entityType;
-
-    private int size;
-    private Query query;
-    private int pageSizeHint;
-
-
-    public QueryProcessorImpl( Query query, CollectionInfo collectionInfo, EntityManager em,
-            ResultsLoaderFactory loaderFactory ) throws PersistenceException {
-        setQuery( query );
-        this.collectionInfo = collectionInfo;
-        this.em = em;
-        this.loaderFactory = loaderFactory;
-        process();
-    }
-
-
-    public Query getQuery() {
-        return query;
-    }
-
-
-    public void setQuery( Query query ) {
-        this.sorts = query.getSortPredicates();
-        this.cursorCache = new CursorCache( query.getCursor() );
-        this.rootOperand = query.getRootOperand();
-        this.entityType = query.getEntityType();
-        this.size = query.getLimit();
-        this.query = query;
-    }
-
-
-    public CollectionInfo getCollectionInfo() {
-        return collectionInfo;
-    }
-
-
-    private void process() throws PersistenceException {
-
-        int opCount = 0;
-
-        // no operand. Check for sorts
-        if ( rootOperand != null ) {
-            // visit the tree
-
-            TreeEvaluator visitor = new TreeEvaluator();
-
-            rootOperand.visit( visitor );
-
-            rootNode = visitor.getRootNode();
-
-            opCount = visitor.getSliceCount();
-        }
-
-        // see if we have sorts, if so, we can add them all as a single node at
-        // the root
-        if ( sorts.size() > 0 ) {
-
-            OrderByNode order = generateSorts( opCount );
-
-            opCount += order.getFirstPredicate().getAllSlices().size();
-
-            rootNode = order;
-        }
-
-
-        //if we still don't have a root node, no query nor order by was specified,
-        // just use the all node or the identifiers
-        if ( rootNode == null ) {
-
-
-            //a name alias or email alias was specified
-            if ( query.containsSingleNameOrEmailIdentifier() ) {
-
-                Identifier ident = query.getSingleIdentifier();
-
-                //an email was specified.  An edge case that only applies to users.  This is fulgy to put here,
-                // but required
-                if ( query.getEntityType().equals( User.ENTITY_TYPE ) && ident.isEmail() ) {
-                    rootNode = new EmailIdentifierNode( ident );
-                }
-
-                //use the ident with the default alias.  could be an email
-                else {
-                    rootNode = new NameIdentifierNode( ident.getName() );
-                }
-            }
-            //a uuid was specified
-            else if ( query.containsSingleUuidIdentifier() ) {
-                rootNode = new UuidIdentifierNode( query.getSingleUuidIdentifier() );
-            }
-
-
-            //nothing was specified, order it by uuid
-            else {
-
-
-                //this is a bit ugly, but how we handle the start parameter
-                UUID startResult = query.getStartResult();
-
-                boolean startResultSet = startResult != null;
-
-                AllNode allNode = new AllNode( 0, startResultSet );
-
-                if ( startResultSet ) {
-                    cursorCache.setNextCursor( allNode.getSlice().hashCode(),
-                            Serializers.ue.toByteBuffer( startResult ) );
-                }
-
-                rootNode = allNode;
-            }
-        }
-
-        if ( opCount > 1 ) {
-            pageSizeHint = PAGE_SIZE;
-        }
-        else {
-            pageSizeHint = Math.min( size, PAGE_SIZE );
-        }
-    }
-
-
-    public QueryNode getFirstNode() {
-        return rootNode;
-    }
-
-
-    /**
-     * Apply cursor position and sort order to this slice. This should only be invoke at evaluation time to ensure that
-     * the IR tree has already been fully constructed
-     */
-    public void applyCursorAndSort( QuerySlice slice ) {
-        // apply the sort first, since this can change the hash code
-        SortPredicate sort = getSort( slice.getPropertyName() );
-
-        if ( sort != null ) {
-            boolean isReversed = sort.getDirection() == SortDirection.DESCENDING;
-
-            //we're reversing the direction of this slice, reverse the params as well
-            if ( isReversed != slice.isReversed() ) {
-                slice.reverse();
-            }
-        }
-        // apply the cursor
-        ByteBuffer cursor = cursorCache.getCursorBytes( slice.hashCode() );
-
-        if ( cursor != null ) {
-            slice.setCursor( cursor );
-        }
-    }
-
-
-    /**
-     * Return the node id from the cursor cache
-     * @param nodeId
-     * @return
-     */
-    public ByteBuffer getCursorCache(int nodeId){
-        return cursorCache.getCursorBytes( nodeId );
-    }
-
-
-    private SortPredicate getSort( String propertyName ) {
-        for ( SortPredicate sort : sorts ) {
-            if ( sort.getPropertyName().equals( propertyName ) ) {
-                return sort;
-            }
-        }
-        return null;
-    }
-
-
-    /** Return the iterator results, ordered if required */
-    public Results getResults( SearchVisitor visitor ) throws Exception {
-        // if we have no order by just load the results
-
-        if ( rootNode == null ) {
-            return null;
-        }
-
-        rootNode.visit( visitor );
-
-        ResultIterator itr = visitor.getResults();
-
-        List<ScanColumn> entityIds = new ArrayList<ScanColumn>( Math.min( size, Query.MAX_LIMIT ) );
-
-        CursorCache resultsCursor = new CursorCache();
-
-        while ( entityIds.size() < size && itr.hasNext() ) {
-            entityIds.addAll( itr.next() );
-        }
-
-        //set our cursor, we paged through more entities than we want to return
-        if ( entityIds.size() > 0 ) {
-            int resultSize = Math.min( entityIds.size(), size );
-            entityIds = entityIds.subList( 0, resultSize );
-
-            if ( resultSize == size ) {
-                itr.finalizeCursor( resultsCursor, entityIds.get( resultSize - 1 ).getUUID() );
-            }
-        }
-        if (logger.isDebugEnabled()) {
-        	logger.debug("Getting result for query: [{}],  returning entityIds size: {}", 
-                    getQuery(), entityIds.size());
-        }
-
-        final ResultsLoader loader = loaderFactory.getResultsLoader( em, query, query.getResultsLevel() );
-        final Results results = loader.getResults( entityIds, query.getEntityType() );
-
-        if ( results == null ) {
-            return null;
-        }
-
-        // now we need to set the cursor from our tree evaluation for return
-        results.setCursor( resultsCursor.asString() );
-
-        results.setQuery( query );
-        results.setQueryProcessor( this );
-        results.setSearchVisitor( visitor );
-
-        return results;
-    }
-
-
-    private class TreeEvaluator implements QueryVisitor {
-
-        // stack for nodes that will be used to construct the tree and create
-        // objects
-        private CountingStack<QueryNode> nodes = new CountingStack<QueryNode>();
-
-
-        private int contextCount = -1;
-
-
-        /** Get the root node in our tree for runtime evaluation */
-        public QueryNode getRootNode() {
-            return nodes.peek();
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.AndOperand)
-         */
-        @Override
-        public void visit( AndOperand op ) throws IndexException {
-
-            op.getLeft().visit( this );
-
-            QueryNode leftResult = nodes.peek();
-
-            op.getRight().visit( this );
-
-            QueryNode rightResult = nodes.peek();
-
-            // if the result of the left and right are the same, we don't want
-            // to create an AND. We'll use the same SliceNode. Do nothing
-            if ( leftResult == rightResult ) {
-                return;
-            }
-
-            // otherwise create a new AND node from the result of the visit
-
-            QueryNode right = nodes.pop();
-            QueryNode left = nodes.pop();
-
-            AndNode newNode = new AndNode( left, right );
-
-            nodes.push( newNode );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.OrOperand)
-         */
-        @Override
-        public void visit( OrOperand op ) throws IndexException {
-
-            // we need to create a new slicenode for the children of this
-            // operation
-
-            Operand left = op.getLeft();
-            Operand right = op.getRight();
-
-            // we only create a new slice node if our children are && and ||
-            // operations
-            createNewSlice( left );
-
-            left.visit( this );
-
-            // we only create a new slice node if our children are && and ||
-            // operations
-            createNewSlice( right );
-
-            right.visit( this );
-
-            QueryNode rightResult = nodes.pop();
-            QueryNode leftResult = nodes.pop();
-
-            // rewrite with the new Or operand
-            OrNode orNode = new OrNode( leftResult, rightResult,  ++contextCount );
-
-            nodes.push( orNode );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.NotOperand)
-         */
-        @Override
-        public void visit( NotOperand op ) throws IndexException {
-
-            // create a new context since any child of NOT will need to be
-            // evaluated independently
-            Operand child = op.getOperation();
-            createNewSlice( child );
-            child.visit( this );
-
-            nodes.push( new NotNode( nodes.pop(), new AllNode( ++contextCount, false ) ) );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.ContainsOperand)
-         */
-        @Override
-        public void visit( ContainsOperand op ) throws NoFullTextIndexException {
-
-            String propertyName = op.getProperty().getValue();
-
-            if ( !SCHEMA.isPropertyFulltextIndexed( entityType, propertyName ) ) {
-                throw new NoFullTextIndexException( entityType, propertyName );
-            }
-
-            StringLiteral string = op.getString();
-
-            String indexName = op.getProperty().getIndexedValue();
-
-            SliceNode node = null;
-
-            // sdg - if left & right have same field name, we need to create a new
-            // slice
-            if ( !nodes.isEmpty() && nodes.peek() instanceof SliceNode
-                    && ( ( SliceNode ) nodes.peek() ).getSlice( indexName ) != null ) {
-                node = newSliceNode();
-            }
-            else {
-                node = getUnionNode( op );
-            }
-
-            String fieldName = op.getProperty().getIndexedValue();
-
-            node.setStart( fieldName, string.getValue(), true );
-            node.setFinish( fieldName, string.getEndValue(), true );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.WithinOperand)
-         */
-        @Override
-        public void visit( WithinOperand op ) {
-
-            // change the property name to coordinates
-            nodes.push( new WithinNode( op.getProperty().getIndexedName(), op.getDistance().getFloatValue(),
-                    op.getLatitude().getFloatValue(), op.getLongitude().getFloatValue(), ++contextCount ) );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.LessThan)
-         */
-        @Override
-        public void visit( LessThan op ) throws NoIndexException {
-            String propertyName = op.getProperty().getValue();
-
-            checkIndexed( propertyName );
-
-            getUnionNode( op ).setFinish( propertyName, op.getLiteral().getValue(), false );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.LessThanEqual)
-         */
-        @Override
-        public void visit( LessThanEqual op ) throws NoIndexException {
-
-            String propertyName = op.getProperty().getValue();
-
-            checkIndexed( propertyName );
-
-            getUnionNode( op ).setFinish( propertyName, op.getLiteral().getValue(), true );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.Equal)
-         */
-        @Override
-        public void visit( Equal op ) throws NoIndexException {
-            String fieldName = op.getProperty().getValue();
-
-            checkIndexed( fieldName );
-
-            Literal<?> literal = op.getLiteral();
-            SliceNode node = getUnionNode( op );
-
-            // this is an edge case. If we get more edge cases, we need to push
-            // this down into the literals and let the objects
-            // handle this
-            if ( literal instanceof StringLiteral ) {
-
-                StringLiteral stringLiteral = ( StringLiteral ) literal;
-
-                String endValue = stringLiteral.getEndValue();
-
-                if ( endValue != null ) {
-                    node.setFinish( fieldName, endValue, true );
-                }
-            }
-            else {
-                node.setFinish( fieldName, literal.getValue(), true );
-            }
-
-            node.setStart( fieldName, literal.getValue(), true );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.GreaterThan)
-         */
-        @Override
-        public void visit( GreaterThan op ) throws NoIndexException {
-            String propertyName = op.getProperty().getValue();
-
-            checkIndexed( propertyName );
-
-            getUnionNode( op ).setStart( propertyName, op.getLiteral().getValue(), false );
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see org.apache.usergrid.persistence.query.tree.QueryVisitor#visit(org.apache.usergrid
-         * .persistence.query.tree.GreaterThanEqual)
-         */
-        @Override
-        public void visit( GreaterThanEqual op ) throws NoIndexException {
-            String propertyName = op.getProperty().getValue();
-
-            checkIndexed( propertyName );
-
-            getUnionNode( op ).setStart( propertyName, op.getLiteral().getValue(), true );
-        }
-
-
-        /**
-         * Return the current leaf node to add to if it exists. This means that we can compress multiple 'AND'
-         * operations and ranges into a single node. Otherwise a new node is created and pushed to the stack
-         *
-         * @param current The current operand node
-         */
-        private SliceNode getUnionNode( EqualityOperand current ) {
-
-            /**
-             * we only create a new slice node in 3 situations 1. No nodes exist 2.
-             * The parent node is not an AND node. Meaning we can't add this slice to
-             * the current set of slices 3. Our current top of stack is not a slice
-             * node.
-             */
-            // no nodes exist
-            if ( nodes.size() == 0 || !( nodes.peek() instanceof SliceNode ) ) {
-                return newSliceNode();
-            }
-
-            return ( SliceNode ) nodes.peek();
-        }
-
-
-        /** The new slice node */
-        private SliceNode newSliceNode() {
-            SliceNode sliceNode = new SliceNode( ++contextCount );
-
-            nodes.push( sliceNode );
-
-            return sliceNode;
-        }
-
-
-        /** Create a new slice if one will be required within the context of this node */
-        private void createNewSlice( Operand child ) {
-            if ( child instanceof EqualityOperand || child instanceof AndOperand || child instanceof ContainsOperand ) {
-                newSliceNode();
-            }
-        }
-
-
-        public int getSliceCount() {
-            return nodes.getSliceCount();
-        }
-
-        @Override
-        public QueryBuilder getQueryBuilder() {
-            throw new UnsupportedOperationException("Not supported by this vistor implementation."); 
-        }
-
-        @Override
-        public FilterBuilder getFilterBuilder() {
-            throw new UnsupportedOperationException("Not supported by this vistor implementation."); 
-        }
-    }
-
-
-    private static class CountingStack<T> extends Stack<T> {
-
-        private int count = 0;
-        private static final long serialVersionUID = 1L;
-
-
-        /* (non-Javadoc)
-         * @see java.util.Stack#pop()
-         */
-        @Override
-        public synchronized T pop() {
-            T entry = super.pop();
-
-            if ( entry instanceof SliceNode ) {
-                count += ( ( SliceNode ) entry ).getAllSlices().size();
-            }
-
-            return entry;
-        }
-
-
-        public int getSliceCount() {
-
-            Iterator<T> itr = this.iterator();
-
-            T entry;
-
-            while ( itr.hasNext() ) {
-                entry = itr.next();
-
-                if ( entry instanceof SliceNode ) {
-                    count += ( ( SliceNode ) entry ).getAllSlices().size();
-                }
-            }
-
-            return count;
-        }
-    }
-
-
-    /** @return the pageSizeHint */
-    public int getPageSizeHint( QueryNode node ) {
-        /*****
-         * DO NOT REMOVE THIS PIECE OF CODE!!!!!!!!!!!
-         * It is crucial that the root iterator only needs the result set size per page
-         * otherwise our cursor logic will fail when passing cursor data to the leaf nodes
-         *******/
-        if(node == rootNode){
-            return size;
-        }
-
-        return pageSizeHint;
-    }
-
-
-    /** Generate a slice node with scan ranges for all the properties in our sort cache */
-    private OrderByNode generateSorts( int opCount ) throws NoIndexException {
-
-        // the value is irrelevant since we'll only ever have 1 slice node
-        // if this is called
-        SliceNode slice = new SliceNode( opCount );
-
-        SortPredicate first = sorts.get( 0 );
-
-        String propertyName = first.getPropertyName();
-
-        checkIndexed( propertyName );
-
-        slice.setStart( propertyName, null, true );
-        slice.setFinish( propertyName, null, true );
-
-
-        for ( int i = 1; i < sorts.size(); i++ ) {
-            checkIndexed( sorts.get( i ).getPropertyName() );
-        }
-
-
-        return new OrderByNode( slice, sorts.subList( 1, sorts.size() ), rootNode );
-    }
-
-
-    private void checkIndexed( String propertyName ) throws NoIndexException {
-
-        if ( propertyName == null || propertyName.isEmpty() || ( !SCHEMA.isPropertyIndexed( entityType, propertyName )
-                && collectionInfo != null ) ) {
-            throw new NoIndexException( entityType, propertyName );
-        }
-    }
-
-
-    public EntityManager getEntityManager() {
-        return em;
-    }
-}
\ No newline at end of file


[06/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/RelationManagerImpl.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/RelationManagerImpl.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/RelationManagerImpl.java
deleted file mode 100644
index be84176..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/RelationManagerImpl.java
+++ /dev/null
@@ -1,2338 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.nio.ByteBuffer;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.util.Assert;
-import org.apache.usergrid.persistence.CollectionRef;
-import org.apache.usergrid.persistence.ConnectedEntityRef;
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.persistence.PagingResultsIterator;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.RelationManager;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.RoleRef;
-import org.apache.usergrid.persistence.Schema;
-import org.apache.usergrid.persistence.SimpleCollectionRef;
-import org.apache.usergrid.persistence.SimpleEntityRef;
-import org.apache.usergrid.persistence.SimpleRoleRef;
-import org.apache.usergrid.persistence.cassandra.IndexUpdate.IndexEntry;
-import org.apache.usergrid.persistence.cassandra.index.ConnectedIndexScanner;
-import org.apache.usergrid.persistence.cassandra.index.IndexBucketScanner;
-import org.apache.usergrid.persistence.cassandra.index.IndexScanner;
-import org.apache.usergrid.persistence.cassandra.index.NoOpIndexScanner;
-import org.apache.usergrid.persistence.entities.Group;
-import org.apache.usergrid.persistence.geo.CollectionGeoSearch;
-import org.apache.usergrid.persistence.geo.ConnectionGeoSearch;
-import org.apache.usergrid.persistence.geo.EntityLocationRef;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.persistence.hector.CountingMutator;
-import org.apache.usergrid.persistence.query.ir.AllNode;
-import org.apache.usergrid.persistence.query.ir.NameIdentifierNode;
-import org.apache.usergrid.persistence.query.ir.QueryNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
-import org.apache.usergrid.persistence.query.ir.WithinNode;
-import org.apache.usergrid.persistence.query.ir.result.CollectionResultsLoaderFactory;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionIndexSliceParser;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionResultsLoaderFactory;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionTypesIterator;
-import org.apache.usergrid.persistence.query.ir.result.EmptyIterator;
-import org.apache.usergrid.persistence.query.ir.result.GeoIterator;
-import org.apache.usergrid.persistence.query.ir.result.SliceIterator;
-import org.apache.usergrid.persistence.query.ir.result.StaticIdIterator;
-import org.apache.usergrid.persistence.query.ir.result.UUIDIndexSliceParser;
-import org.apache.usergrid.persistence.schema.CollectionInfo;
-import org.apache.usergrid.utils.IndexUtils;
-import org.apache.usergrid.utils.MapUtils;
-
-import com.google.common.base.Preconditions;
-import com.yammer.metrics.annotation.Metered;
-
-import me.prettyprint.hector.api.Keyspace;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-import me.prettyprint.hector.api.beans.HColumn;
-import me.prettyprint.hector.api.mutation.Mutator;
-
-import static java.lang.String.CASE_INSENSITIVE_ORDER;
-import static java.util.Arrays.asList;
-import static me.prettyprint.hector.api.factory.HFactory.createMutator;
-import org.apache.usergrid.persistence.EntityManager;
-import static org.apache.usergrid.persistence.Schema.COLLECTION_ROLES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_COLLECTIONS;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTED_ENTITIES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTED_TYPES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTING_ENTITIES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTING_TYPES;
-import static org.apache.usergrid.persistence.Schema.INDEX_CONNECTIONS;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_COLLECTION_NAME;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_INACTIVITY;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_ITEM;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_ITEM_TYPE;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_NAME;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_TITLE;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_TYPE;
-import static org.apache.usergrid.persistence.Schema.TYPE_APPLICATION;
-import static org.apache.usergrid.persistence.Schema.TYPE_ENTITY;
-import static org.apache.usergrid.persistence.Schema.TYPE_MEMBER;
-import static org.apache.usergrid.persistence.Schema.TYPE_ROLE;
-import static org.apache.usergrid.persistence.Schema.defaultCollectionName;
-import static org.apache.usergrid.persistence.Schema.getDefaultSchema;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_COMPOSITE_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_ID_SETS;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX_ENTRIES;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addDeleteToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addInsertToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.batchExecute;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.INDEX_ENTRY_LIST_COUNT;
-import static org.apache.usergrid.persistence.cassandra.ConnectionRefImpl.CONNECTION_ENTITY_CONNECTION_TYPE;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchDeleteLocationInConnectionsIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchRemoveLocationFromCollectionIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchStoreLocationInCollectionIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchStoreLocationInConnectionsIndex;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.indexValueCode;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.toIndexableValue;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.validIndexableValue;
-import static org.apache.usergrid.utils.ClassUtils.cast;
-import static org.apache.usergrid.utils.CompositeUtils.setGreaterThanEqualityFlag;
-import static org.apache.usergrid.utils.ConversionUtils.string;
-import static org.apache.usergrid.utils.InflectionUtils.singularize;
-import static org.apache.usergrid.utils.MapUtils.addMapSet;
-import static org.apache.usergrid.utils.UUIDUtils.getTimestampInMicros;
-import static org.apache.usergrid.utils.UUIDUtils.newTimeUUID;
-
-import org.apache.usergrid.persistence.entities.Application;
-import org.apache.usergrid.persistence.index.query.Query.Level;
-
-
-public class RelationManagerImpl implements RelationManager {
-
-    private static final Logger logger = LoggerFactory.getLogger( RelationManagerImpl.class );
-
-    private EntityManager em;
-    private CassandraService cass;
-    private UUID applicationId;
-    private EntityRef headEntity;
-    private IndexBucketLocator indexBucketLocator;
-
-
-    public RelationManagerImpl() {
-    }
-
-
-    public RelationManagerImpl init( EntityManager em, CassandraService cass, UUID applicationId,
-                                     EntityRef headEntity, IndexBucketLocator indexBucketLocator ) {
-
-        Assert.notNull( em, "Entity manager cannot be null" );
-        Assert.notNull( cass, "Cassandra service cannot be null" );
-        Assert.notNull( applicationId, "Application Id cannot be null" );
-        Assert.notNull( headEntity, "Head entity cannot be null" );
-        Assert.notNull( headEntity.getUuid(), "Head entity uuid cannot be null" );
-        Assert.notNull( indexBucketLocator, "Index bucket locator cannot be null" );
-
-        this.em = em;
-        this.applicationId = applicationId;
-        this.cass = cass;
-        this.headEntity = headEntity;
-        this.indexBucketLocator = indexBucketLocator;
-
-        return this;
-    }
-
-
-    private RelationManagerImpl getRelationManager( EntityRef headEntity ) {
-        RelationManagerImpl rmi = new RelationManagerImpl();
-        rmi.init( em, cass, applicationId, headEntity, indexBucketLocator );
-        return rmi;
-    }
-
-
-    /** side effect: converts headEntity into an Entity if it is an EntityRef! */
-    private Entity getHeadEntity() throws Exception {
-        Entity entity = null;
-        if ( headEntity instanceof Entity ) {
-            entity = ( Entity ) headEntity;
-        }
-        else {
-            entity = em.get( headEntity );
-            headEntity = entity;
-        }
-        return entity;
-    }
-
-
-    /**
-     * Batch update collection index.
-     *
-     * @param indexUpdate The update to apply
-     * @param owner The entity that is the owner context of this entity update.  Can either be an application, or
-     * another entity
-     * @param collectionName the collection name
-     *
-     * @return The indexUpdate with batch mutations
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateCollectionIndex")
-    public IndexUpdate batchUpdateCollectionIndex( IndexUpdate indexUpdate, EntityRef owner, String collectionName )
-            throws Exception {
-
-        logger.debug( "batchUpdateCollectionIndex" );
-
-        Entity indexedEntity = indexUpdate.getEntity();
-
-        String bucketId = indexBucketLocator
-                .getBucket( applicationId, IndexType.COLLECTION, indexedEntity.getUuid(), indexedEntity.getType(),
-                        indexUpdate.getEntryName() );
-
-        // the root name without the bucket
-        // entity_id,collection_name,prop_name,
-        Object index_name = null;
-        // entity_id,collection_name,prop_name, bucketId
-        Object index_key = null;
-
-        // entity_id,collection_name,collected_entity_id,prop_name
-
-        for ( IndexEntry entry : indexUpdate.getPrevEntries() ) {
-
-            if ( entry.getValue() != null ) {
-
-                index_name = key( owner.getUuid(), collectionName, entry.getPath() );
-
-                index_key = key( index_name, bucketId );
-
-                addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, entry.getIndexComposite(),
-                        indexUpdate.getTimestamp() );
-
-                if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
-                            entry.getValue().toString() );
-                    batchRemoveLocationFromCollectionIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_name, loc );
-                }
-            }
-            else {
-                logger.error( "Unexpected condition - deserialized property value is null" );
-            }
-        }
-
-        if ( ( indexUpdate.getNewEntries().size() > 0 ) && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
-                && !indexUpdate.isRemoveListEntry() ) ) ) {
-
-            for ( IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
-
-                // byte valueCode = indexEntry.getValueCode();
-
-                index_name = key( owner.getUuid(), collectionName, indexEntry.getPath() );
-
-                index_key = key( index_name, bucketId );
-
-                // int i = 0;
-
-                addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key, indexEntry.getIndexComposite(),
-                        null, indexUpdate.getTimestamp() );
-
-                if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc =
-                            new EntityLocationRef( indexUpdate.getEntity(), indexEntry.getTimestampUuid(),
-                                    indexEntry.getValue().toString() );
-                    batchStoreLocationInCollectionIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_name, indexedEntity.getUuid(), loc );
-                }
-
-                // i++;
-            }
-        }
-
-        for ( String index : indexUpdate.getIndexesSet() ) {
-            addInsertToMutator( indexUpdate.getBatch(), ENTITY_DICTIONARIES,
-                    key( owner.getUuid(), collectionName, Schema.DICTIONARY_INDEXES ), index, null,
-                    indexUpdate.getTimestamp() );
-        }
-
-        return indexUpdate;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getCollectionIndexes")
-    public Set<String> getCollectionIndexes( String collectionName ) throws Exception {
-
-        // TODO TN, read all buckets here
-        List<HColumn<String, String>> results =
-                cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_DICTIONARIES,
-                        key( headEntity.getUuid(), collectionName, Schema.DICTIONARY_INDEXES ), Serializers.se, Serializers.se );
-        Set<String> indexes = new TreeSet<String>();
-        if ( results != null ) {
-            for ( HColumn<String, String> column : results ) {
-                String propertyName = column.getName();
-                if ( !propertyName.endsWith( ".keywords" ) ) {
-                    indexes.add( column.getName() );
-                }
-            }
-        }
-        return indexes;
-    }
-
-
-    public Map<EntityRef, Set<String>> getContainingCollections() throws Exception {
-        Map<EntityRef, Set<String>> results = new LinkedHashMap<EntityRef, Set<String>>();
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-
-        // TODO TN get all buckets here
-
-        List<HColumn<DynamicComposite, ByteBuffer>> containers = cass.getAllColumns( ko, ENTITY_COMPOSITE_DICTIONARIES,
-                key( headEntity.getUuid(), Schema.DICTIONARY_CONTAINER_ENTITIES ), Serializers.dce, Serializers.be );
-        if ( containers != null ) {
-            for ( HColumn<DynamicComposite, ByteBuffer> container : containers ) {
-                DynamicComposite composite = container.getName();
-                if ( composite != null ) {
-                    String ownerType = ( String ) composite.get( 0 );
-                    String collectionName = ( String ) composite.get( 1 );
-                    UUID ownerId = ( UUID ) composite.get( 2 );
-                    addMapSet( results, new SimpleEntityRef( ownerType, ownerId ), collectionName );
-                    if ( logger.isDebugEnabled() ) {
-                        logger.debug( " {} ( {} ) is in collection {} ( {} ).", new Object[] {
-                                headEntity.getType(), headEntity.getUuid(), ownerType, collectionName, ownerId
-                        } );
-                    }
-                }
-            }
-        }
-        EntityRef applicationRef = new SimpleEntityRef( TYPE_APPLICATION, applicationId );
-        if ( !results.containsKey( applicationRef ) ) {
-            addMapSet( results, applicationRef, defaultCollectionName( headEntity.getType() ) );
-        }
-        return results;
-    }
-
-
-    @SuppressWarnings("unchecked")
-    public void batchCreateCollectionMembership( Mutator<ByteBuffer> batch, EntityRef ownerRef, String collectionName,
-                                                 EntityRef itemRef, EntityRef membershipRef, UUID timestampUuid )
-            throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        if ( membershipRef == null ) {
-            membershipRef = new SimpleCollectionRef( ownerRef, collectionName, itemRef );
-        }
-
-        Map<String, Object> properties = new TreeMap<String, Object>( CASE_INSENSITIVE_ORDER );
-        properties.put( PROPERTY_TYPE, membershipRef.getType() );
-        properties.put( PROPERTY_COLLECTION_NAME, collectionName );
-        properties.put( PROPERTY_ITEM, itemRef.getUuid() );
-        properties.put( PROPERTY_ITEM_TYPE, itemRef.getType() );
-
-        em.batchCreate( batch, membershipRef.getType(), null, properties, membershipRef.getUuid(), timestampUuid );
-
-        addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                key( membershipRef.getUuid(), Schema.DICTIONARY_CONTAINER_ENTITIES ),
-                asList( ownerRef.getType(), collectionName, ownerRef.getUuid() ), membershipRef.getUuid(), timestamp );
-    }
-
-
-    /**
-     * Batch add to collection.
-     *
-     * @param batch the batch
-     * @param collectionName the collection name
-     * @param entity The entity to add to the batch
-     * @param timestampUuid The timestamp of this update in a time uuid
-     *
-     * @return batch
-     *
-     * @throws Exception the exception
-     */
-    public Mutator<ByteBuffer> batchAddToCollection( Mutator<ByteBuffer> batch, String collectionName, Entity entity,
-                                                     UUID timestampUuid ) throws Exception {
-        List<UUID> ids = new ArrayList<UUID>( 1 );
-        ids.add( headEntity.getUuid() );
-        return batchAddToCollections( batch, headEntity.getType(), ids, collectionName, entity, timestampUuid );
-    }
-
-
-    @SuppressWarnings("unchecked")
-    @Metered(group = "core", name = "RelationManager_batchAddToCollections")
-    public Mutator<ByteBuffer> batchAddToCollections( Mutator<ByteBuffer> batch, String ownerType, List<UUID> ownerIds,
-                                                      String collectionName, Entity entity, UUID timestampUuid )
-            throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        if ( Schema.isAssociatedEntityType( entity.getType() ) ) {
-            logger.error( "Cant add an extended type to any collection", new Throwable() );
-            return batch;
-        }
-
-        Map<UUID, CollectionRef> membershipRefs = new LinkedHashMap<UUID, CollectionRef>();
-
-        for ( UUID ownerId : ownerIds ) {
-
-            CollectionRef membershipRef =
-                    new SimpleCollectionRef( new SimpleEntityRef( ownerType, ownerId ), collectionName, entity );
-
-            membershipRefs.put( ownerId, membershipRef );
-
-            // get the bucket this entityId needs to be inserted into
-            String bucketId = indexBucketLocator
-                    .getBucket( applicationId, IndexType.COLLECTION, entity.getUuid(), collectionName );
-
-            Object collections_key = key( ownerId, Schema.DICTIONARY_COLLECTIONS, collectionName, bucketId );
-
-            // Insert in main collection
-
-            addInsertToMutator( batch, ENTITY_ID_SETS, collections_key, entity.getUuid(), membershipRef.getUuid(),
-                    timestamp );
-
-            addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( entity.getUuid(), Schema.DICTIONARY_CONTAINER_ENTITIES ),
-                    asList( ownerType, collectionName, ownerId ), membershipRef.getUuid(), timestamp );
-        }
-
-
-        Schema schema = getDefaultSchema();
-
-        // Add property indexes
-        for ( String propertyName : entity.getProperties().keySet() ) {
-            boolean indexed_property = schema.isPropertyIndexed( entity.getType(), propertyName );
-            if ( indexed_property ) {
-                boolean collection_indexes_property =
-                        schema.isPropertyIndexedInCollection( ownerType, collectionName, propertyName );
-                boolean item_schema_has_property = schema.hasProperty( entity.getType(), propertyName );
-                boolean fulltext_indexed = schema.isPropertyFulltextIndexed( entity.getType(), propertyName );
-                if ( collection_indexes_property || !item_schema_has_property ) {
-                    Object propertyValue = entity.getProperty( propertyName );
-                    IndexUpdate indexUpdate =
-                            batchStartIndexUpdate( batch, entity, propertyName, propertyValue, timestampUuid,
-                                    item_schema_has_property, false, false, fulltext_indexed, true );
-                    for ( UUID ownerId : ownerIds ) {
-                        EntityRef owner = new SimpleEntityRef( ownerType, ownerId );
-                        batchUpdateCollectionIndex( indexUpdate, owner, collectionName );
-                    }
-                }
-            }
-        }
-
-        // Add set property indexes
-
-        Set<String> dictionaryNames = em.getDictionaryNames( entity );
-
-        for ( String dictionaryName : dictionaryNames ) {
-            boolean has_dictionary = schema.hasDictionary( entity.getType(), dictionaryName );
-            boolean dictionary_indexed =
-                    schema.isDictionaryIndexedInCollection( ownerType, collectionName, dictionaryName );
-
-            if ( dictionary_indexed || !has_dictionary ) {
-                Set<Object> elementValues = em.getDictionaryAsSet( entity, dictionaryName );
-                for ( Object elementValue : elementValues ) {
-                    IndexUpdate indexUpdate =
-                            batchStartIndexUpdate( batch, entity, dictionaryName, elementValue, timestampUuid,
-                                    has_dictionary, true, false, false, true );
-                    for ( UUID ownerId : ownerIds ) {
-                        EntityRef owner = new SimpleEntityRef( ownerType, ownerId );
-                        batchUpdateCollectionIndex( indexUpdate, owner, collectionName );
-                    }
-                }
-            }
-        }
-
-        for ( UUID ownerId : ownerIds ) {
-            EntityRef owner = new SimpleEntityRef( ownerType, ownerId );
-            batchCreateCollectionMembership( batch, owner, collectionName, entity, membershipRefs.get( ownerId ),
-                    timestampUuid );
-        }
-
-        return batch;
-    }
-
-
-    /**
-     * Batch remove from collection.
-     * <p/>
-     * * Batch add to collection.
-     *
-     * @param batch the batch
-     * @param collectionName the collection name
-     * @param entity The entity to add to the batch
-     * @param timestampUuid The timestamp of this update in a time uuid
-     *
-     * @return The mutation with the delete operations added
-     *
-     * @throws Exception the exception
-     */
-    public Mutator<ByteBuffer> batchRemoveFromCollection( Mutator<ByteBuffer> batch, String collectionName,
-                                                          Entity entity, UUID timestampUuid ) throws Exception {
-        return this.batchRemoveFromCollection( batch, collectionName, entity, false, timestampUuid );
-    }
-
-
-    @SuppressWarnings("unchecked")
-    @Metered(group = "core", name = "RelationManager_batchRemoveFromCollection")
-    public Mutator<ByteBuffer> batchRemoveFromCollection( Mutator<ByteBuffer> batch, String collectionName,
-                                                          Entity entity, boolean force, UUID timestampUuid )
-            throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        if ( !force && headEntity.getUuid().equals( applicationId ) ) {
-            // Can't remove entities from root collections
-            return batch;
-        }
-
-        Object collections_key = key( headEntity.getUuid(), Schema.DICTIONARY_COLLECTIONS, collectionName,
-                indexBucketLocator.getBucket( applicationId, IndexType.COLLECTION, entity.getUuid(), collectionName ) );
-
-        // Remove property indexes
-
-        Schema schema = getDefaultSchema();
-        for ( String propertyName : entity.getProperties().keySet() ) {
-            boolean collection_indexes_property =
-                    schema.isPropertyIndexedInCollection( headEntity.getType(), collectionName, propertyName );
-            boolean item_schema_has_property = schema.hasProperty( entity.getType(), propertyName );
-            boolean fulltext_indexed = schema.isPropertyFulltextIndexed( entity.getType(), propertyName );
-            if ( collection_indexes_property || !item_schema_has_property ) {
-                IndexUpdate indexUpdate = batchStartIndexUpdate( batch, entity, propertyName, null, timestampUuid,
-                        item_schema_has_property, false, false, fulltext_indexed );
-                batchUpdateCollectionIndex( indexUpdate, headEntity, collectionName );
-            }
-        }
-
-        // Remove set indexes
-
-        Set<String> dictionaryNames = em.getDictionaryNames( entity );
-
-        for ( String dictionaryName : dictionaryNames ) {
-            boolean has_dictionary = schema.hasDictionary( entity.getType(), dictionaryName );
-            boolean dictionary_indexed =
-                    schema.isDictionaryIndexedInCollection( headEntity.getType(), collectionName, dictionaryName );
-
-            if ( dictionary_indexed || !has_dictionary ) {
-                Set<Object> elementValues = em.getDictionaryAsSet( entity, dictionaryName );
-                for ( Object elementValue : elementValues ) {
-                    IndexUpdate indexUpdate =
-                            batchStartIndexUpdate( batch, entity, dictionaryName, elementValue, timestampUuid,
-                                    has_dictionary, true, true, false );
-                    batchUpdateCollectionIndex( indexUpdate, headEntity, collectionName );
-                }
-            }
-        }
-
-        // Delete actual property
-
-        addDeleteToMutator( batch, ENTITY_ID_SETS, collections_key, entity.getUuid(), timestamp );
-
-        addDeleteToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                key( entity.getUuid(), Schema.DICTIONARY_CONTAINER_ENTITIES ),
-                asList( headEntity.getType(), collectionName, headEntity.getUuid() ), timestamp );
-
-        if ( !headEntity.getType().equalsIgnoreCase( TYPE_APPLICATION ) && !Schema
-                .isAssociatedEntityType( entity.getType() ) ) {
-
-            CollectionRef cref = new SimpleCollectionRef( headEntity, collectionName, entity ); 
-            em.delete( new SimpleEntityRef( cref.getType(), cref.getUuid() ) );
-        }
-
-        return batch;
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchDeleteConnectionIndexEntries")
-    public Mutator<ByteBuffer> batchDeleteConnectionIndexEntries( IndexUpdate indexUpdate, IndexEntry entry,
-                                                                  ConnectionRefImpl connection, UUID[] index_keys )
-            throws Exception {
-
-        // entity_id,prop_name
-        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, entry.getPath(),
-                indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL],
-                        entry.getPath() ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], entry.getPath() ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, property_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType(),
-                        connection.getConnectedEntityType() ), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType() ),
-                indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectedEntityType() ),
-                indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_and_entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId() ), indexUpdate.getTimestamp() );
-
-        return indexUpdate.getBatch();
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchAddConnectionIndexEntries")
-    public Mutator<ByteBuffer> batchAddConnectionIndexEntries( IndexUpdate indexUpdate, IndexEntry entry,
-                                                               ConnectionRefImpl connection, UUID[] index_keys ) {
-
-        // entity_id,prop_name
-        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, entry.getPath(),
-                indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL],
-                        entry.getPath() ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], entry.getPath() ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, property_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType(),
-                        connection.getConnectedEntityType() ), connection.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType() ),
-                connection.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectedEntityType() ),
-                connection.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_and_entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId() ), connection.getUuid(),
-                indexUpdate.getTimestamp() );
-
-        return indexUpdate.getBatch();
-    }
-
-
-    /**
-     * Batch update connection index.
-     *
-     * @param indexUpdate The update operation to perform
-     * @param connection The connection to update
-     *
-     * @return The index with the batch mutation udpated
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateConnectionIndex")
-    public IndexUpdate batchUpdateConnectionIndex( IndexUpdate indexUpdate, ConnectionRefImpl connection )
-            throws Exception {
-
-        // UUID connection_id = connection.getUuid();
-
-        UUID[] index_keys = connection.getIndexIds();
-
-        // Delete all matching entries from entry list
-        for ( IndexEntry entry : indexUpdate.getPrevEntries() ) {
-
-            if ( entry.getValue() != null ) {
-
-                batchDeleteConnectionIndexEntries( indexUpdate, entry, connection, index_keys );
-
-                if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
-                            entry.getValue().toString() );
-                    batchDeleteLocationInConnectionsIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_keys, entry.getPath(), loc );
-                }
-            }
-            else {
-                logger.error( "Unexpected condition - deserialized property value is null" );
-            }
-        }
-
-        if ( ( indexUpdate.getNewEntries().size() > 0 ) && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
-                && !indexUpdate.isRemoveListEntry() ) ) ) {
-
-            for ( IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
-
-                batchAddConnectionIndexEntries( indexUpdate, indexEntry, connection, index_keys );
-
-                if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc =
-                            new EntityLocationRef( indexUpdate.getEntity(), indexEntry.getTimestampUuid(),
-                                    indexEntry.getValue().toString() );
-                    batchStoreLocationInConnectionsIndex( indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_keys, indexEntry.getPath(), loc );
-                }
-            }
-
-      /*
-       * addInsertToMutator(batch, EntityCF.SETS, key(connection_id,
-       * Schema.INDEXES_SET), indexEntry.getKey(), null, false, timestamp); }
-       * 
-       * addInsertToMutator(batch, EntityCF.SETS, key(connection_id,
-       * Schema.INDEXES_SET), entryName, null, false, timestamp);
-       */
-        }
-
-        for ( String index : indexUpdate.getIndexesSet() ) {
-            addInsertToMutator( indexUpdate.getBatch(), ENTITY_DICTIONARIES,
-                    key( connection.getConnectingIndexId(), Schema.DICTIONARY_INDEXES ), index, null,
-                    indexUpdate.getTimestamp() );
-        }
-
-        return indexUpdate;
-    }
-
-
-    public Set<String> getConnectionIndexes( ConnectionRefImpl connection ) throws Exception {
-        List<HColumn<String, String>> results =
-                cass.getAllColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_DICTIONARIES,
-                        key( connection.getConnectingIndexId(), Schema.DICTIONARY_INDEXES ), Serializers.se, Serializers.se );
-        Set<String> indexes = new TreeSet<String>();
-        if ( results != null ) {
-            for ( HColumn<String, String> column : results ) {
-                String propertyName = column.getName();
-                if ( !propertyName.endsWith( ".keywords" ) ) {
-                    indexes.add( column.getName() );
-                }
-            }
-        }
-        return indexes;
-    }
-
-
-    /**
-     * Batch update backward connections property indexes.
-     *
-     * @param indexUpdate The update to run for incoming connections
-     *
-     * @return The index update to run
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateBackwardConnectionsPropertyIndexes")
-    public IndexUpdate batchUpdateBackwardConnectionsPropertyIndexes( IndexUpdate indexUpdate ) throws Exception {
-
-        logger.debug( "batchUpdateBackwordConnectionsPropertyIndexes" );
-
-        boolean entitySchemaHasProperty = indexUpdate.isSchemaHasProperty();
-
-        if ( entitySchemaHasProperty ) {
-            if ( !getDefaultSchema()
-                    .isPropertyIndexed( indexUpdate.getEntity().getType(), indexUpdate.getEntryName() ) ) {
-                return indexUpdate;
-            }
-        }
-
-
-        return doBackwardConnectionsUpdate( indexUpdate );
-    }
-
-
-    /**
-     * Search each reverse connection type in the graph for connections.  If one is found, update the index
-     * appropriately
-     *
-     * @param indexUpdate The index update to use
-     *
-     * @return The updated index update
-     */
-    private IndexUpdate doBackwardConnectionsUpdate( IndexUpdate indexUpdate ) throws Exception {
-        final Entity targetEntity = indexUpdate.getEntity();
-
-        final ConnectionTypesIterator connectionTypes =
-                new ConnectionTypesIterator( cass, applicationId, targetEntity.getUuid(), false, 100 );
-
-        for ( String connectionType : connectionTypes ) {
-
-            PagingResultsIterator itr = getReversedConnectionsIterator( targetEntity, connectionType );
-
-            for ( Object connection : itr ) {
-
-                final ConnectedEntityRef sourceEntity = ( ConnectedEntityRef ) connection;
-
-                //we need to create a connection ref from the source entity (found via reverse edge) to the entity
-                // we're about to update.  This is the index that needs updated
-                final ConnectionRefImpl connectionRef =
-                        new ConnectionRefImpl( sourceEntity, connectionType, indexUpdate.getEntity() );
-
-                batchUpdateConnectionIndex( indexUpdate, connectionRef );
-            }
-        }
-
-        return indexUpdate;
-    }
-
-
-    /**
-     * Get a paging results iterator.  Should return an iterator for all results
-     *
-     * @param targetEntity The target entity search connections from
-     *
-     * @return connectionType The name of the edges to search
-     */
-    private PagingResultsIterator getReversedConnectionsIterator( EntityRef targetEntity, String connectionType )
-            throws Exception {
-        return new PagingResultsIterator( getConnectingEntities( targetEntity, connectionType, null, Level.REFS ) );
-    }
-
-
-    /**
-     * Batch update backward connections set indexes.
-     *
-     * @param indexUpdate The index to update in the dictionary
-     *
-     * @return The index update
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateBackwardConnectionsDictionaryIndexes")
-    public IndexUpdate batchUpdateBackwardConnectionsDictionaryIndexes( IndexUpdate indexUpdate ) throws Exception {
-
-        logger.debug( "batchUpdateBackwardConnectionsListIndexes" );
-
-        boolean entityHasDictionary = getDefaultSchema()
-                .isDictionaryIndexedInConnections( indexUpdate.getEntity().getType(), indexUpdate.getEntryName() );
-
-        if ( !entityHasDictionary ) {
-            return indexUpdate;
-        }
-
-
-        return doBackwardConnectionsUpdate( indexUpdate );
-    }
-
-
-    @SuppressWarnings("unchecked")
-    @Metered(group = "core", name = "RelationManager_batchUpdateEntityConnection")
-    public Mutator<ByteBuffer> batchUpdateEntityConnection( Mutator<ByteBuffer> batch, 
-        boolean disconnect, ConnectionRefImpl connection, UUID timestampUuid ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        Entity connectedEntity = em.get( new SimpleEntityRef( 
-                connection.getConnectedEntityType(), connection.getConnectedEntityId()) );
-
-        if ( connectedEntity == null ) {
-            return batch;
-        }
-
-        // Create connection for requested params
-
-
-        if ( disconnect ) {
-            addDeleteToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectedEntityId(), connection.getConnectedEntityType() ), timestamp );
-
-            addDeleteToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectingEntityId(), connection.getConnectingEntityType() ), timestamp );
-
-            // delete the connection path if there will be no connections left
-
-            boolean delete = true;
-
-            //check out outbound edges of the given type.  If we have more than the 1 specified,
-            // we shouldn't delete the connection types from our outbound index
-            PagingResultsIterator itr = new PagingResultsIterator(
-                    getConnectedEntities( connection.getConnectingEntity(), connection.getConnectionType(), null,
-                            Level.REFS ) );
-
-            ConnectedEntityRef c;
-
-            while ( itr.hasNext() ) {
-                c = ( ConnectedEntityRef ) itr.next();
-
-                if ( !connection.getConnectedEntityId().equals( c.getUuid() ) ) {
-                    delete = false;
-                    break;
-                }
-
-
-                //        c = (ConnectionRef) itr.next();
-                //        if (c.getConnectedEntity().getConnectionType().equals(connection.getConnectedEntity()
-                // .getConnectionType()) &&!c.getConnectedEntity().getUuid().equals(connection.getConnectedEntity()
-                // .getUuid())) {
-                //            delete = false;
-                //            break;
-                //        }
-
-            }
-            //      for (ConnectionRefImpl c : getConnectionsWithEntity(connection.getConnectingEntityId())) {
-            //        if (c.getConnectedEntity().getConnectionType().equals(connection.getConnectedEntity()
-            // .getConnectionType())) {
-            //          if (!c.getConnectedEntity().getUuid().equals(connection.getConnectedEntity().getUuid())) {
-            //            delete = false;
-            //            break;
-            //          }
-            //        }
-            //      }
-            if ( delete ) {
-                addDeleteToMutator( batch, ENTITY_DICTIONARIES,
-                        key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                        connection.getConnectionType(), timestamp );
-            }
-
-            // delete the connection path if there will be no connections left
-            delete = true;
-
-
-            //check out inbound edges of the given type.  If we have more than the 1 specified,
-            // we shouldn't delete the connection types from our outbound index
-            itr = new PagingResultsIterator(
-                    getConnectingEntities( connection.getConnectingEntity(), connection.getConnectionType(), null,
-                            Level.REFS ) );
-
-            while ( itr.hasNext() ) {
-                c = ( ConnectedEntityRef ) itr.next();
-
-                if ( !connection.getConnectedEntityId().equals( c.getUuid() ) ) {
-                    delete = false;
-                    break;
-                }
-                //        if (c.getConnectedEntity().getConnectionType().equals(connection.getConnectedEntity()
-                // .getConnectionType()) && !c.getConnectingEntity().getUuid().equals(connection.getConnectingEntity
-                // ().getUuid())) {
-                //            delete = false;
-                //            break;
-                //        }
-
-            }
-
-            //      for (ConnectionRefImpl c : getConnectionsWithEntity(connection.getConnectedEntityId())) {
-            //        if (c.getConnectedEntity().getConnectionType().equals(connection.getConnectedEntity()
-            // .getConnectionType())) {
-            //          if (!c.getConnectingEntity().getUuid().equals(connection.getConnectingEntity().getUuid())) {
-            //            delete = false;
-            //            break;
-            //          }
-            //        }
-            //      }
-            if ( delete ) {
-                addDeleteToMutator( batch, ENTITY_DICTIONARIES,
-                        key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                        connection.getConnectionType(), timestamp );
-            }
-        }
-        else {
-            addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectedEntityId(), connection.getConnectedEntityType() ), timestamp,
-                    timestamp );
-
-            addInsertToMutator( batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                            connection.getConnectionType() ),
-                    asList( connection.getConnectingEntityId(), connection.getConnectingEntityType() ), timestamp,
-                    timestamp );
-
-            // Add connection type to connections set
-            addInsertToMutator( batch, ENTITY_DICTIONARIES,
-                    key( connection.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                    connection.getConnectionType(), null, timestamp );
-
-            // Add connection type to connections set
-            addInsertToMutator( batch, ENTITY_DICTIONARIES,
-                    key( connection.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                    connection.getConnectionType(), null, timestamp );
-        }
-
-        // Add property indexes
-
-        // Iterate though all the properties of the connected entity
-
-        Schema schema = getDefaultSchema();
-        for ( String propertyName : connectedEntity.getProperties().keySet() ) {
-            Object propertyValue = connectedEntity.getProperties().get( propertyName );
-
-            boolean indexed = schema.isPropertyIndexed( connectedEntity.getType(), propertyName );
-
-            boolean connection_indexes_property = schema.isPropertyIndexed( connectedEntity.getType(), propertyName );
-            boolean item_schema_has_property = schema.hasProperty( connectedEntity.getType(), propertyName );
-            boolean fulltext_indexed = schema.isPropertyFulltextIndexed( connectedEntity.getType(), propertyName );
-            // For each property, if the schema says it's indexed, update its
-            // index
-
-            if ( indexed && ( connection_indexes_property || !item_schema_has_property ) ) {
-                IndexUpdate indexUpdate =
-                        batchStartIndexUpdate( batch, connectedEntity, propertyName, disconnect ? null : propertyValue,
-                                timestampUuid, item_schema_has_property, false, false, fulltext_indexed );
-                batchUpdateConnectionIndex( indexUpdate, connection );
-            }
-        }
-
-        // Add indexes for the connected entity's list properties
-
-        // Get the names of the list properties in the connected entity
-        Set<String> dictionaryNames = em.getDictionaryNames( connectedEntity );
-
-        // For each list property, get the values in the list and
-        // update the index with those values
-
-        for ( String dictionaryName : dictionaryNames ) {
-            boolean has_dictionary = schema.hasDictionary( connectedEntity.getType(), dictionaryName );
-            boolean dictionary_indexed =
-                    schema.isDictionaryIndexedInConnections( connectedEntity.getType(), dictionaryName );
-
-            if ( dictionary_indexed || !has_dictionary ) {
-                Set<Object> elementValues = em.getDictionaryAsSet( connectedEntity, dictionaryName );
-                for ( Object elementValue : elementValues ) {
-                    IndexUpdate indexUpdate =
-                            batchStartIndexUpdate( batch, connectedEntity, dictionaryName, elementValue, timestampUuid,
-                                    has_dictionary, true, disconnect, false );
-                    batchUpdateConnectionIndex( indexUpdate, connection );
-                }
-            }
-        }
-
-        return batch;
-    }
-
-
-    public void updateEntityConnection( boolean disconnect, ConnectionRefImpl connection ) throws Exception {
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ), Serializers.be );
-
-        // Make or break the connection
-
-        batchUpdateEntityConnection( batch, disconnect, connection, timestampUuid );
-
-        // Make or break a connection from the connecting entity
-        // to the connection itself
-
-        ConnectionRefImpl loopback = connection.getConnectionToConnectionEntity();
-        if ( !disconnect ) {
-            em.insertEntity( new SimpleEntityRef( 
-                    CONNECTION_ENTITY_CONNECTION_TYPE, loopback.getConnectedEntityId() ) );
-        }
-
-        batchUpdateEntityConnection( batch, disconnect, loopback, timestampUuid );
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchDisconnect")
-    public void batchDisconnect( Mutator<ByteBuffer> batch, UUID timestampUuid ) throws Exception {
-
-
-        PagingResultsIterator itr =
-                new PagingResultsIterator( getConnectingEntities( headEntity, null, null, Level.REFS ) );
-
-        ConnectionRefImpl connection = null;
-
-        while ( itr.hasNext() ) {
-            Object itrObj = itr.next();
-            if ( itrObj instanceof ConnectionRefImpl ) {
-                connection = (ConnectionRefImpl) itrObj;
-            }
-            else if ( itrObj instanceof SimpleEntityRef ) {
-                connection = new ConnectionRefImpl( (SimpleEntityRef) itrObj );
-            }
-            else if ( itrObj instanceof EntityRef ) {
-                    connection = new ConnectionRefImpl( new SimpleEntityRef((EntityRef) itr.next()));
-            }
-            else if ( itrObj instanceof UUID ) {
-                    connection = new ConnectionRefImpl( new SimpleEntityRef((UUID)itr.next()));
-            }
-
-            batchUpdateEntityConnection( batch, true, connection, timestampUuid );
-        }
-    }
-
-
-    public IndexUpdate batchStartIndexUpdate( Mutator<ByteBuffer> batch, Entity entity, String entryName,
-                                              Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-                                              boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed )
-            throws Exception {
-        return batchStartIndexUpdate( batch, entity, entryName, entryValue, timestampUuid, schemaHasProperty,
-                isMultiValue, removeListEntry, fulltextIndexed, false );
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchStartIndexUpdate")
-    public IndexUpdate batchStartIndexUpdate( Mutator<ByteBuffer> batch, Entity entity, String entryName,
-                                              Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-                                              boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed,
-                                              boolean skipRead ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        IndexUpdate indexUpdate =
-                new IndexUpdate( batch, entity, entryName, entryValue, schemaHasProperty, isMultiValue, removeListEntry,
-                        timestampUuid );
-
-        // entryName = entryName.toLowerCase();
-
-        // entity_id,connection_type,connected_entity_id,prop_name
-
-        if ( !skipRead ) {
-
-            List<HColumn<ByteBuffer, ByteBuffer>> entries = null;
-
-            if ( isMultiValue && validIndexableValue( entryValue ) ) {
-                entries = cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_INDEX_ENTRIES,
-                        entity.getUuid(),
-                        new DynamicComposite( entryName, indexValueCode( entryValue ), toIndexableValue( entryValue ) ),
-                        setGreaterThanEqualityFlag( new DynamicComposite( entryName, indexValueCode( entryValue ),
-                                toIndexableValue( entryValue ) ) ), INDEX_ENTRY_LIST_COUNT, false );
-            }
-            else {
-                entries = cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_INDEX_ENTRIES,
-                        entity.getUuid(), new DynamicComposite( entryName ),
-                        setGreaterThanEqualityFlag( new DynamicComposite( entryName ) ), INDEX_ENTRY_LIST_COUNT,
-                        false );
-            }
-
-            if ( logger.isDebugEnabled() ) {
-                logger.debug( "Found {} previous index entries for {} of entity {}", new Object[] {
-                        entries.size(), entryName, entity.getUuid()
-                } );
-            }
-
-            // Delete all matching entries from entry list
-            for ( HColumn<ByteBuffer, ByteBuffer> entry : entries ) {
-                UUID prev_timestamp = null;
-                Object prev_value = null;
-                String prev_obj_path = null;
-
-                // new format:
-                // composite(entryName,
-                // value_code,prev_value,prev_timestamp,prev_obj_path) = null
-                DynamicComposite composite = DynamicComposite.fromByteBuffer( entry.getName().duplicate() );
-                prev_value = composite.get( 2 );
-                prev_timestamp = ( UUID ) composite.get( 3 );
-                if ( composite.size() > 4 ) {
-                    prev_obj_path = ( String ) composite.get( 4 );
-                }
-
-                if ( prev_value != null ) {
-
-                    String entryPath = entryName;
-                    if ( ( prev_obj_path != null ) && ( prev_obj_path.length() > 0 ) ) {
-                        entryPath = entryName + "." + prev_obj_path;
-                    }
-
-                    indexUpdate.addPrevEntry( entryPath, prev_value, prev_timestamp, entry.getName().duplicate() );
-
-                    // composite(property_value,connected_entity_id,entry_timestamp)
-                    // addDeleteToMutator(batch, ENTITY_INDEX_ENTRIES,
-                    // entity.getUuid(), entry.getName(), timestamp);
-
-                }
-                else {
-                    logger.error( "Unexpected condition - deserialized property value is null" );
-                }
-            }
-        }
-
-        if ( !isMultiValue || ( isMultiValue && !removeListEntry ) ) {
-
-            List<Map.Entry<String, Object>> list = IndexUtils.getKeyValueList( entryName, entryValue, fulltextIndexed );
-
-            if ( entryName.equalsIgnoreCase( "location" ) && ( entryValue instanceof Map ) ) {
-                @SuppressWarnings("rawtypes") double latitude =
-                        MapUtils.getDoubleValue( ( Map ) entryValue, "latitude" );
-                @SuppressWarnings("rawtypes") double longitude =
-                        MapUtils.getDoubleValue( ( Map ) entryValue, "longitude" );
-                list.add( new AbstractMap.SimpleEntry<String, Object>( "location.coordinates",
-                        latitude + "," + longitude ) );
-            }
-
-            for ( Map.Entry<String, Object> indexEntry : list ) {
-
-                if ( validIndexableValue( indexEntry.getValue() ) ) {
-                    indexUpdate.addNewEntry( indexEntry.getKey(), toIndexableValue( indexEntry.getValue() ) );
-                }
-            }
-
-            if ( isMultiValue ) {
-                addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                        asList( entryName, indexValueCode( entryValue ), toIndexableValue( entryValue ),
-                                indexUpdate.getTimestampUuid() ), null, timestamp );
-            }
-            else {
-                // int i = 0;
-
-                for ( Map.Entry<String, Object> indexEntry : list ) {
-
-                    String name = indexEntry.getKey();
-                    if ( name.startsWith( entryName + "." ) ) {
-                        name = name.substring( entryName.length() + 1 );
-                    }
-                    else if ( name.startsWith( entryName ) ) {
-                        name = name.substring( entryName.length() );
-                    }
-
-                    byte code = indexValueCode( indexEntry.getValue() );
-                    Object val = toIndexableValue( indexEntry.getValue() );
-                    addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                            asList( entryName, code, val, indexUpdate.getTimestampUuid(), name ), null, timestamp );
-
-                    indexUpdate.addIndex( indexEntry.getKey() );
-                }
-            }
-
-            indexUpdate.addIndex( entryName );
-        }
-
-        return indexUpdate;
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchUpdatePropertyIndexes")
-    public void batchUpdatePropertyIndexes( Mutator<ByteBuffer> batch, String propertyName, Object propertyValue,
-                                            boolean entitySchemaHasProperty, boolean noRead, UUID timestampUuid )
-            throws Exception {
-
-        Entity entity = getHeadEntity();
-
-        UUID associatedId = null;
-        String associatedType = null;
-
-        if ( Schema.isAssociatedEntityType( entity.getType() ) ) {
-            Object item = entity.getProperty( PROPERTY_ITEM );
-            if ( ( item instanceof UUID ) && ( entity.getProperty( PROPERTY_COLLECTION_NAME ) instanceof String ) ) {
-                associatedId = ( UUID ) item;
-                associatedType = string( entity.getProperty( PROPERTY_ITEM_TYPE ) );
-                String entryName = TYPE_MEMBER + "." + propertyName;
-                if ( logger.isDebugEnabled() ) {
-                    logger.debug( "Extended property {} ( {} ).{} indexed as {} ({})." + entryName, new Object[] {
-                            entity.getType(), entity.getUuid(), propertyName, associatedType, associatedId
-                    } );
-                }
-                propertyName = entryName;
-            }
-        }
-
-        IndexUpdate indexUpdate = batchStartIndexUpdate( batch, entity, propertyName, propertyValue, timestampUuid,
-                entitySchemaHasProperty, false, false,
-                getDefaultSchema().isPropertyFulltextIndexed( entity.getType(), propertyName ), noRead );
-
-        // Update collections
-
-        String effectiveType = entity.getType();
-        if ( associatedType != null ) {
-            indexUpdate.setAssociatedId( associatedId );
-            effectiveType = associatedType;
-        }
-
-        Map<String, Set<CollectionInfo>> containers = getDefaultSchema().getContainers( effectiveType );
-        if ( containers != null ) {
-
-            Map<EntityRef, Set<String>> containerEntities = null;
-            if ( noRead ) {
-                containerEntities = new LinkedHashMap<EntityRef, Set<String>>();
-                EntityRef applicationRef = new SimpleEntityRef( TYPE_APPLICATION, applicationId );
-                addMapSet( containerEntities, applicationRef, defaultCollectionName( entity.getType() ) );
-            }
-            else {
-                containerEntities = getContainingCollections();
-            }
-
-            for ( EntityRef containerEntity : containerEntities.keySet() ) {
-                if ( containerEntity.getType().equals( TYPE_APPLICATION ) && Schema
-                        .isAssociatedEntityType( entity.getType() ) ) {
-                    logger.debug( "Extended properties for {} not indexed by application", entity.getType() );
-                    continue;
-                }
-                Set<String> collectionNames = containerEntities.get( containerEntity );
-                Set<CollectionInfo> collections = containers.get( containerEntity.getType() );
-
-                if ( collections != null ) {
-                    for ( CollectionInfo collection : collections ) {
-                        if ( collectionNames.contains( collection.getName() ) ) {
-                            batchUpdateCollectionIndex( indexUpdate, containerEntity, collection.getName() );
-                        }
-                    }
-                }
-            }
-        }
-
-        if ( !noRead ) {
-            batchUpdateBackwardConnectionsPropertyIndexes( indexUpdate );
-        }
-
-        /**
-         * We've updated the properties, add the deletes to the ledger
-         *
-         */
-
-        for ( IndexEntry entry : indexUpdate.getPrevEntries() ) {
-            addDeleteToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(), entry.getLedgerColumn(),
-                    indexUpdate.getTimestamp() );
-        }
-    }
-
-
-    public void batchUpdateSetIndexes( Mutator<ByteBuffer> batch, String setName, Object elementValue,
-                                       boolean removeFromSet, UUID timestampUuid ) throws Exception {
-
-        Entity entity = getHeadEntity();
-
-        elementValue = getDefaultSchema().validateEntitySetValue( entity.getType(), setName, elementValue );
-
-        IndexUpdate indexUpdate =
-                batchStartIndexUpdate( batch, entity, setName, elementValue, timestampUuid, true, true, removeFromSet,
-                        false );
-
-        // Update collections
-        Map<String, Set<CollectionInfo>> containers =
-                getDefaultSchema().getContainersIndexingDictionary( entity.getType(), setName );
-
-        if ( containers != null ) {
-            Map<EntityRef, Set<String>> containerEntities = getContainingCollections();
-            for ( EntityRef containerEntity : containerEntities.keySet() ) {
-                if ( containerEntity.getType().equals( TYPE_APPLICATION ) && Schema
-                        .isAssociatedEntityType( entity.getType() ) ) {
-                    logger.debug( "Extended properties for {} not indexed by application", entity.getType() );
-                    continue;
-                }
-                Set<String> collectionNames = containerEntities.get( containerEntity );
-                Set<CollectionInfo> collections = containers.get( containerEntity.getType() );
-
-                if ( collections != null ) {
-
-                    for ( CollectionInfo collection : collections ) {
-                        if ( collectionNames.contains( collection.getName() ) ) {
-
-                            batchUpdateCollectionIndex( indexUpdate, containerEntity, collection.getName() );
-                        }
-                    }
-                }
-            }
-        }
-
-        batchUpdateBackwardConnectionsDictionaryIndexes( indexUpdate );
-    }
-
-
-    private IndexScanner searchIndex( Object indexKey, QuerySlice slice, int pageSize ) throws Exception {
-
-        DynamicComposite[] range = slice.getRange();
-
-        Object keyPrefix = key( indexKey, slice.getPropertyName() );
-
-        IndexScanner scanner =
-                new IndexBucketScanner( cass, indexBucketLocator, ENTITY_INDEX, applicationId, IndexType.CONNECTION,
-                        keyPrefix, range[0], range[1], slice.isReversed(), pageSize, slice.hasCursor(), slice.getPropertyName() );
-
-        return scanner;
-    }
-
-
-    /**
-     * Search the collection index using all the buckets for the given collection
-     *
-     * @param indexKey The index key to read
-     * @param slice Slice set in the query
-     * @param collectionName The name of the collection to search
-     * @param pageSize The page size to load when iterating
-     */
-    private IndexScanner searchIndexBuckets( Object indexKey, QuerySlice slice, String collectionName, int pageSize )
-            throws Exception {
-
-        DynamicComposite[] range = slice.getRange();
-
-        Object keyPrefix = key( indexKey, slice.getPropertyName() );
-
-        IndexScanner scanner =
-                new IndexBucketScanner( cass, indexBucketLocator, ENTITY_INDEX, applicationId, IndexType.COLLECTION,
-                        keyPrefix, range[0], range[1], slice.isReversed(), pageSize, slice.hasCursor(), collectionName );
-
-        return scanner;
-    }
-
-
-    @SuppressWarnings("unchecked")
-    @Override
-    @Metered(group = "core", name = "RelationManager_isOwner")
-    public boolean isCollectionMember( String collectionName, EntityRef entity ) throws Exception {
-
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-
-        ByteBuffer col = DynamicComposite
-                .toByteBuffer( asList( this.headEntity.getType(), collectionName, headEntity.getUuid() ) );
-
-        HColumn<ByteBuffer, ByteBuffer> result = cass.getColumn( ko, ENTITY_COMPOSITE_DICTIONARIES,
-                key( entity.getUuid(), Schema.DICTIONARY_CONTAINER_ENTITIES ), col, Serializers.be, Serializers.be );
-
-        return result != null;
-    }
-
-
-    /** @param connectionName The name of hte connection */
-    public boolean isConnectionMember( String connectionName, EntityRef entity ) throws Exception {
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-
-        Object key = key( this.headEntity.getUuid(), DICTIONARY_CONNECTED_ENTITIES, connectionName );
-
-        DynamicComposite start = new DynamicComposite( entity.getUuid() );
-
-        List<HColumn<ByteBuffer, ByteBuffer>> cols =
-                cass.getColumns( ko, ENTITY_COMPOSITE_DICTIONARIES, key, start, null, 1, false );
-
-        if ( cols == null || cols.size() == 0 ) {
-            return false;
-        }
-
-        UUID returnedUUID = ( UUID ) DynamicComposite.fromByteBuffer( cols.get( 0 ).getName() ).get( 0 );
-
-        return entity.getUuid().equals( returnedUUID );
-
-
-        //    addDeleteToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
-        //        key(connection.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-        // connection.getConnectionType()),
-        //        asList(connection.getConnectingEntityId(), connection.getConnectingEntityType()), timestamp);
-        //
-        //
-        //    ConnectionRefImpl ref = new ConnectionRefImpl(this.headEntity, connectionName, entity);
-        //
-        //
-        //
-        //
-        //
-        //
-        //    HColumn<String, UUID> col = cass.getColumn(ko, ENTITY_CONNECTIONS, ref.getUuid(),
-        //        ConnectionRefImpl.CONNECTED_ENTITY_ID, se, ue);
-        //
-        //
-        //    getConnectedEntities(this.headEntity, connectionName, )
-        //
-        //    return col != null && entity.getUuid().equals(col.getValue());
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getOwners")
-    public Map<String, Map<UUID, Set<String>>> getOwners() throws Exception {
-        Map<EntityRef, Set<String>> containerEntities = getContainingCollections();
-        Map<String, Map<UUID, Set<String>>> owners = new LinkedHashMap<String, Map<UUID, Set<String>>>();
-
-        for ( EntityRef owner : containerEntities.keySet() ) {
-            Set<String> collections = containerEntities.get( owner );
-            for ( String collection : collections ) {
-                MapUtils.addMapMapSet( owners, owner.getType(), owner.getUuid(), collection );
-            }
-        }
-
-        return owners;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getCollections")
-    public Set<String> getCollections() throws Exception {
-
-        Map<String, CollectionInfo> collections = getDefaultSchema().getCollections( headEntity.getType() );
-        if ( collections == null ) {
-            return null;
-        }
-
-        return collections.keySet();
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getCollection_start_result")
-    public Results getCollection( String collectionName, UUID startResult, int count, Level resultsLevel,
-                                  boolean reversed ) throws Exception {
-        // changed intentionally to delegate to search so that behavior is
-        // consistent across all index access.
-
-        // TODO T.N fix cursor parsing here so startResult can be used in this
-        // context. Needs a bit of refactor
-        // for accommodating cursor I/O USERGRID-1750. A bit hacky, but until a
-        // furthur refactor this works.
-
-        Query query = new Query().withResultsLevel( resultsLevel ).withReversed( reversed ).withLimit( count )
-                                 .withStartResult( startResult );
-
-        return searchCollection( collectionName, query );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getCollecitonForQuery")
-    public Results getCollection( String collectionName, Query query, Level resultsLevel ) throws Exception {
-
-        // changed intentionally to delegate to search so that behavior is
-        // consistent across all index access.
-
-        return searchCollection( collectionName, query );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_addToCollection")
-    public Entity addToCollection( String collectionName, EntityRef itemRef ) throws Exception {
-
-        Entity itemEntity = em.get( itemRef );
-
-        if ( itemEntity == null ) {
-            return null;
-        }
-
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collectionName );
-        if ( ( collection != null ) && !collection.getType().equals( itemRef.getType() ) ) {
-            return null;
-        }
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator(cass.getApplicationKeyspace( applicationId ), Serializers.be );
-
-        batchAddToCollection( batch, collectionName, itemEntity, timestampUuid );
-
-        if ( collection.getLinkedCollection() != null ) {
-            getRelationManager( itemEntity )
-                    .batchAddToCollection( batch, collection.getLinkedCollection(), getHeadEntity(), timestampUuid );
-        }
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-
-        return itemEntity;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_addToCollections")
-    public Entity addToCollections( List<EntityRef> owners, String collectionName ) throws Exception {
-
-        Entity itemEntity = getHeadEntity();
-
-        Map<String, List<UUID>> collectionsByType = new LinkedHashMap<String, List<UUID>>();
-        for ( EntityRef owner : owners ) {
-            MapUtils.addMapList( collectionsByType, owner.getType(), owner.getUuid() );
-        }
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ), Serializers.be );
-
-        Schema schema = getDefaultSchema();
-        for ( Entry<String, List<UUID>> entry : collectionsByType.entrySet() ) {
-            CollectionInfo collection = schema.getCollection( entry.getKey(), collectionName );
-            if ( ( collection != null ) && !collection.getType().equals( headEntity.getType() ) ) {
-                continue;
-            }
-            batchAddToCollections( batch, entry.getKey(), entry.getValue(), collectionName, itemEntity, timestampUuid );
-
-            if ( collection.getLinkedCollection() != null ) {
-                logger.error(
-                        "Bulk add to collections used on a linked collection, linked connection will not be updated" );
-            }
-        }
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-
-        return null;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_createItemInCollection")
-    public Entity createItemInCollection( String collectionName, String itemType, Map<String, Object> properties )
-            throws Exception {
-
-        if ( headEntity.getUuid().equals( applicationId ) ) {
-            if ( itemType.equals( TYPE_ENTITY ) ) {
-                itemType = singularize( collectionName );
-            }
-            if ( itemType.equals( TYPE_ROLE ) ) {
-                Long inactivity = ( Long ) properties.get( PROPERTY_INACTIVITY );
-                if ( inactivity == null ) {
-                    inactivity = 0L;
-                }
-                return em.createRole( ( String ) properties.get( PROPERTY_NAME ),
-                        ( String ) properties.get( PROPERTY_TITLE ), inactivity );
-            }
-            return em.create( itemType, properties );
-        }
-        else if ( headEntity.getType().equals( Group.ENTITY_TYPE ) && ( collectionName.equals( COLLECTION_ROLES ) ) ) {
-            UUID groupId = headEntity.getUuid();
-            String roleName = ( String ) properties.get( PROPERTY_NAME );
-            return em.createGroupRole( groupId, roleName, ( Long ) properties.get( PROPERTY_INACTIVITY ) );
-        }
-
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collectionName );
-        if ( ( collection != null ) && !collection.getType().equals( itemType ) ) {
-            return null;
-        }
-
-        properties = getDefaultSchema().cleanUpdatedProperties( itemType, properties, true );
-
-        Entity itemEntity = em.create( itemType, properties );
-
-        if ( itemEntity != null ) {
-            UUID timestampUuid = newTimeUUID();
-            Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ), Serializers.be );
-
-            batchAddToCollection( batch, collectionName, itemEntity, timestampUuid );
-
-            if ( collection.getLinkedCollection() != null ) {
-                getRelationManager( itemEntity )
-                        .batchAddToCollection( batch, collection.getLinkedCollection(), getHeadEntity(),
-                                timestampUuid );
-            }
-
-            batchExecute( batch, CassandraService.RETRY_COUNT );
-        }
-
-        return itemEntity;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_removeFromCollection")
-    public void removeFromCollection( String collectionName, EntityRef itemRef ) throws Exception {
-
-        if ( headEntity.getUuid().equals( applicationId ) ) {
-            if ( collectionName.equals( COLLECTION_ROLES ) ) {
-                Entity itemEntity = em.get( itemRef );
-                if ( itemEntity != null ) {
-                    RoleRef roleRef = SimpleRoleRef.forRoleEntity( itemEntity );
-                    em.deleteRole( roleRef.getApplicationRoleName() );
-                    return;
-                }
-                em.delete( itemEntity );
-                return;
-            }
-            em.delete( itemRef );
-            return;
-        }
-
-        Entity itemEntity = em.get( itemRef );
-
-        if ( itemEntity == null ) {
-            return;
-        }
-
-        UUID timestampUuid = newTimeUUID();
-        Mutator<ByteBuffer> batch = CountingMutator.createFlushingMutator( cass.getApplicationKeyspace( applicationId ), Serializers.be );
-
-        batchRemoveFromCollection( batch, collectionName, itemEntity, timestampUuid );
-
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collectionName );
-        if ( ( collection != null ) && ( collection.getLinkedCollection() != null ) ) {
-            getRelationManager( itemEntity )
-                    .batchRemoveFromCollection( batch, collection.getLinkedCollection(), getHeadEntity(),
-                            timestampUuid );
-        }
-
-        batchExecute( batch, CassandraService.RETRY_COUNT );
-
-        if ( headEntity.getType().equals( Group.ENTITY_TYPE ) ) {
-            if ( collectionName.equals( COLLECTION_ROLES ) ) {
-                String path = ( String ) ( ( Entity ) itemRef ).getMetadata( "path" );
-                if ( path.startsWith( "/roles/" ) ) {
-                    RoleRef roleRef = SimpleRoleRef.forRoleEntity( itemEntity );
-                    em.deleteRole( roleRef.getApplicationRoleName() );
-                }
-            }
-        }
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchRemoveFromContainers")
-    public void batchRemoveFromContainers( Mutator<ByteBuffer> m, UUID timestampUuid ) throws Exception {
-        Entity entity = getHeadEntity();
-        // find all the containing collections
-        Map<EntityRef, Set<String>> containers = getContainingCollections();
-        if ( containers != null ) {
-            for ( Entry<EntityRef, Set<String>> container : containers.entrySet() ) {
-                for ( String collectionName : container.getValue() ) {
-                    getRelationManager( container.getKey() )
-                            .batchRemoveFromCollection( m, collectionName, entity, true, timestampUuid );
-                }
-            }
-        }
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_copyRelationships")
-    public void copyRelationships( String srcRelationName, EntityRef dstEntityRef, String dstRelationName )
-            throws Exception {
-
-        headEntity = em.validate( headEntity );
-        dstEntityRef = em.validate( dstEntityRef );
-
-        CollectionInfo srcCollection = getDefaultSchema().getCollection( headEntity.getType(), srcRelationName );
-
-        CollectionInfo dstCollection = getDefaultSchema().getCollection( dstEntityRef.getType(), dstRelationName );
-
-        Results results = null;
-        do {
-            if ( srcCollection != null ) {
-                results = em.getCollection( headEntity, srcRelationName, null, 5000, Level.REFS, false );
-            }
-            else {
-                results = em.getConnectedEntities( headEntity, srcRelationName, null, Level.REFS );
-            }
-
-            if ( ( results != null ) && ( results.size() > 0 ) ) {
-                List<EntityRef> refs = results.getRefs();
-                for ( EntityRef ref : refs ) {
-                    if ( dstCollection != null ) {
-                        em.addToCollection( dstEntityRef, dstRelationName, ref );
-                    }
-                    else {
-                        em.createConnection( dstEntityRef, dstRelationName, ref );
-                    }
-                }
-            }
-        }
-        while ( ( results != null ) && ( results.hasMoreResults() ) );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_searchCollection")
-    public Results searchCollection( String collectionName, Query query ) throws Exception {
-
-        if ( query == null ) {
-            query = new Query();
-        }
-
-        headEntity = em.validate( headEntity );
-
-        CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collectionName );
-
-        query.setEntityType( collection.getType() );
-
-        final CollectionResultsLoaderFactory factory = new CollectionResultsLoaderFactory();
-
-        // we have something to search with, visit our tree and evaluate the
-        // results
-        QueryProcessorImpl qp = new QueryProcessorImpl( query, collection, em, factory );
-        SearchCollectionVisitor visitor = new SearchCollectionVisitor( qp );
-
-        return qp.getResults( visitor );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_createConnection_connection_ref")
-    public ConnectionRef createConnection( ConnectionRef connection ) throws Exception {
-        ConnectionRefImpl connectionImpl = new ConnectionRefImpl( connection );
-
-        updateEntityConnection( false, connectionImpl );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_createConnection_connectionType")
-    public ConnectionRef createConnection( String connectionType, EntityRef connectedEntityRef ) throws Exception {
-
-        headEntity = em.validate( headEntity );
-        connectedEntityRef = em.validate( connectedEntityRef );
-
-        ConnectionRefImpl connection = new ConnectionRefImpl( headEntity, connectionType, connectedEntityRef );
-
-        updateEntityConnection( false, connection );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_createConnection_paired_connection_type")
-    public ConnectionRef createConnection( String pairedConnectionType, EntityRef pairedEntity, String connectionType,
-                                           EntityRef connectedEntityRef ) throws Exception {
-
-        ConnectionRefImpl connection =
-                new ConnectionRefImpl( headEntity, new ConnectedEntityRefImpl( pairedConnectionType, pairedEntity ),
-                        new ConnectedEntityRefImpl( connectionType, connectedEntityRef ) );
-
-        updateEntityConnection( false, connection );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_createConnection_connected_entity_ref")
-    public ConnectionRef createConnection( ConnectedEntityRef... connections ) throws Exception {
-
-        ConnectionRefImpl connection = new ConnectionRefImpl( headEntity, connections );
-
-        updateEntityConnection( false, connection );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_connectionRef_type_entity")
-    public ConnectionRef connectionRef( String connectionType, EntityRef connectedEntityRef ) throws Exception {
-
-        ConnectionRef connection = new ConnectionRefImpl( headEntity, connectionType, connectedEntityRef );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_connectionRef_entity_to_entity")
-    public ConnectionRef connectionRef( String pairedConnectionType, EntityRef pairedEntity, String connectionType,
-                                        EntityRef connectedEntityRef ) throws Exception {
-
-        ConnectionRef connection =
-                new ConnectionRefImpl( headEntity, new ConnectedEntityRefImpl( pairedConnectionType, pairedEntity ),
-                        new ConnectedEntityRefImpl( connectionType, connectedEntityRef ) );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_connectionRef_connections")
-    public ConnectionRef connectionRef( ConnectedEntityRef... connections ) {
-
-        ConnectionRef connection = new ConnectionRefImpl( headEntity, connections );
-
-        return connection;
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_deleteConnection")
-    public void deleteConnection( ConnectionRef connectionRef ) throws Exception {
-        updateEntityConnection( true, new ConnectionRefImpl( connectionRef ) );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "RelationManager_getConnectionTypes_entity_id")
-    public Set<String> getConnectionTypes( UUID connectedEntityId ) throws Exception {
-        // Add connection type to connections set
-        //    addInsertToMutator(batch, ENTITY_DICTIONARIES,
-        //        key(connection.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES),
-        // connection.getConnectionType(), null,
-        //        timestamp);
-        //
-        //    // Add connection type to connections set
-        //    addInsertToMutator(batch, ENTITY_DICTIONARIES,
-        //        key(connection.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES),
-        // connection.getConnectionType(), null,
-        //        timestamp);
-        //
-        //
-        //    Object key = key(connectedEntityId, DICTIONARY_CONNECTED_TYPES);
-
-        Set<String> connections = cast( em.getDictionaryAsSet( new SimpleEntityRef( connectedEntityId ),
-                Schema.DICTIONARY_CONNECTED_TYPES ) );
-
-        return connections;
-
-        //    Set<String> connection_types = new TreeSet<String>(CASE_INSENSITIVE_ORDER);
-        //
-        //    //TODO T.N. get this from the dictionary
-        //    List<ConnectionRefImpl> connections = getConnections(new ConnectionRefImpl(headEntity,
-        // new ConnectedEntityRefImpl(
-        //        NULL_ID), new ConnectedEntityRefImpl(connectedEntityId)), false);
-        //
-        //    for (ConnectionRefImpl connection : connections) {
-        //      if ((connection.getConnectionType() != null) && (connection.getFirstPairedConnectedEntityId() ==
-        // null)) {
-        //        connection_types.add(connection.getConnectionType());
-        //      }
-        //   

<TRUNCATED>

[09/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/EntityManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/EntityManager.java b/stack/core/src/main/java/org/apache/usergrid/persistence/EntityManager.java
index 6e571d1..b0dba1d 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/EntityManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/EntityManager.java
@@ -17,24 +17,24 @@
 package org.apache.usergrid.persistence;
 
 
-import org.apache.usergrid.persistence.index.query.Query;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import me.prettyprint.hector.api.mutation.Mutator;
 
 import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.cassandra.GeoIndexManager;
 import org.apache.usergrid.persistence.core.util.Health;
 import org.apache.usergrid.persistence.entities.Application;
 import org.apache.usergrid.persistence.entities.Role;
 import org.apache.usergrid.persistence.index.query.CounterResolution;
 import org.apache.usergrid.persistence.index.query.Identifier;
+import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.persistence.index.query.Query.Level;
 
+import me.prettyprint.hector.api.mutation.Mutator;
+
 
 /**
  * The interface class for the data access object for Applications. Each application contains a set of users as well as
@@ -42,9 +42,6 @@ import org.apache.usergrid.persistence.index.query.Query.Level;
  */
 public interface EntityManager {
 
-//    public void setApplicationId( UUID applicationId );
-
-    public GeoIndexManager getGeoIndexManager();
 
     public EntityRef getApplicationRef();
 
@@ -52,8 +49,6 @@ public interface EntityManager {
 
     public void updateApplication( Application app ) throws Exception;
 
-    public void updateApplication( Map<String, Object> properties ) throws Exception;
-
     public RelationManager getRelationManager( EntityRef entityRef );
 
     /** Get all collections for the application. Includes both user defined collections and schema collections */
@@ -89,7 +84,7 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public Entity create( UUID importId, String entityType, Map<String, Object> properties ) 
+    public Entity create( UUID importId, String entityType, Map<String, Object> properties )
             throws Exception;
 
     public void createApplicationCollection( String entityType ) throws Exception;
@@ -103,7 +98,7 @@ public interface EntityManager {
      * @param collectionName The name of the collection
      * @param aliasValue The value of the alias
      */
-    public EntityRef getAlias( EntityRef ownerRef, String collectionName, String aliasValue ) 
+    public EntityRef getAlias( EntityRef ownerRef, String collectionName, String aliasValue )
             throws Exception;
 
     public Map<String, EntityRef> getAlias( String aliasType, List<String> aliases ) throws Exception;
@@ -115,11 +110,11 @@ public interface EntityManager {
      * @param collectionName The name of the collection
      * @param aliases The alias property
      */
-    public Map<String, EntityRef> getAlias( EntityRef ownerRef, String collectionName, 
+    public Map<String, EntityRef> getAlias( EntityRef ownerRef, String collectionName,
             List<String> aliases ) throws Exception;
 
     /**
-     * Validates that the entity exists in the datastore meaning that it exists and the type has 
+     * Validates that the entity exists in the datastore meaning that it exists and the type has
      * been loaded if not already provided.
      *
      * @return an validated EntityRef or null.
@@ -142,7 +137,7 @@ public interface EntityManager {
      *
      * @return a list of entity objects.
      */
-    public Results get( Collection<UUID> entityIds, Class<? extends Entity> entityClass, 
+    public Results get( Collection<UUID> entityIds, Class<? extends Entity> entityClass,
             Level resultsLevel ) throws Exception;
 
     /**
@@ -150,7 +145,7 @@ public interface EntityManager {
      *
      * @return a list of entity objects.
      */
-    public Results get( Collection<UUID> entityIds, String entityType, 
+    public Results get( Collection<UUID> entityIds, String entityType,
         Class<? extends Entity> entityClass, Level resultsLevel ) throws Exception;
 
     public Results getEntities(List<UUID> ids, String type);
@@ -175,13 +170,6 @@ public interface EntityManager {
     public Object getProperty( EntityRef entityRef, String propertyName ) throws Exception;
 
     /**
-     * Do a single load of all entities with the given properties.  Efficient if you have a subset of properties, and
-     * know the ids of them.  The entity UUID is in the key, the runtime subtype of Entity is in the value.  Note that
-     * if an entity cannot be loaded (id doesn't exist) it is simply ignored
-     */
-    public List<Entity> getPartialEntities( Collection<UUID> ids, Collection<String> properties ) throws Exception;
-
-    /**
      * Gets the properties for the specified entity property.
      *
      * @param entityRef an entity reference
@@ -222,13 +210,13 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public void updateProperties( EntityRef entityRef, Map<String, Object> properties ) 
+    public void updateProperties( EntityRef entityRef, Map<String, Object> properties )
             throws Exception;
 
     public void deleteProperty( EntityRef entityRef, String propertyName ) throws Exception;
 
     /**
-     * Gets the values from an entity list property. Lists are a special type of entity property 
+     * Gets the values from an entity list property. Lists are a special type of entity property
      * that can contain an unordered set of non-duplicate values.
      *
      * @param entityRef an entity reference
@@ -238,11 +226,11 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public Set<Object> getDictionaryAsSet( EntityRef entityRef, String dictionaryName ) 
+    public Set<Object> getDictionaryAsSet( EntityRef entityRef, String dictionaryName )
             throws Exception;
 
     /**
-     * Adds the specified value to the named entity list property. Lists are a special type of 
+     * Adds the specified value to the named entity list property. Lists are a special type of
      * entity property that can contain an unordered set of non-duplicate values.
      *
      * @param entityRef an entity reference
@@ -251,26 +239,26 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementValue ) 
+    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementValue )
             throws Exception;
 
-    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementName, 
+    public void addToDictionary( EntityRef entityRef, String dictionaryName, Object elementName,
             Object elementValue ) throws Exception;
 
-    public void addSetToDictionary( EntityRef entityRef, String dictionaryName, 
+    public void addSetToDictionary( EntityRef entityRef, String dictionaryName,
             Set<?> elementValues ) throws Exception;
 
-    public void addMapToDictionary( EntityRef entityRef, String dictionaryName, 
+    public void addMapToDictionary( EntityRef entityRef, String dictionaryName,
             Map<?, ?> elementValues ) throws Exception;
 
-    public Map<Object, Object> getDictionaryAsMap( EntityRef entityRef, String dictionaryName ) 
+    public Map<Object, Object> getDictionaryAsMap( EntityRef entityRef, String dictionaryName )
             throws Exception;
 
-    public Object getDictionaryElementValue( EntityRef entityRef, String dictionaryName, 
+    public Object getDictionaryElementValue( EntityRef entityRef, String dictionaryName,
             String elementName ) throws Exception;
 
     /**
-     * Removes the specified value to the named entity list property. Lists are a special type of 
+     * Removes the specified value to the named entity list property. Lists are a special type of
      * entity property that can contain an unordered set of non-duplicate values.
      *
      * @param entityRef an entity reference
@@ -293,17 +281,6 @@ public interface EntityManager {
      */
     public void delete( EntityRef entityRef ) throws Exception;
 
-    /**
-     * Gets the entities and collections that the specified entity is a member of.
-     *
-     * @param entityRef an entity reference
-     *
-     * @return a map of entity references to set of collection names for the entities and 
-     * collections that this entity is a member of.
-     *
-     * @throws Exception the exception
-     */
-    public Map<String, Map<UUID, Set<String>>> getOwners( EntityRef entityRef ) throws Exception;
 
     /**
      * Return true if the owner entity ref is an owner of the entity;
@@ -312,7 +289,7 @@ public interface EntityManager {
      * @param collectionName The collection name
      * @param entity The entity in the collection
      */
-    public boolean isCollectionMember( EntityRef owner, String collectionName, EntityRef entity ) 
+    public boolean isCollectionMember( EntityRef owner, String collectionName, EntityRef entity )
             throws Exception;
 
     /**
@@ -322,12 +299,12 @@ public interface EntityManager {
      * @param connectionName The collection name
      * @param entity The entity in the collection
      */
-    public boolean isConnectionMember( EntityRef owner, String connectionName, EntityRef entity ) 
+    public boolean isConnectionMember( EntityRef owner, String connectionName, EntityRef entity )
             throws Exception;
 
 
     /**
-     * Gets the collections for the specified entity. Collection for a given type are encoded 
+     * Gets the collections for the specified entity. Collection for a given type are encoded
      * in the schema, this method loads the entity type and returns the collections from the schema.
      *
      * @param entityRef an entity reference
@@ -391,20 +368,20 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public void removeFromCollection( EntityRef entityRef, String collectionName, EntityRef itemRef) 
+    public void removeFromCollection( EntityRef entityRef, String collectionName, EntityRef itemRef)
             throws Exception;
 
-    public Results searchCollection( EntityRef entityRef, String collectionName, Query query ) 
+    public Results searchCollection( EntityRef entityRef, String collectionName, Query query )
             throws Exception;
 
-    public Set<String> getCollectionIndexes( EntityRef entity, String collectionName ) 
+    public Set<String> getCollectionIndexes( EntityRef entity, String collectionName )
             throws Exception;
 
-    public void copyRelationships( EntityRef srcEntityRef, String srcRelationName, 
+    public void copyRelationships( EntityRef srcEntityRef, String srcRelationName,
             EntityRef dstEntityRef, String dstRelationName ) throws Exception;
 
     /**
-     * Connect the specified entity to another entity with the specified connection type. 
+     * Connect the specified entity to another entity with the specified connection type.
      * Connections are directional relationships that can be traversed in either direction.
      *
      * @throws Exception the exception
@@ -415,24 +392,24 @@ public interface EntityManager {
                                            EntityRef connectedEntityRef ) throws Exception;
 
     public ConnectionRef createConnection( EntityRef connectingEntity, String pairedConnectionType,
-                                           EntityRef pairedEntity, String connectionType, 
+                                           EntityRef pairedEntity, String connectionType,
                                            EntityRef connectedEntityRef ) throws Exception;
 
-    public ConnectionRef createConnection( 
+    public ConnectionRef createConnection(
             EntityRef connectingEntity, ConnectedEntityRef... connections )
             throws Exception;
 
     public ConnectionRef connectionRef( EntityRef connectingEntity, String connectionType,
                                         EntityRef connectedEntityRef ) throws Exception;
 
-    public ConnectionRef connectionRef( EntityRef connectingEntity, String pairedConnectionType, 
-            EntityRef pairedEntity, String connectionType, EntityRef connectedEntityRef ) 
+    public ConnectionRef connectionRef( EntityRef connectingEntity, String pairedConnectionType,
+            EntityRef pairedEntity, String connectionType, EntityRef connectedEntityRef )
             throws Exception;
 
     public ConnectionRef connectionRef( EntityRef connectingEntity, ConnectedEntityRef... connections );
 
     /**
-     * Disconnects two connected entities with the specified connection type. Connections are 
+     * Disconnects two connected entities with the specified connection type. Connections are
      * directional relationships that can be traversed in either direction.
      *
      * @throws Exception the exception
@@ -444,7 +421,7 @@ public interface EntityManager {
 
 
     /**
-     * Gets the entities of the specified type connected to the specified entity, optionally 
+     * Gets the entities of the specified type connected to the specified entity, optionally
      * matching the specified connection types and/or entity types. Returns a list of entity ids.
      *
      * @param entityId an entity reference
@@ -455,11 +432,11 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public Results getConnectedEntities( EntityRef entityRef, String connectionType, 
+    public Results getConnectedEntities( EntityRef entityRef, String connectionType,
             String connectedEntityType, Level resultsLevel ) throws Exception;
 
     /**
-     * Gets the entities connecting to this entity, optionally with the specified connection 
+     * Gets the entities connecting to this entity, optionally with the specified connection
      * type and/or entity type.
      * <p/>
      * e.g. "get users who have favorited this place"
@@ -472,7 +449,7 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    public Results getConnectingEntities( EntityRef entityRef, String connectionType, 
+    public Results getConnectingEntities( EntityRef entityRef, String connectionType,
             String connectedEntityType, Level resultsLevel ) throws Exception;
 
     public Results getConnectingEntities( EntityRef entityRef, String connectionType,
@@ -541,23 +518,17 @@ public interface EntityManager {
 
     public void revokeUserPermission( UUID userId, String permission ) throws Exception;
 
-    // User role membership
-
-    public Map<String, String> getUserGroupRoles( UUID userId, UUID groupId ) throws Exception;
-
     public void addUserToGroupRole( UUID userId, UUID groupId, String roleName ) throws Exception;
 
-    public void removeUserFromGroupRole( UUID userId, UUID groupId, String roleName ) throws Exception;
-
     public Results getUsersInGroupRole( UUID groupId, String roleName, Level level ) throws Exception;
 
-    public void incrementAggregateCounters( UUID userId, UUID groupId, String category, 
+    public void incrementAggregateCounters( UUID userId, UUID groupId, String category,
             String counterName, long value );
 
-    public Results getAggregateCounters( UUID userId, UUID groupId, String category, 
+    public Results getAggregateCounters( UUID userId, UUID groupId, String category,
             String counterName, CounterResolution resolution, long start, long finish, boolean pad );
 
-    public Results getAggregateCounters( UUID userId, UUID groupId, UUID queueId, String category, 
+    public Results getAggregateCounters( UUID userId, UUID groupId, UUID queueId, String category,
             String counterName, CounterResolution resolution, long start, long finish, boolean pad );
 
     public Results getAggregateCounters( Query query ) throws Exception;
@@ -572,10 +543,10 @@ public interface EntityManager {
 
     public Map<String, Long> getApplicationCounters() throws Exception;
 
-    public void incrementAggregateCounters( 
+    public void incrementAggregateCounters(
             UUID userId, UUID groupId, String category, Map<String, Long> counters );
 
-    public boolean isPropertyValueUniqueForEntity( 
+    public boolean isPropertyValueUniqueForEntity(
             String entityType, String propertyName, Object propertyValue ) throws Exception;
 
     @Deprecated
@@ -610,8 +581,8 @@ public interface EntityManager {
     public void revokeGroupPermission( UUID groupId, String permission ) throws Exception;
 
 
-    <A extends Entity> A batchCreate(Mutator<ByteBuffer> m, String entityType, 
-            Class<A> entityClass, Map<String, Object> properties, 
+    <A extends Entity> A batchCreate(Mutator<ByteBuffer> m, String entityType,
+            Class<A> entityClass, Map<String, Object> properties,
             UUID importId, UUID timestampUuid) throws Exception;
     /**
      * Batch dictionary property.
@@ -626,15 +597,15 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    Mutator<ByteBuffer> batchSetProperty(Mutator<ByteBuffer> batch, EntityRef entity, 
+    Mutator<ByteBuffer> batchSetProperty(Mutator<ByteBuffer> batch, EntityRef entity,
             String propertyName, Object propertyValue, UUID timestampUuid) throws Exception;
 
-    Mutator<ByteBuffer> batchSetProperty(Mutator<ByteBuffer> batch, EntityRef entity, 
-            String propertyName, Object propertyValue, boolean force, boolean noRead, 
+    Mutator<ByteBuffer> batchSetProperty(Mutator<ByteBuffer> batch, EntityRef entity,
+            String propertyName, Object propertyValue, boolean force, boolean noRead,
             UUID timestampUuid) throws Exception;
 
-    Mutator<ByteBuffer> batchUpdateDictionary(Mutator<ByteBuffer> batch, EntityRef entity, 
-            String dictionaryName, Object elementValue, Object elementCoValue, 
+    Mutator<ByteBuffer> batchUpdateDictionary(Mutator<ByteBuffer> batch, EntityRef entity,
+            String dictionaryName, Object elementValue, Object elementCoValue,
             boolean removeFromDictionary, UUID timestampUuid) throws Exception;
 
     /**
@@ -651,30 +622,15 @@ public interface EntityManager {
      *
      * @throws Exception the exception
      */
-    Mutator<ByteBuffer> batchUpdateDictionary(Mutator<ByteBuffer> batch, EntityRef entity, 
-            String dictionaryName, Object elementValue, 
+    Mutator<ByteBuffer> batchUpdateDictionary(Mutator<ByteBuffer> batch, EntityRef entity,
+            String dictionaryName, Object elementValue,
             boolean removeFromDictionary, UUID timestampUuid) throws Exception;
 
-    /**
-     * Batch update properties.
-     *
-     * @param batch the batch
-     * @param entity The owning entity reference
-     * @param properties the properties to set
-     * @param timestampUuid the timestamp of the update operation as a time uuid
-     *
-     * @return batch
-     *
-     * @throws Exception the exception
-     */
-    Mutator<ByteBuffer> batchUpdateProperties(Mutator<ByteBuffer> batch, 
-            EntityRef entity, Map<String, Object> properties, UUID timestampUuid) throws Exception;
 
     Set<String> getDictionaryNames(EntityRef entity) throws Exception;
 
-    void insertEntity( EntityRef ref ) throws Exception;
 
-    /** @return the applicationId */
+  /** @return the applicationId */
     UUID getApplicationId();
 
     /** @return the indexBucketLocator */
@@ -683,7 +639,7 @@ public interface EntityManager {
     /** @return the cass */
     CassandraService getCass();
 
-    /** 
+    /**
      * Refresh the applications index -- use sparingly.
      */
     void refreshIndex();

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/MultiQueryIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/MultiQueryIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/MultiQueryIterator.java
index 5b64d0b..52e235c 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/MultiQueryIterator.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/MultiQueryIterator.java
@@ -24,7 +24,7 @@ import org.apache.usergrid.persistence.index.query.Query.Level;
 
 
 /**
- * For each in a set of source refs executes a sub-query and provides a unified iterator over 
+ * For each in a set of source refs executes a sub-query and provides a unified iterator over
  * the union of all results. Honors page sizes for the Query to ensure memory isn't blown out.
  */
 public class MultiQueryIterator implements ResultsIterator {
@@ -36,10 +36,6 @@ public class MultiQueryIterator implements ResultsIterator {
     private Iterator currentIterator;
 
 
-    public MultiQueryIterator( Results results, Query query ) {
-        this( results.getQueryProcessor().getEntityManager(), 
-                new PagingResultsIterator( results, Level.IDS ), query );
-    }
 
 
     public MultiQueryIterator( EntityManager entityManager, Iterator<EntityRef> source, Query query ) {

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/RelationManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/RelationManager.java b/stack/core/src/main/java/org/apache/usergrid/persistence/RelationManager.java
index c543e75..28e0a50 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/RelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/RelationManager.java
@@ -57,12 +57,11 @@ public interface RelationManager {
     public Entity addToCollections( List<EntityRef> owners, String collectionName ) throws Exception;
 
     public Entity createItemInCollection( String collectionName, String itemType, Map<String, Object> properties )
-            throws Exception;
+        throws Exception;
 
     public void removeFromCollection( String collectionName, EntityRef itemRef ) throws Exception;
 
-    public void copyRelationships( String srcRelationName, EntityRef dstEntityRef, String dstRelationName )
-            throws Exception;
+    public void copyRelationships( String srcRelationName, EntityRef dstEntityRef, String dstRelationName ) throws Exception;
 
     public Results searchCollection( String collectionName, Query query ) throws Exception;
 
@@ -75,8 +74,6 @@ public interface RelationManager {
 
     public ConnectionRef createConnection( ConnectedEntityRef... connections ) throws Exception;
 
-    public ConnectionRef connectionRef( String connectionType, EntityRef connectedEntityRef ) throws Exception;
-
     public ConnectionRef connectionRef( String pairedConnectionType, EntityRef pairedEntity, String connectionType,
                                         EntityRef connectedEntityRef ) throws Exception;
 
@@ -84,8 +81,6 @@ public interface RelationManager {
 
     public void deleteConnection( ConnectionRef connectionRef ) throws Exception;
 
-    public Set<String> getConnectionTypes( UUID connectedEntityId ) throws Exception;
-
     public Set<String> getConnectionTypes() throws Exception;
 
     public Set<String> getConnectionTypes( boolean filterConnection ) throws Exception;
@@ -96,11 +91,9 @@ public interface RelationManager {
      * @param connectionType The type/name of the connection
      * @param connectedEntityType The type of
      */
-    public Results getConnectedEntities( String connectionType, String connectedEntityType, Level resultsLevel )
-            throws Exception;
+    public Results getConnectedEntities( String connectionType, String connectedEntityType, Level resultsLevel ) throws Exception;
 
-    public Results getConnectingEntities( String connectionType, String connectedEntityType,
-                                          Level resultsLevel ) throws Exception;
+    public Results getConnectingEntities( String connectionType, String connectedEntityType, Level resultsLevel ) throws Exception;
 
     // public Results searchConnectedEntitiesForProperty(String connectionType,
     // String connectedEntityType, String propertyName,
@@ -108,14 +101,10 @@ public interface RelationManager {
     // UUID startResult, int count, boolean reversed, Level resultsLevel)
     // throws Exception;
 
-    public Results getConnectingEntities(
-            String connectionType, String entityType, Level level, int count) throws Exception;
+    public Results getConnectingEntities( String connectionType, String entityType, Level level, int count ) throws Exception;
 
-	public Results searchConnectedEntities( Query query ) throws Exception;
+    public Results searchConnectedEntities( Query query ) throws Exception;
 
 
     public Set<String> getConnectionIndexes( String connectionType ) throws Exception;
-
-    public void batchUpdateSetIndexes( Mutator<ByteBuffer> batch, String setName, Object elementValue,
-                                       boolean removeFromSet, UUID timestampUuid ) throws Exception;
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/Results.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/Results.java b/stack/core/src/main/java/org/apache/usergrid/persistence/Results.java
index 388b29e..fa98f41 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/Results.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/Results.java
@@ -30,10 +30,8 @@ import java.util.UUID;
 
 import javax.xml.bind.annotation.XmlRootElement;
 
-import org.apache.usergrid.persistence.cassandra.QueryProcessor;
 import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.persistence.index.query.Query.Level;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
 import org.apache.usergrid.utils.MapUtils;
 import org.apache.usergrid.utils.StringUtils;
 
@@ -82,8 +80,6 @@ public class Results implements Iterable<Entity> {
     Object data;
     String dataName;
 
-    private QueryProcessor queryProcessor;
-    private SearchVisitor searchVisitor;
 
 
     public Results() {
@@ -1268,19 +1264,6 @@ public class Results implements Iterable<Entity> {
     }
 
 
-    protected QueryProcessor getQueryProcessor() {
-        return queryProcessor;
-    }
-
-
-    public void setQueryProcessor( QueryProcessor queryProcessor ) {
-        this.queryProcessor = queryProcessor;
-    }
-
-
-    public void setSearchVisitor( SearchVisitor searchVisitor ) {
-        this.searchVisitor = searchVisitor;
-    }
 
 
     /** uses cursor to get next batch of Results (returns null if no cursor) */
@@ -1289,10 +1272,6 @@ public class Results implements Iterable<Entity> {
             return null;
         }
 
-        Query q = new Query( query );
-        q.setCursor( getCursor() );
-        queryProcessor.setQuery( q );
-
-        return queryProcessor.getResults( searchVisitor );
+        throw new UnsupportedOperationException( "This needs to be implemented" );
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/CassandraService.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/CassandraService.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/CassandraService.java
index 2322b95..01651cd 100644
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/CassandraService.java
+++ b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/CassandraService.java
@@ -32,10 +32,6 @@ import java.util.UUID;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.usergrid.locking.LockManager;
-import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.IndexBucketLocator.IndexType;
-import org.apache.usergrid.persistence.cassandra.index.IndexBucketScanner;
-import org.apache.usergrid.persistence.cassandra.index.IndexScanner;
 import org.apache.usergrid.persistence.hector.CountingMutator;
 import org.apache.usergrid.utils.MapUtils;
 
@@ -1002,48 +998,10 @@ public class CassandraService {
 
 
 
-    /**
-     * Gets the id list.
-     *
-     * @param ko the keyspace
-     * @param key the key
-     * @param start the start
-     * @param finish the finish
-     * @param count the count
-     * @param reversed True if the scan should be reversed
-     * @param locator The index locator instance
-     * @param applicationId The applicationId
-     * @param collectionName The name of the collection to get the Ids for
-     *
-     * @return list of columns as UUIDs
-     *
-     * @throws Exception the exception
-     */
-    public IndexScanner getIdList( Keyspace ko, Object key, UUID start, UUID finish, int count, boolean reversed,
-                                   IndexBucketLocator locator, UUID applicationId, String collectionName, boolean keepFirst )
-            throws Exception {
-
-        if ( count <= 0 ) {
-            count = DEFAULT_COUNT;
-        }
-
-        if ( NULL_ID.equals( start ) ) {
-            start = null;
-        }
 
 
-        final boolean skipFirst = start != null && !keepFirst;
-
-        IndexScanner scanner =
-                new IndexBucketScanner( this, locator, ENTITY_ID_SETS, applicationId, IndexType.COLLECTION, key, start,
-                        finish, reversed, count, skipFirst, collectionName );
-
-        return scanner;
-    }
-
 
 
-    
     public void destroy() throws Exception {
     	if (cluster != null) {
     		HConnectionManager connectionManager = cluster.getConnectionManager();

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImpl.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImpl.java b/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImpl.java
deleted file mode 100644
index a598a82..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/cassandra/EntityManagerFactoryImpl.java
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.cassandra;
-
-
-import java.nio.ByteBuffer;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.BeansException;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.ApplicationContextAware;
-import org.apache.usergrid.persistence.DynamicEntity;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityManagerFactory;
-import org.apache.usergrid.persistence.entities.Application;
-import org.apache.usergrid.persistence.exceptions.ApplicationAlreadyExistsException;
-import org.apache.usergrid.persistence.hector.CountingMutator;
-import org.apache.usergrid.utils.UUIDUtils;
-
-import org.apache.commons.lang.StringUtils;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.yammer.metrics.annotation.Metered;
-
-import me.prettyprint.hector.api.Keyspace;
-import me.prettyprint.hector.api.beans.ColumnSlice;
-import me.prettyprint.hector.api.beans.HColumn;
-import me.prettyprint.hector.api.beans.OrderedRows;
-import me.prettyprint.hector.api.beans.Row;
-import me.prettyprint.hector.api.beans.Rows;
-import me.prettyprint.hector.api.mutation.Mutator;
-import me.prettyprint.hector.api.query.QueryResult;
-import me.prettyprint.hector.api.query.RangeSlicesQuery;
-
-import static java.lang.String.CASE_INSENSITIVE_ORDER;
-
-
-import static me.prettyprint.hector.api.factory.HFactory.createRangeSlicesQuery;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_NAME;
-import static org.apache.usergrid.persistence.Schema.PROPERTY_UUID;
-import static org.apache.usergrid.persistence.Schema.TYPE_APPLICATION;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addInsertToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.asMap;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.batchExecute;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.APPLICATIONS_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.PROPERTIES_CF;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.RETRY_COUNT;
-import static org.apache.usergrid.utils.ConversionUtils.uuid;
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-import org.apache.usergrid.persistence.core.util.Health;
-
-
-/**
- * Cassandra-specific implementation of Datastore
- *
- * @author edanuff
- */
-public class EntityManagerFactoryImpl implements EntityManagerFactory, ApplicationContextAware {
-
-    private static final Logger logger = LoggerFactory.getLogger( EntityManagerFactoryImpl.class );
-
-    public static String IMPLEMENTATION_DESCRIPTION = "Cassandra Entity Manager Factory 1.0";
-
-    public static final Class<DynamicEntity> APPLICATION_ENTITY_CLASS = DynamicEntity.class;
-
-
-    ApplicationContext applicationContext;
-
-    CassandraService cass;
-    CounterUtils counterUtils;
-
-    private boolean skipAggregateCounters;
-
-    private LoadingCache<UUID, EntityManager> entityManagers =
-            CacheBuilder.newBuilder().maximumSize( 100 ).build( new CacheLoader<UUID, EntityManager>() {
-                public EntityManager load( UUID appId ) { // no checked exception
-                    return _getEntityManager( appId );
-                }
-            } );
-
-    private static final int REBUILD_PAGE_SIZE = 100;
-
-
-    /**
-     * Must be constructed with a CassandraClientPool.
-     *
-     * @param cass the cassandraService instance
-     */
-    public EntityManagerFactoryImpl( CassandraService cass, CounterUtils counterUtils, boolean skipAggregateCounters ) {
-        this.cass = cass;
-        this.counterUtils = counterUtils;
-        this.skipAggregateCounters = skipAggregateCounters;
-        if ( skipAggregateCounters ) {
-            logger.warn( "NOTE: Counters have been disabled by configuration..." );
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.usergrid.core.Datastore#getImplementationDescription()
-     */
-    @Override
-    public String getImplementationDescription() {
-        return IMPLEMENTATION_DESCRIPTION;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.usergrid.core.Datastore#getEntityDao(java.util.UUID,
-     * java.util.UUID)
-     */
-    @Override
-    public EntityManager getEntityManager( UUID applicationId ) {
-        try {
-            return entityManagers.get( applicationId );
-        }
-        catch ( Exception ex ) {
-            ex.printStackTrace();
-        }
-        return _getEntityManager( applicationId );
-    }
-
-
-    private EntityManager _getEntityManager( UUID applicationId ) {
-        EntityManagerImpl em = new EntityManagerImpl();
-        em.init( this, cass, counterUtils, applicationId, skipAggregateCounters );
-        em.setApplicationId( applicationId );
-        return em;
-    }
-
-
-    public ApplicationContext getApplicationContext() {
-        return applicationContext;
-    }
-
-
-    /**
-     * Gets the setup.
-     *
-     * @return Setup helper
-     */
-    public SetupImpl getSetup() {
-        return new SetupImpl( this, cass );
-    }
-
-
-    @Override
-    public void setup() throws Exception {
-        Setup setup = getSetup();
-        setup.init();
-
-        if ( cass.getPropertiesMap() != null ) {
-            updateServiceProperties( cass.getPropertiesMap() );
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.usergrid.core.Datastore#createApplication(java.lang.String)
-     */
-    @Override
-    public UUID createApplication( String organization, String name ) throws Exception {
-        return createApplication( organization, name, null );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.usergrid.core.Datastore#createApplication(java.lang.String,
-     * java.util.Map)
-     */
-    @Override
-    public UUID createApplication( String organizationName, String name, Map<String, Object> properties )
-            throws Exception {
-
-        String appName = buildAppName( organizationName, name );
-
-        HColumn<String, ByteBuffer> column =
-                cass.getColumn( cass.getSystemKeyspace(), APPLICATIONS_CF, appName, PROPERTY_UUID );
-        if ( column != null ) {
-            throw new ApplicationAlreadyExistsException( name );
-            // UUID uuid = uuid(column.getValue());
-            // return uuid;
-        }
-
-        UUID applicationId = UUIDUtils.newTimeUUID();
-        logger.info( "New application id " + applicationId.toString() );
-
-        initializeApplication( organizationName, applicationId, appName, properties );
-
-        return applicationId;
-    }
-
-    
-    @Override
-    public void deleteApplication(UUID applicationId) throws Exception {
-        // TODO implement deleteApplication in Usergrid 1 code base (master branch?)
-        throw new UnsupportedOperationException("Not supported."); 
-    }
-
-
-    private String buildAppName( String organizationName, String name ) {
-        return StringUtils.lowerCase( name.contains( "/" ) ? name : organizationName + "/" + name );
-    }
-
-
-    public UUID initializeApplication( String organizationName, UUID applicationId, String name,
-                                       Map<String, Object> properties ) throws Exception {
-
-        String appName = buildAppName( organizationName, name );
-        // check for pre-existing
-        if ( lookupApplication( appName ) != null ) {
-            throw new ApplicationAlreadyExistsException( appName );
-        }
-        if ( properties == null ) {
-            properties = new TreeMap<String, Object>( CASE_INSENSITIVE_ORDER );
-        }
-
-        properties.put( PROPERTY_NAME, appName );
-
-        getSetup().setupApplicationKeyspace( applicationId, appName );
-
-        Keyspace ko = cass.getSystemKeyspace();
-        Mutator<ByteBuffer> m = CountingMutator.createFlushingMutator( ko, be );
-
-        long timestamp = cass.createTimestamp();
-
-        addInsertToMutator( m, APPLICATIONS_CF, appName, PROPERTY_UUID, applicationId, timestamp );
-        addInsertToMutator( m, APPLICATIONS_CF, appName, PROPERTY_NAME, appName, timestamp );
-
-        batchExecute( m, RETRY_COUNT );
-
-        EntityManager em = getEntityManager( applicationId );
-        em.create( TYPE_APPLICATION, APPLICATION_ENTITY_CLASS, properties );
-
-        em.resetRoles();
-
-        return applicationId;
-    }
-
-
-    @Override
-    public UUID importApplication( String organizationName, UUID applicationId, String name,
-                                   Map<String, Object> properties ) throws Exception {
-
-        name = buildAppName( organizationName, name );
-
-        HColumn<String, ByteBuffer> column =
-                cass.getColumn( cass.getSystemKeyspace(), APPLICATIONS_CF, name, PROPERTY_UUID );
-        if ( column != null ) {
-            throw new ApplicationAlreadyExistsException( name );
-            // UUID uuid = uuid(column.getValue());
-            // return uuid;
-        }
-
-        return initializeApplication( organizationName, applicationId, name, properties );
-    }
-
-
-    @Override
-    @Metered(group = "core", name = "EntityManagerFactory_lookupApplication_byName")
-    public UUID lookupApplication( String name ) throws Exception {
-        name = name.toLowerCase();
-        HColumn<String, ByteBuffer> column =
-                cass.getColumn( cass.getSystemKeyspace(), APPLICATIONS_CF, name, PROPERTY_UUID );
-        if ( column != null ) {
-            return uuid( column.getValue() );
-        }
-        return null;
-    }
-
-
-    /**
-     * Gets the application.
-     *
-     * @param name the name
-     *
-     * @return application for name
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "EntityManagerFactory_getApplication")
-    public Application getApplication( String name ) throws Exception {
-        name = name.toLowerCase();
-        HColumn<String, ByteBuffer> column =
-                cass.getColumn( cass.getSystemKeyspace(), APPLICATIONS_CF, name, PROPERTY_UUID );
-        if ( column == null ) {
-            return null;
-        }
-
-        UUID applicationId = uuid( column.getValue() );
-
-        EntityManager em = getEntityManager( applicationId );
-        return ( ( EntityManagerImpl ) em ).getEntity( applicationId, Application.class );
-    }
-
-
-    @Override
-    public Map<String, UUID> getApplications() throws Exception {
-        Map<String, UUID> applications = new TreeMap<String, UUID>( CASE_INSENSITIVE_ORDER );
-        Keyspace ko = cass.getSystemKeyspace();
-        RangeSlicesQuery<String, String, UUID> q = createRangeSlicesQuery( ko, se, se, ue );
-        q.setKeys( "", "\uFFFF" );
-        q.setColumnFamily( APPLICATIONS_CF );
-        q.setColumnNames( PROPERTY_UUID );
-        q.setRowCount( 10000 );
-        QueryResult<OrderedRows<String, String, UUID>> r = q.execute();
-        Rows<String, String, UUID> rows = r.get();
-        for ( Row<String, String, UUID> row : rows ) {
-            ColumnSlice<String, UUID> slice = row.getColumnSlice();
-            HColumn<String, UUID> column = slice.getColumnByName( PROPERTY_UUID );
-            applications.put( row.getKey(), column.getValue() );
-        }
-        return applications;
-    }
-
-
-    @Override
-    public boolean setServiceProperty( String name, String value ) {
-        try {
-            cass.setColumn( cass.getSystemKeyspace(), PROPERTIES_CF, PROPERTIES_CF, name, value );
-            return true;
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to set property " + name + ": " + e.getMessage() );
-        }
-        return false;
-    }
-
-
-    @Override
-    public boolean deleteServiceProperty( String name ) {
-        try {
-            cass.deleteColumn( cass.getSystemKeyspace(), PROPERTIES_CF, PROPERTIES_CF, name );
-            return true;
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to delete property " + name + ": " + e.getMessage() );
-        }
-        return false;
-    }
-
-
-    @Override
-    public boolean updateServiceProperties( Map<String, String> properties ) {
-        try {
-            cass.setColumns( cass.getSystemKeyspace(), PROPERTIES_CF, PROPERTIES_CF.getBytes(), properties );
-            return true;
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to update properties: " + e.getMessage() );
-        }
-        return false;
-    }
-
-
-    @Override
-    public Map<String, String> getServiceProperties() {
-        try {
-            return asMap( cass.getAllColumns( cass.getSystemKeyspace(), PROPERTIES_CF, PROPERTIES_CF, se, se ) );
-        }
-        catch ( Exception e ) {
-            logger.error( "Unable to load properties: " + e.getMessage() );
-        }
-        return null;
-    }
-
-
-    @Override
-    public void setApplicationContext( ApplicationContext applicationContext ) throws BeansException {
-        this.applicationContext = applicationContext;
-    }
-
-
-    @Override
-    public long performEntityCount() {
-        throw new UnsupportedOperationException("Not supported in v1");
-    }
-
-
-    public void setCounterUtils( CounterUtils counterUtils ) {
-        this.counterUtils = counterUtils;
-    }
-
-
-    static final UUID MANAGEMENT_APPLICATION_ID = new UUID( 0, 1 );
-    static final UUID DEFAULT_APPLICATION_ID = new UUID( 0, 16 );
-
-    @Override
-    public UUID getManagementAppId() {
-        return MANAGEMENT_APPLICATION_ID;
-    }
-
-    @Override
-    public UUID getDefaultAppId() {
-        return DEFAULT_APPLICATION_ID; 
-    }
-
-    @Override
-    public void refreshIndex() {
-        // no op
-    }
-
-    @Override
-    public void flushEntityManagerCaches() {
-        // no-op
-    }
-
-    @Override
-    public void rebuildInternalIndexes(ProgressObserver po) throws Exception {
-        throw new UnsupportedOperationException("Not supported."); 
-    }
-
-    @Override
-    public void rebuildAllIndexes(ProgressObserver po) throws Exception {
-        throw new UnsupportedOperationException("Not supported."); 
-    }
-
-    @Override
-    public void rebuildApplicationIndexes(UUID appId, ProgressObserver po) throws Exception {
-        throw new UnsupportedOperationException("Not supported."); 
-    }
-
-
-    @Override
-    public void migrateData() throws Exception {
-
-    }
-
-
-    @Override
-    public String getMigrateDataStatus() {
-        throw new UnsupportedOperationException("Not supported in v1");
-    }
-
-
-    @Override
-    public int getMigrateDataVersion() {
-        throw new UnsupportedOperationException("Not supported in v1");
-    }
-
-
-    @Override
-    public void setMigrationVersion( final int version ) {
-        throw new UnsupportedOperationException("Not supported in v1");
-    }
-
-
-    @Override
-    public void rebuildCollectionIndex(UUID appId, String collection, ProgressObserver po) {
-        throw new UnsupportedOperationException("Not supported."); 
-    }
-
-    @Override
-    public void addIndex(UUID appId, String suffix,final int shards,final int replicas) {
-        throw new UnsupportedOperationException("Not supported in v1");
-    }
-
-    @Override
-    public Health getEntityStoreHealth() {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-}


[03/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/WithinNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/WithinNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/WithinNode.java
deleted file mode 100644
index 35afda6..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/WithinNode.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * Intermediate represnetation of the within operator
- *
- * @author tnine
- */
-public class WithinNode extends QueryNode {
-
-    private final String propertyName;
-    private final float distance;
-    private final float lattitude;
-    private final float longitude;
-    private final QuerySlice slice;
-
-
-    /**
-     * @param propertyName
-     * @param distance
-     * @param lattitude
-     * @param longitude
-     */
-    public WithinNode( String propertyName, float distance, float lattitude, float longitude, int nodeId ) {
-        this.propertyName = propertyName;
-        this.distance = distance;
-        this.lattitude = lattitude;
-        this.longitude = longitude;
-        this.slice = new QuerySlice( "location", nodeId );
-    }
-
-
-    /** @return the propertyName */
-    public String getPropertyName() {
-        return propertyName;
-    }
-
-
-    /** @return the distance */
-    public float getDistance() {
-        return distance;
-    }
-
-
-    /** @return the lattitude */
-    public float getLattitude() {
-        return lattitude;
-    }
-
-
-    /** @return the longitude */
-    public float getLongitude() {
-        return longitude;
-    }
-
-
-    /** @return the slice */
-    public QuerySlice getSlice() {
-        return slice;
-    }
-
-
-    /*
-       * (non-Javadoc)
-       *
-       * @see
-       * org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence
-       * .query.ir.NodeVisitor)
-       */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return 1;
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    @Override
-    public String toString() {
-        return "WithinNode [propertyName=" + propertyName + ", distance=" + distance + ", lattitude=" + lattitude
-                + ", longitude=" + longitude + "]";
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumn.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumn.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumn.java
deleted file mode 100644
index fc4a1d6..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/AbstractScanColumn.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-import org.apache.cassandra.utils.ByteBufferUtil;
-
-
-/**
- *
- * @author: tnine
- *
- */
-public abstract class AbstractScanColumn implements ScanColumn {
-
-    private final UUID uuid;
-    private final ByteBuffer buffer;
-
-
-    protected AbstractScanColumn( UUID uuid, ByteBuffer buffer ) {
-        this.uuid = uuid;
-        this.buffer = buffer;
-    }
-
-
-    @Override
-    public UUID getUUID() {
-        return uuid;
-    }
-
-
-    @Override
-    public ByteBuffer getCursorValue() {
-        return buffer == null ? null :buffer.duplicate();
-    }
-
-
-    @Override
-    public boolean equals( Object o ) {
-        if ( this == o ) {
-            return true;
-        }
-        if ( !( o instanceof AbstractScanColumn ) ) {
-            return false;
-        }
-
-        AbstractScanColumn that = ( AbstractScanColumn ) o;
-
-        return uuid.equals(that.uuid);
-
-    }
-
-
-    @Override
-    public int hashCode() {
-        return uuid.hashCode();
-    }
-
-
-    @Override
-    public String toString() {
-        return "AbstractScanColumn{" +
-                "uuid=" + uuid +
-                ", buffer=" + ByteBufferUtil.bytesToHex( buffer ) +
-                '}';
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/CollectionResultsLoaderFactory.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/CollectionResultsLoaderFactory.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/CollectionResultsLoaderFactory.java
deleted file mode 100644
index 2904bbc..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/CollectionResultsLoaderFactory.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.Level;
-import static org.apache.usergrid.persistence.index.query.Query.Level.IDS;
-import static org.apache.usergrid.persistence.index.query.Query.Level.REFS;
-
-
-/** Implementation for loading collection results */
-public class CollectionResultsLoaderFactory implements ResultsLoaderFactory {
-
-    @Override
-    public ResultsLoader getResultsLoader( EntityManager em, Query query, Level level ) {
-        switch ( level ) {
-            case IDS:
-                return new IDLoader();
-            case REFS:
-                return new EntityRefLoader( query.getEntityType() );
-            default:
-                return new EntityResultsLoader( em );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionIndexSliceParser.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionIndexSliceParser.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionIndexSliceParser.java
deleted file mode 100644
index 51f0c9a..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionIndexSliceParser.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.Schema;
-
-import me.prettyprint.hector.api.beans.DynamicComposite;
-
-
-/**
- * Parser for reading uuid connections from ENTITY_COMPOSITE_DICTIONARIES and DICTIONARY_CONNECTED_ENTITIES type
- *
- * @author tnine
- */
-public class ConnectionIndexSliceParser implements SliceParser {
-
-    private final String connectedEntityType;
-
-
-    /** @param connectedEntityType Could be null if we want to return all types */
-    public ConnectionIndexSliceParser( String connectedEntityType ) {
-        this.connectedEntityType = connectedEntityType;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.SliceParser#parse(java.nio.ByteBuffer)
-     */
-    @Override
-    public ScanColumn parse( ByteBuffer buff ) {
-        DynamicComposite composite = DynamicComposite.fromByteBuffer( buff.duplicate() );
-
-        String connectedType = composite.get( 1 ).toString();
-
-
-        //connection type has been defined and it doesn't match, skip it
-        if ( connectedEntityType != null && !connectedEntityType.equals( connectedType ) ) {
-            return null;
-        }
-
-        //we're checking a loopback and it wasn't specified, skip it
-        if ( ( connectedEntityType != null && !connectedEntityType.equalsIgnoreCase( connectedType ) ) || Schema
-                .TYPE_CONNECTION.equalsIgnoreCase( connectedType ) ) {
-            return null;
-        }
-
-        return new ConnectionColumn( ( UUID ) composite.get( 0 ), connectedType, buff );
-        //    return composite;
-        //    return null;
-    }
-
-
-    public static class ConnectionColumn extends AbstractScanColumn {
-
-        private final String connectedType;
-
-
-        public ConnectionColumn( UUID uuid, String connectedType, ByteBuffer column ) {
-            super( uuid, column );
-            this.connectedType = connectedType;
-        }
-
-
-        /** Get the target type from the column */
-        public String getTargetType() {
-            return connectedType;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionRefLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionRefLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionRefLoader.java
deleted file mode 100644
index 0419aea..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionRefLoader.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.SimpleEntityRef;
-import org.apache.usergrid.persistence.cassandra.ConnectionRefImpl;
-
-
-/**
- *
- * @author: tnine
- *
- */
-public class ConnectionRefLoader implements ResultsLoader {
-
-    private final UUID sourceEntityId;
-    private final String sourceType;
-    private final String connectionType;
-    private final String targetEntityType;
-
-
-    public ConnectionRefLoader( ConnectionRef connectionRef ) {
-        this.sourceType = connectionRef.getConnectingEntity().getType();
-        this.sourceEntityId = connectionRef.getConnectingEntity().getUuid();
-        this.connectionType = connectionRef.getConnectionType();
-        this.targetEntityType = connectionRef.getConnectedEntity().getType();
-    }
-
-
-    @Override
-    public Results getResults( List<ScanColumn> entityIds, String type ) throws Exception {
-
-
-        final EntityRef sourceRef = new SimpleEntityRef( sourceType, sourceEntityId );
-
-        List<ConnectionRef> refs = new ArrayList<ConnectionRef>( entityIds.size() );
-
-        for ( ScanColumn column : entityIds ) {
-
-            SimpleEntityRef targetRef;
-
-            if ( column instanceof ConnectionIndexSliceParser.ConnectionColumn ) {
-                final ConnectionIndexSliceParser.ConnectionColumn connectionColumn =
-                        ( ConnectionIndexSliceParser.ConnectionColumn ) column;
-                targetRef = new SimpleEntityRef( connectionColumn.getTargetType(), connectionColumn.getUUID() );
-            }
-
-            else {
-                targetRef = new SimpleEntityRef( targetEntityType, column.getUUID() );
-            }
-
-            final ConnectionRef ref = new ConnectionRefImpl( sourceRef, connectionType, targetRef );
-
-            refs.add( ref );
-        }
-
-        return Results.fromConnections( refs );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionResultsLoaderFactory.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionResultsLoaderFactory.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionResultsLoaderFactory.java
deleted file mode 100644
index 0e58449..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionResultsLoaderFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import org.apache.usergrid.persistence.ConnectionRef;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.Level;
-import static org.apache.usergrid.persistence.index.query.Query.Level.IDS;
-import static org.apache.usergrid.persistence.index.query.Query.Level.REFS;
-
-
-/** Implementation for loading connectionResults results */
-public class ConnectionResultsLoaderFactory implements ResultsLoaderFactory {
-
-    private final ConnectionRef connection;
-
-
-    public ConnectionResultsLoaderFactory( ConnectionRef connection ) {
-        this.connection = connection;
-    }
-
-
-    @Override
-    public ResultsLoader getResultsLoader( EntityManager em, Query query, Level level ) {
-        switch ( level ) {
-            case IDS://Note that this is technically wrong.  However, to support backwards compatibility with the
-                // existing apis and usage, both ids and refs return a connection ref when dealing with connections
-            case REFS:
-                return new ConnectionRefLoader( connection );
-            default:
-                return new EntityResultsLoader( em );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionTypesIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionTypesIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionTypesIterator.java
deleted file mode 100644
index e22b6b1..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ConnectionTypesIterator.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.Schema;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTED_TYPES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTING_TYPES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-/** Iterator to iterate all types of connections the entity participates in */
-public class ConnectionTypesIterator implements Iterator<String>, Iterable<String> {
-
-    private final CassandraService cass;
-    private final UUID applicationId;
-    private final Object key;
-    //  private final UUID entityId;
-    private final int pageSize;
-    //  private static final String dictionaryName;
-
-
-    private boolean hasMore = true;
-    private Object start = null;
-
-    private Iterator<String> lastResults;
-
-
-    /**
-     * The connection types iterator.
-     *
-     * @param cass The cassandra service to use
-     * @param applicationId The application id to use
-     * @param entityId The entityId to use.  Can be a source for outgoing connections, or target for incoming
-     * connections
-     * @param outgoing True if this is a search from source->target on the edge, false if it is a search from
-     * target<-source
-     * @param pageSize The page size to use for batch fetching
-     */
-    public ConnectionTypesIterator( CassandraService cass, UUID applicationId, UUID entityId, boolean outgoing,
-                                    int pageSize ) {
-        this.cass = cass;
-        this.applicationId = applicationId;
-        this.pageSize = pageSize;
-
-        this.key =
-                outgoing ? key( entityId, DICTIONARY_CONNECTED_TYPES ) : key( entityId, DICTIONARY_CONNECTING_TYPES );
-    }
-
-
-    @Override
-    public Iterator<String> iterator() {
-        return this;
-    }
-
-
-    /*
-       * (non-Javadoc)
-       *
-       * @see java.util.Iterator#hasNext()
-       */
-    @Override
-    public boolean hasNext() {
-
-        // We've either 1) paged everything we should and have 1 left from our
-        // "next page" pointer
-        // Our currently buffered results don't exist or don't have a next. Try to
-        // load them again if they're less than the page size
-        if ( ( lastResults == null || !lastResults.hasNext() ) && hasMore ) {
-            try {
-                return load();
-            }
-            catch ( Exception e ) {
-                throw new RuntimeException( "Error loading next page of indexbucket scanner", e );
-            }
-        }
-
-        return lastResults.hasNext();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public String next() {
-
-        if ( !hasNext() ) {
-            throw new NoSuchElementException( "There are no elements left in this iterator" );
-        }
-
-        return lastResults.next();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "You can't remove from a result set, only advance" );
-    }
-
-
-    /**
-     * Search the collection index using all the buckets for the given collection. Load the next page. Return false if
-     * nothing was loaded, true otherwise
-     */
-
-    public boolean load() throws Exception {
-
-        // nothing left to load
-        if ( !hasMore ) {
-            return false;
-        }
-
-        // if we skip the first we need to set the load to page size +2, since we'll
-        // discard the first
-        // and start paging at the next entity, otherwise we'll just load the page
-        // size we need
-        int selectSize = pageSize + 1;
-
-
-        List<HColumn<ByteBuffer, ByteBuffer>> results =
-                cass.getColumns( cass.getApplicationKeyspace( applicationId ), ENTITY_DICTIONARIES, key, start, null,
-                        selectSize, false );
-
-        // we loaded a full page, there might be more
-        if ( results.size() == selectSize ) {
-            hasMore = true;
-
-            // set the bytebuffer for the next pass
-            start = results.get( results.size() - 1 ).getName();
-
-            results.remove( results.size() - 1 );
-        }
-        else {
-            hasMore = false;
-        }
-
-
-        List<String> stringResults = new ArrayList<String>( results.size() );
-
-        //do the parse here
-        for ( HColumn<ByteBuffer, ByteBuffer> col : results ) {
-            final String value = se.fromByteBuffer( col.getName() );
-
-            //always ignore loopback, this is legacy data that needs to be cleaned up, and it doesn't belong here
-            if ( !Schema.TYPE_CONNECTION.equalsIgnoreCase( value ) ) {
-                stringResults.add( value );
-            }
-        }
-
-
-        lastResults = stringResults.iterator();
-
-
-        return stringResults.size() > 0;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EmptyIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EmptyIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EmptyIterator.java
deleted file mode 100644
index 933bd2a..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EmptyIterator.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.Iterator;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-
-/** Iterator that never returns results */
-public class EmptyIterator implements ResultIterator {
-    @Override
-    public void reset() {
-        //no op
-    }
-
-
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastValue ) {
-        //no op
-    }
-
-
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        return this;
-    }
-
-
-    @Override
-    public boolean hasNext() {
-        return false;
-    }
-
-
-    @Override
-    public Set<ScanColumn> next() {
-        return null;
-    }
-
-
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "Not supported" );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityRefLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityRefLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityRefLoader.java
deleted file mode 100644
index cc24022..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityRefLoader.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.SimpleEntityRef;
-
-
-public class EntityRefLoader implements ResultsLoader {
-
-    private String type;
-
-
-    public EntityRefLoader( String type ) {
-        this.type = type;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultsLoader#getResults(java.util.List)
-     */
-    @Override
-    public Results getResults( List<ScanColumn> entityIds, String type ) throws Exception {
-        Results r = new Results();
-        List<EntityRef> refs = new ArrayList<EntityRef>( entityIds.size() );
-        for ( ScanColumn id : entityIds ) {
-            refs.add( new SimpleEntityRef( type, id.getUUID() ) );
-        }
-        r.setRefs( refs );
-        return r;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityResultsLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityResultsLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityResultsLoader.java
deleted file mode 100644
index f7623f4..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/EntityResultsLoader.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.List;
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.Results;
-
-
-/** @author tnine */
-public class EntityResultsLoader implements ResultsLoader {
-
-    private EntityManager em;
-
-
-    /**
-     *
-     */
-    public EntityResultsLoader( EntityManager em ) {
-        this.em = em;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultsLoader#getResults(java.util.List)
-     */
-    @Override
-    public Results getResults( List<ScanColumn> entityIds, String type ) throws Exception {
-        return em.getEntities( ScanColumnTransformer.getIds( entityIds ), type );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/GeoIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/GeoIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/GeoIterator.java
deleted file mode 100644
index 92f6f03..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/GeoIterator.java
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-import org.apache.usergrid.persistence.geo.EntityLocationRef;
-import org.apache.usergrid.persistence.geo.GeoIndexSearcher;
-import org.apache.usergrid.persistence.geo.GeoIndexSearcher.SearchResults;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-/**
- * Simple wrapper around list results until the geo library is updated so support iteration and set returns
- *
- * @author tnine
- */
-public class GeoIterator implements ResultIterator {
-
-    /**
-     *
-     */
-    private static final String DELIM = "+";
-    private static final String TILE_DELIM = "TILE";
-
-    private final GeoIndexSearcher searcher;
-    private final int resultSize;
-    private final QuerySlice slice;
-    private final LinkedHashMap<UUID, LocationScanColumn> idOrder;
-    private final Point center;
-    private final double distance;
-    private final String propertyName;
-
-    private Set<ScanColumn> toReturn;
-    private Set<ScanColumn> lastLoaded;
-
-    // set when parsing cursor. If the cursor has gone to the end, this will be
-    // true, we should return no results
-    private boolean done = false;
-
-    /** Moved and used as part of cursors */
-    private EntityLocationRef last;
-    private List<String> lastCellsSearched;
-
-    /** counter that's incremented as we load pages. If pages loaded = 1 when reset,
-     * we don't have to reload from cass */
-    private int pagesLoaded = 0;
-
-
-    /**
-     *
-     */
-    public GeoIterator( GeoIndexSearcher searcher, int resultSize, QuerySlice slice, String propertyName, Point center,
-                        double distance ) {
-        this.searcher = searcher;
-        this.resultSize = resultSize;
-        this.slice = slice;
-        this.propertyName = propertyName;
-        this.center = center;
-        this.distance = distance;
-        this.idOrder = new LinkedHashMap<UUID, LocationScanColumn>( resultSize );
-        this.lastLoaded = new LinkedHashSet<ScanColumn>( resultSize );
-        parseCursor();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-        advance();
-        return !done || toReturn != null;
-    }
-
-
-    private void advance() {
-        // already loaded, do nothing
-        if ( done || toReturn != null ) {
-            return;
-        }
-
-        idOrder.clear();
-        lastLoaded.clear();
-
-
-        SearchResults results;
-
-        try {
-            results =
-                    searcher.proximitySearch( last, lastCellsSearched, center, propertyName, 0, distance, resultSize );
-        }
-        catch ( Exception e ) {
-            throw new RuntimeException( "Unable to search geo locations", e );
-        }
-
-        List<EntityLocationRef> locations = results.entityLocations;
-
-        lastCellsSearched = results.lastSearchedGeoCells;
-
-        for (final EntityLocationRef location : locations) {
-
-            final UUID id = location.getUuid();
-
-            final LocationScanColumn locationScan = new LocationScanColumn(location);
-
-            idOrder.put(id, locationScan);
-            lastLoaded.add(locationScan);
-
-            last = location;
-        }
-
-        if ( locations.size() < resultSize ) {
-            done = true;
-        }
-
-        if ( lastLoaded.size() > 0 ) {
-            toReturn = lastLoaded;
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public Set<ScanColumn> next() {
-        if ( !hasNext() ) {
-            throw new NoSuchElementException();
-        }
-
-        Set<ScanColumn> temp = toReturn;
-
-        toReturn = null;
-
-        return temp;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "You cannot reove elements from this iterator" );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void reset() {
-        //only 1 iteration was invoked.  Just reset the pointer rather than re-search
-        if ( pagesLoaded == 1 ) {
-            toReturn = lastLoaded;
-            return;
-        }
-
-        idOrder.clear();
-        lastLoaded.clear();
-        lastCellsSearched = null;
-        last = null;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor(
-     * org.apache.usergrid.persistence.cassandra.CursorCache, java.util.UUID)
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID uuid ) {
-
-        LocationScanColumn col = idOrder.get( uuid );
-
-        if ( col == null ) {
-            return;
-        }
-
-        final EntityLocationRef location = col.location;
-
-        if ( location == null ) {
-            return;
-        }
-
-        final int sliceHash = slice.hashCode();
-
-        // get our next distance
-        final double latitude = location.getLatitude();
-
-        final double longitude = location.getLongitude();
-
-        // now create a string value for this
-        final StringBuilder builder = new StringBuilder();
-
-        builder.append( uuid ).append( DELIM );
-        builder.append( latitude ).append( DELIM );
-        builder.append( longitude );
-
-        if ( lastCellsSearched != null ) {
-            builder.append( DELIM );
-
-            for ( String geoCell : lastCellsSearched ) {
-                builder.append( geoCell ).append( TILE_DELIM );
-            }
-
-            int length = builder.length();
-
-            builder.delete( length - TILE_DELIM.length() - 1, length );
-        }
-
-        ByteBuffer buff = se.toByteBuffer( builder.toString() );
-
-
-        cache.setNextCursor( sliceHash, buff );
-    }
-
-
-    /** Get the last cells searched in the iteration */
-    public List<String> getLastCellsSearched() {
-        return Collections.unmodifiableList( lastCellsSearched );
-    }
-
-
-    private void parseCursor() {
-        if ( !slice.hasCursor() ) {
-            return;
-        }
-
-        String string = se.fromByteBuffer( slice.getCursor() );
-
-        // was set to the end, set the no op flag
-        if ( string.length() == 0 ) {
-            done = true;
-            return;
-        }
-
-        String[] parts = string.split( "\\" + DELIM );
-
-        if ( parts.length < 3 ) {
-            throw new RuntimeException(
-                    "Geo cursors must contain 3 or more parts.  Incorrect cursor, please execute the query again" );
-        }
-
-        UUID startId = UUID.fromString( parts[0] );
-        double latitude = Double.parseDouble( parts[1] );
-        double longitude = Double.parseDouble( parts[2] );
-
-        if ( parts.length >= 4 ) {
-            String[] geoCells = parts[3].split( TILE_DELIM );
-
-            lastCellsSearched = Arrays.asList( geoCells );
-        }
-
-        last = new EntityLocationRef( ( String ) null, startId, latitude, longitude );
-    }
-
-
-    private class LocationScanColumn implements ScanColumn {
-
-        private final EntityLocationRef location;
-
-
-        public LocationScanColumn( EntityLocationRef location ) {
-            this.location = location;
-        }
-
-
-        @Override
-        public UUID getUUID() {
-            return location.getUuid();
-        }
-
-
-        @Override
-        public ByteBuffer getCursorValue() {
-            throw new UnsupportedOperationException(
-                    "This is not supported for location scan columns.  It requires iterator information" );
-        }
-
-
-        @Override
-        public boolean equals( Object o ) {
-            if ( this == o ) {
-                return true;
-            }
-            if ( !( o instanceof ScanColumn ) ) {
-                return false;
-            }
-
-            ScanColumn that = ( ScanColumn ) o;
-
-            return location.getUuid().equals( that.getUUID() );
-        }
-
-
-        @Override
-        public int hashCode() {
-            return location.getUuid().hashCode();
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IDLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IDLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IDLoader.java
deleted file mode 100644
index e078172..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IDLoader.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.List;
-
-import org.apache.usergrid.persistence.Results;
-
-
-public class IDLoader implements ResultsLoader {
-
-    public IDLoader() {
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultsLoader#getResults(java.util.List)
-     */
-    @Override
-    public Results getResults( List<ScanColumn> entityIds, String type ) throws Exception {
-        Results r = new Results();
-        r.setIds( ScanColumnTransformer.getIds( entityIds ) );
-        return r;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIterator.java
deleted file mode 100644
index 1fea546..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/IntersectionIterator.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.LinkedHashSet;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-import com.google.common.collect.Sets;
-
-
-/**
- * An iterator that unions 1 or more subsets. It makes the assuming that sub iterators iterate from min(uuid) to
- * max(uuid)
- *
- * @author tnine
- */
-public class IntersectionIterator extends MultiIterator {
-
-
-    /**
-     *
-     */
-    public IntersectionIterator( int pageSize ) {
-        super( pageSize );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void doReset() {
-        for ( ResultIterator itr : iterators ) {
-            itr.reset();
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.MergeIterator#advance()
-     */
-    @Override
-    protected Set<ScanColumn> advance() {
-        /**
-         * Advance our sub iterators until the UUID's all line up
-         */
-
-        int size = iterators.size();
-
-        if ( size == 0 ) {
-            return null;
-        }
-
-        // edge case with only 1 iterator
-        if ( size == 1 ) {
-
-            ResultIterator itr = iterators.get( 0 );
-
-            if ( !itr.hasNext() ) {
-                return null;
-            }
-
-            return itr.next();
-        }
-
-        // begin our tree merge of the iterators
-
-        return merge();
-    }
-
-
-    private Set<ScanColumn> merge() {
-
-        Set<ScanColumn> results = new LinkedHashSet<ScanColumn>();
-        ResultIterator rootIterator = iterators.get( 0 );
-
-
-        //we've matched to the end
-        if ( !rootIterator.hasNext() ) {
-            return null;
-        }
-
-
-        //purposely check size first, that way we avoid another round trip if we can
-        while ( results.size() < pageSize && rootIterator.hasNext() ) {
-
-            Set<ScanColumn> intersection = rootIterator.next();
-
-            for ( int i = 1; i < iterators.size(); i++ ) {
-
-                ResultIterator joinIterator = iterators.get( i );
-
-                intersection = merge( intersection, joinIterator );
-
-                //nothing left short circuit, there is no point in advancing to further join iterators
-                if ( intersection.size() == 0 ) {
-                    break;
-                }
-            }
-
-            //now add the intermediate results and continue
-            results.addAll( intersection );
-        }
-
-        return results;
-    }
-
-
-    private Set<ScanColumn> merge( Set<ScanColumn> current, ResultIterator child ) {
-
-        Set<ScanColumn> results = new LinkedHashSet<ScanColumn>( pageSize );
-
-        while ( results.size() < pageSize ) {
-            if ( !child.hasNext() ) {
-                // we've iterated to the end, reset for next pass
-                child.reset();
-                return results;
-            }
-
-
-            final Set<ScanColumn> childResults = child.next();
-
-            final Set<ScanColumn> intersection =  Sets.intersection( current, childResults );
-
-            results.addAll( intersection );
-        }
-
-        return results;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor(
-     * org.apache.usergrid.persistence.cassandra.CursorCache)
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastLoaded ) {
-        ResultIterator itr = iterators.get( 0 );
-
-        //We can only create a cursor on our root level value in the intersection iterator.
-        if ( itr != null ) {
-            itr.finalizeCursor( cache, lastLoaded );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MergeIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MergeIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MergeIterator.java
deleted file mode 100644
index 8434019..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MergeIterator.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.Iterator;
-import java.util.Set;
-
-
-/** @author tnine */
-public abstract class MergeIterator implements ResultIterator {
-
-
-    /** kept private on purpose so advance must return the correct value */
-    private Set<ScanColumn> next;
-
-    /** Pointer to the last set.  Equal to "next" when returned.  Used to retain results after "next" is set to null */
-    private Set<ScanColumn> last;
-    /** The size of the pages */
-    protected int pageSize;
-
-    int loadCount = 0;
-
-
-    /**
-     *
-     */
-    public MergeIterator( int pageSize ) {
-        this.pageSize = pageSize;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-        //if next isn't set, try to advance
-        if(checkNext()){
-            return true;
-        }
-
-
-        doAdvance();
-
-
-        return checkNext();
-    }
-
-
-    /**
-     * Single source of logic to check if a next is present.
-     * @return
-     */
-    protected boolean checkNext(){
-        return next != null && next.size() > 0;
-    }
-
-
-    /** Advance to the next page */
-    protected void doAdvance() {
-        next = advance();
-
-
-        if ( next != null && next.size() > 0 ) {
-            last = next;
-            loadCount++;
-        }
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public Set<ScanColumn> next() {
-        if ( next == null ) {
-            doAdvance();
-        }
-
-        Set<ScanColumn> returnVal = next;
-
-        next = null;
-
-        return returnVal;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "You can't remove from a union iterator" );
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void reset() {
-        if ( loadCount == 1 && last != null ) {
-            next = last;
-            return;
-        }
-        //clean up the last pointer
-        last = null;
-        //reset in the child iterators
-        doReset();
-    }
-
-
-    /** Advance the iterator to the next value.  Can return an empty set with signals no values */
-    protected abstract Set<ScanColumn> advance();
-
-    /** Perform the reset if required */
-    protected abstract void doReset();
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MultiIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MultiIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MultiIterator.java
deleted file mode 100644
index f163f26..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/MultiIterator.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.ArrayList;
-import java.util.List;
-
-
-/** @author tnine */
-public abstract class MultiIterator extends MergeIterator {
-
-    protected List<ResultIterator> iterators = new ArrayList<ResultIterator>();
-
-
-    /**
-     * @param pageSize
-     */
-    public MultiIterator( int pageSize ) {
-        super( pageSize );
-    }
-
-
-    /** Add an iterator for our sub results */
-    public void addIterator( ResultIterator iterator ) {
-        iterators.add( iterator );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void doReset() {
-        for ( ResultIterator itr : iterators ) {
-            itr.reset();
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/OrderByIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/OrderByIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/OrderByIterator.java
deleted file mode 100644
index ed94cd9..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/OrderByIterator.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.usergrid.persistence.Entity;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityPropertyComparator;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.SortPredicate;
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-
-import org.apache.commons.collections.comparators.ComparatorChain;
-
-import static org.apache.usergrid.persistence.cassandra.Serializers.*;
-
-/**
- * 1) Take a result set iterator as the child 2) Iterate only over candidates and create a cursor from the candidates
- *
- * @author tnine
- */
-
-public class OrderByIterator extends MergeIterator {
-
-    private static final String NAME_UUID = "uuid";
-    private static final Logger logger = LoggerFactory.getLogger( OrderByIterator.class );
-    private final QuerySlice slice;
-    private final ResultIterator candidates;
-    private final ComparatorChain subSortCompare;
-    private final List<String> secondaryFields;
-    private final EntityManager em;
-
-    //our last result from in memory sorting
-    private SortedEntitySet entries;
-
-
-    /**
-     * @param pageSize
-     */
-    public OrderByIterator( QuerySlice slice, List<Query.SortPredicate> secondary, ResultIterator candidates,
-                            EntityManager em, int pageSize ) {
-        super( pageSize );
-        this.slice = slice;
-        this.em = em;
-        this.candidates = candidates;
-        this.subSortCompare = new ComparatorChain();
-        this.secondaryFields = new ArrayList<String>( 1 + secondary.size() );
-
-        //add the sort of the primary column
-        this.secondaryFields.add( slice.getPropertyName() );
-        this.subSortCompare
-                .addComparator( new EntityPropertyComparator( slice.getPropertyName(), slice.isReversed() ) );
-
-        for ( SortPredicate sort : secondary ) {
-            this.subSortCompare.addComparator( new EntityPropertyComparator( sort.getPropertyName(),
-                    sort.getDirection() == Query.SortDirection.DESCENDING ) );
-            this.secondaryFields.add( sort.getPropertyName() );
-        }
-
-        //do uuid sorting last, this way if all our previous sorts are equal, we'll have a reproducible sort order for
-        // paging
-        this.secondaryFields.add( NAME_UUID );
-        this.subSortCompare.addComparator( new EntityPropertyComparator( NAME_UUID, false ) );
-    }
-
-
-    @Override
-    protected Set<ScanColumn> advance() {
-
-        ByteBuffer cursor = slice.getCursor();
-
-        UUID minEntryId = null;
-
-        if ( cursor != null ) {
-            minEntryId = ue.fromByteBuffer( cursor );
-        }
-
-        entries = new SortedEntitySet( subSortCompare, em, secondaryFields, pageSize, minEntryId );
-
-        /**
-         *  keep looping through our peek iterator.  We need to inspect each forward page to ensure we have performed a
-         *  seek to the end of our primary range.  Otherwise we need to keep aggregating. I.E  if the value is a boolean
-         *  and we order by "true
-         *  asc, timestamp desc" we must load every entity that has the value "true" before sub sorting,
-         *  then drop all values that fall out of the sort.
-         */
-        while ( candidates.hasNext() ) {
-
-
-            for ( ScanColumn id : candidates.next() ) {
-                entries.add( id );
-            }
-
-            entries.load();
-        }
-
-
-        return entries.toIds();
-    }
-
-
-    @Override
-    protected void doReset() {
-        // no op
-    }
-
-
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastValue ) {
-        int sliceHash = slice.hashCode();
-
-        ByteBuffer bytes = ue.toByteBuffer( lastValue );
-
-        if ( bytes == null ) {
-            return;
-        }
-
-        cache.setNextCursor( sliceHash, bytes );
-    }
-
-
-    /** A Sorted set with a max size. When a new entry is added, the max is removed */
-    public static final class SortedEntitySet extends TreeSet<Entity> {
-
-        private final int maxSize;
-        private final Map<UUID, ScanColumn> cursorVal = new HashMap<UUID, ScanColumn>();
-        private final EntityManager em;
-        private final List<String> fields;
-        private final Entity minEntity;
-        private final Comparator<Entity> comparator;
-
-
-        public SortedEntitySet( Comparator<Entity> comparator, EntityManager em, List<String> fields, int maxSize,
-                                UUID minEntityId ) {
-            super( comparator );
-            this.maxSize = maxSize;
-            this.em = em;
-            this.fields = fields;
-            this.comparator = comparator;
-            this.minEntity = getPartialEntity( minEntityId );
-        }
-
-
-        @Override
-        public boolean add( Entity entity ) {
-
-            // don't add this entity.  We get it in our scan range, but it's <= the minimum value that
-            //should be allowed in the result set
-            if ( minEntity != null && comparator.compare( entity, minEntity ) <= 0 ) {
-                return false;
-            }
-
-            boolean added = super.add( entity );
-
-            while ( size() > maxSize ) {
-                //remove our last element, we're over size
-                Entity e = this.pollLast();
-                //remove it from the cursors as well
-                cursorVal.remove( e.getUuid() );
-            }
-
-            return added;
-        }
-
-
-        /** add the id to be loaded, and the dynamiccomposite column that belongs with it */
-        public void add( ScanColumn col ) {
-            cursorVal.put( col.getUUID(), col );
-        }
-
-
-        private Entity getPartialEntity( UUID minEntityId ) {
-            List<Entity> entities;
-
-            try {
-                entities = em.getPartialEntities( Collections.singletonList( minEntityId ), fields );
-            }
-            catch ( Exception e ) {
-                logger.error( "Unable to load partial entities", e );
-                throw new RuntimeException( e );
-            }
-
-            if ( entities == null || entities.size() == 0 ) {
-                return null;
-            }
-
-            return entities.get( 0 );
-        }
-
-
-        public void load() {
-            try {
-                for ( Entity e : em.getPartialEntities( cursorVal.keySet(), fields ) ) {
-                    add( e );
-                }
-            }
-            catch ( Exception e ) {
-                logger.error( "Unable to load partial entities", e );
-                throw new RuntimeException( e );
-            }
-        }
-
-
-        /** Turn our sorted entities into a set of ids */
-        public Set<ScanColumn> toIds() {
-            Iterator<Entity> itr = iterator();
-
-            Set<ScanColumn> columns = new LinkedHashSet<ScanColumn>( this.size() );
-
-            while ( itr.hasNext() ) {
-
-                UUID id = itr.next().getUuid();
-
-                columns.add( cursorVal.get( id ) );
-            }
-
-            return columns;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultIterator.java
deleted file mode 100644
index 01a048c..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultIterator.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.Iterator;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-
-
-/**
- * Interface for iterating slice results per node.  This is to be used to iterate and join or intersect values Each
- * iterator element is a set.  Each set size is determined by the underlying implementation.  When no sets of uuids are
- * left the iterator should fail the next statement.  Note that you should not rely on the returned set being exactly
- * the same size as the specified page size.  Valid sets can be returned with size >= that of the set value in the
- * underlying implementation
- *
- * @author tnine
- */
-public interface ResultIterator extends Iterable<Set<ScanColumn>>, Iterator<Set<ScanColumn>> {
-
-
-    /** Reset this iterator to the start to begin iterating again */
-    public void reset();
-
-    /** Finalize the cursor for this results.  Pass in the uuid of the last entity loaded. */
-    public void finalizeCursor( CursorCache cache, UUID lastValue );
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoader.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoader.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoader.java
deleted file mode 100644
index 955296b..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoader.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.List;
-
-import org.apache.usergrid.persistence.Results;
-
-
-/** @author tnine */
-public interface ResultsLoader {
-
-    /** Load results from the list of uuids.  Should return a Results entity where the query cursor can be set */
-    public Results getResults( List<ScanColumn> entityIds, String type ) throws Exception;
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoaderFactory.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoaderFactory.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoaderFactory.java
deleted file mode 100644
index f119cb0..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ResultsLoaderFactory.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.index.query.Query.Level;
-
-
-/**
- *
- * @author: tnine
- *
- */
-public interface ResultsLoaderFactory {
-
-    /**
-     * Get the results loaded that will load all Ids given the results level.  
-     * The original query and the entity manager may be needed to load these results
-     */
-    public ResultsLoader getResultsLoader( EntityManager em, Query query, Level level );
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumn.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumn.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumn.java
deleted file mode 100644
index 289fe86..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumn.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-
-/** An interface that represents a column */
-public interface ScanColumn {
-
-    /** Get the uuid from the column */
-    public UUID getUUID();
-
-    /** Get the cursor value of this column */
-    public ByteBuffer getCursorValue();
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumnTransformer.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumnTransformer.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumnTransformer.java
deleted file mode 100644
index 0fd35e9..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/ScanColumnTransformer.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-
-/** Simple utility to convert Scan Columns collections to lists */
-public class ScanColumnTransformer {
-
-    public static List<UUID> getIds( Collection<ScanColumn> cols ) {
-
-        List<UUID> ids = new ArrayList<UUID>( cols.size() );
-
-        for ( ScanColumn col : cols ) {
-            ids.add( col.getUUID() );
-        }
-
-        return ids;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SecondaryIndexSliceParser.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SecondaryIndexSliceParser.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SecondaryIndexSliceParser.java
deleted file mode 100644
index ea093e6..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SecondaryIndexSliceParser.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import me.prettyprint.hector.api.beans.DynamicComposite;
-
-
-/**
- * Parser for reading and writing secondary index composites
- *
- * @author tnine
- */
-public class SecondaryIndexSliceParser implements SliceParser {
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.result.SliceParser#parse(java.nio.ByteBuffer)
-     */
-    @Override
-    public ScanColumn parse( ByteBuffer buff ) {
-        DynamicComposite composite = DynamicComposite.fromByteBuffer( buff.duplicate() );
-
-        return new SecondaryIndexColumn( ( UUID ) composite.get( 2 ), composite.get( 1 ), buff );
-    }
-
-
-    public static class SecondaryIndexColumn extends AbstractScanColumn {
-
-        private final Object value;
-
-
-        public SecondaryIndexColumn( UUID uuid, Object value, ByteBuffer buff ) {
-            super( uuid, buff );
-            this.value = value;
-        }
-
-
-        /** Get the value from the node */
-        public Object getValue() {
-            return this.value;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceIterator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceIterator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceIterator.java
deleted file mode 100644
index 2a4c3cf..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceIterator.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Set;
-import java.util.UUID;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.usergrid.persistence.cassandra.CursorCache;
-import org.apache.usergrid.persistence.cassandra.index.IndexScanner;
-import org.apache.usergrid.persistence.exceptions.QueryIterationException;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-
-import me.prettyprint.hector.api.beans.HColumn;
-
-
-/**
- * An iterator that will take all slices and order them correctly
- *
- * @author tnine
- */
-public class SliceIterator implements ResultIterator {
-
-    private static final Logger logger = LoggerFactory.getLogger( SliceIterator.class );
-
-    private final LinkedHashMap<UUID, ScanColumn> cols;
-    private final QuerySlice slice;
-    private final SliceParser parser;
-    private final IndexScanner scanner;
-    private final int pageSize;
-
-    /**
-     * Pointer to the uuid set until it's returned
-     */
-    private Set<ScanColumn> lastResult;
-
-    /**
-     * The pointer to the last set of parsed columns
-     */
-    private Set<ScanColumn> parsedCols;
-
-    /**
-     * counter that's incremented as we load pages. If pages loaded = 1 when reset, we don't have to reload from cass
-     */
-    private int pagesLoaded = 0;
-
-    /**
-     * Pointer to the last column we parsed
-     */
-    private ScanColumn last;
-
-
-    /**
-     * @param scanner The scanner to use to read the cols
-     * @param slice The slice used in the scanner
-     * @param parser The parser for the scanner results
-     */
-    public SliceIterator( QuerySlice slice, IndexScanner scanner, SliceParser parser ) {
-        this.slice = slice;
-        this.parser = parser;
-        this.scanner = scanner;
-        this.pageSize = scanner.getPageSize();
-        this.cols = new LinkedHashMap<UUID, ScanColumn>( this.pageSize );
-        this.parsedCols = new LinkedHashSet<ScanColumn>( this.pageSize );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Iterable#iterator()
-     */
-    @Override
-    public Iterator<Set<ScanColumn>> iterator() {
-        return this;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#hasNext()
-     */
-    @Override
-    public boolean hasNext() {
-        if ( lastResult == null ) {
-            return load();
-        }
-
-        return true;
-    }
-
-
-    private boolean load() {
-        if ( !scanner.hasNext() ) {
-            return false;
-        }
-
-        Iterator<HColumn<ByteBuffer, ByteBuffer>> results = scanner.next().iterator();
-
-        cols.clear();
-
-        parsedCols.clear();
-
-        while ( results.hasNext() ) {
-
-            ByteBuffer colName = results.next().getName().duplicate();
-
-            ScanColumn parsed = parser.parse( colName );
-
-            //skip this value, the parser has discarded it
-            if ( parsed == null ) {
-                continue;
-            }
-
-            last = parsed;
-            cols.put( parsed.getUUID(), parsed );
-            parsedCols.add( parsed );
-        }
-
-
-        pagesLoaded++;
-
-        lastResult = parsedCols;
-
-        return lastResult != null && lastResult.size() > 0;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#next()
-     */
-    @Override
-    public Set<ScanColumn> next() {
-        Set<ScanColumn> temp = lastResult;
-        lastResult = null;
-        return temp;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.util.Iterator#remove()
-     */
-    @Override
-    public void remove() {
-        throw new UnsupportedOperationException( "Remove is not supported" );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.result.ResultIterator#reset()
-     */
-    @Override
-    public void reset() {
-        // Do nothing, we'll just return the first page again
-        if ( pagesLoaded == 1 ) {
-            lastResult = parsedCols;
-            return;
-        }
-        scanner.reset();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.result.ResultIterator#finalizeCursor()
-     */
-    @Override
-    public void finalizeCursor( CursorCache cache, UUID lastLoaded ) {
-        final int sliceHash = slice.hashCode();
-
-        ByteBuffer bytes = null;
-
-        ScanColumn col = cols.get( lastLoaded );
-
-
-        //the column came from the current page
-        if ( col != null ) {
-            bytes = col.getCursorValue();
-        }
-        else {
-
-            //check if we reached the end of our iterator.  If we did, set the last value into the cursor.  Otherwise
-            //this is a bug
-            if ( scanner.hasNext() ) {
-                logger.error(
-                        "An iterator attempted to access a slice that was not iterated over.  This will result in the" +
-                                " cursor construction failing" );
-                throw new QueryIterationException(
-                        "An iterator attempted to access a slice that was not iterated over.  This will result in the" +
-                                " cursor construction failing" );
-            }
-
-            final ByteBuffer sliceCursor = slice.getCursor();
-
-            //we've never loaded anything, just re-use the existing slice
-            if (last == null && sliceCursor != null ) {
-                bytes = sliceCursor;
-            }
-
-            //use the last column we loaded.  This way our scan returns nothing next time since start == finish
-            else if(last != null) {
-                bytes = last.getCursorValue();
-            }
-        }
-
-
-        if ( bytes == null ) {
-            return;
-        }
-
-        cache.setNextCursor( sliceHash, bytes );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceParser.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceParser.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceParser.java
deleted file mode 100644
index d1ce6a1..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/result/SliceParser.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir.result;
-
-
-import java.nio.ByteBuffer;
-
-
-/**
- * Interface to parse and compare range slices
- *
- * @author tnine
- */
-public interface SliceParser {
-
-    /** Parse the slice and return it's parse type.  If null is returned, the column should be considered discarded */
-    public ScanColumn parse( ByteBuffer buff );
-}


[04/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellUtils.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellUtils.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellUtils.java
deleted file mode 100644
index ffc3d72..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/GeocellUtils.java
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo;
-
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.usergrid.persistence.geo.comparator.DoubleTupleComparator;
-import org.apache.usergrid.persistence.geo.model.BoundingBox;
-import org.apache.usergrid.persistence.geo.model.Point;
-import org.apache.usergrid.persistence.geo.model.Tuple;
-
-/**
- #
- # Copyright 2010 Alexandre Gellibert
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- */
-
-
-/**
- * Utils class to compute geocells.
- *
- * @author api.roman.public@gmail.com (Roman Nurik)
- * @author (java portage) Alexandre Gellibert
- */
-public final class GeocellUtils {
-
-    public static final float MIN_LONGITUDE = -180.0f;
-    public static final float MAX_LONGITUDE = 180.0f;
-    public static final float MIN_LATITUDE = -90.0f;
-    public static final float MAX_LATITUDE = 90.0f;
-    // Geocell algorithm constants.
-    public static final int GEOCELL_GRID_SIZE = 4;
-    private static final String GEOCELL_ALPHABET = "0123456789abcdef";
-
-    // Direction enumerations.
-    private static final int[] NORTHWEST = new int[] { -1, 1 };
-    private static final int[] NORTH = new int[] { 0, 1 };
-    private static final int[] NORTHEAST = new int[] { 1, 1 };
-    private static final int[] EAST = new int[] { 1, 0 };
-    private static final int[] SOUTHEAST = new int[] { 1, -1 };
-    private static final int[] SOUTH = new int[] { 0, -1 };
-    private static final int[] SOUTHWEST = new int[] { -1, -1 };
-    private static final int[] WEST = new int[] { -1, 0 };
-
-    private static final int RADIUS = 6378135;
-
-
-    private GeocellUtils() {
-        // no instantiation allowed
-    }
-
-
-    /**
-     * Determines whether the given cells are collinear along a dimension.
-     * <p/>
-     * Returns True if the given cells are in the same row (columnTest=False) or in the same column (columnTest=True).
-     *
-     * @param cell1 : The first geocell string.
-     * @param cell2 : The second geocell string.
-     * @param columnTest : A boolean, where False invokes a row collinearity test and 1 invokes a column collinearity
-     * test.
-     *
-     * @return A bool indicating whether or not the given cells are collinear in the given dimension.
-     */
-    public static boolean collinear( String cell1, String cell2, boolean columnTest ) {
-
-        for ( int i = 0; i < Math.min( cell1.length(), cell2.length() ); i++ ) {
-            int l1[] = subdivXY( cell1.charAt( i ) );
-            int x1 = l1[0];
-            int y1 = l1[1];
-            int l2[] = subdivXY( cell2.charAt( i ) );
-            int x2 = l2[0];
-            int y2 = l2[1];
-
-            // Check row collinearity (assure y's are always the same).
-            if ( !columnTest && y1 != y2 ) {
-                return false;
-            }
-
-            // Check column collinearity (assure x's are always the same).
-            if ( columnTest && x1 != x2 ) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-
-    /**
-     * Calculates the grid of cells formed between the two given cells.
-     * <p/>
-     * Generates the set of cells in the grid created by interpolating from the given Northeast geocell to the given
-     * Southwest geocell.
-     * <p/>
-     * Assumes the Northeast geocell is actually Northeast of Southwest geocell.
-     *
-     * @param cellNE : The Northeast geocell string.
-     * @param cellSW : The Southwest geocell string.
-     *
-     * @return A list of geocell strings in the interpolation.
-     */
-    public static List<String> interpolate( String cellNE, String cellSW ) {
-        // 2D array, will later be flattened.
-        LinkedList<LinkedList<String>> cellSet = new LinkedList<LinkedList<String>>();
-        LinkedList<String> cellFirst = new LinkedList<String>();
-        cellFirst.add( cellSW );
-        cellSet.add( cellFirst );
-
-        // First get adjacent geocells across until Southeast--collinearity with
-        // Northeast in vertical direction (0) means we're at Southeast.
-        while ( !collinear( cellFirst.getLast(), cellNE, true ) ) {
-            String cellTmp = adjacent( cellFirst.getLast(), EAST );
-            if ( cellTmp == null ) {
-                break;
-            }
-            cellFirst.add( cellTmp );
-        }
-
-        // Then get adjacent geocells upwards.
-        while ( !cellSet.getLast().getLast().equalsIgnoreCase( cellNE ) ) {
-
-            LinkedList<String> cellTmpRow = new LinkedList<String>();
-            for ( String g : cellSet.getLast() ) {
-                cellTmpRow.add( adjacent( g, NORTH ) );
-            }
-            if ( cellTmpRow.getFirst() == null ) {
-                break;
-            }
-            cellSet.add( cellTmpRow );
-        }
-
-        // Flatten cellSet, since it's currently a 2D array.
-        List<String> result = new ArrayList<String>();
-        for ( LinkedList<String> list : cellSet ) {
-            result.addAll( list );
-        }
-        return result;
-    }
-
-
-    /**
-     * Computes the number of cells in the grid formed between two given cells.
-     * <p/>
-     * Computes the number of cells in the grid created by interpolating from the given Northeast geocell to the given
-     * Southwest geocell. Assumes the Northeast geocell is actually Northeast of Southwest geocell.
-     *
-     * @param cellNE : The Northeast geocell string.
-     * @param cellSW : The Southwest geocell string.
-     *
-     * @return An int, indicating the number of geocells in the interpolation.
-     */
-    public static int interpolationCount( String cellNE, String cellSW ) {
-
-        BoundingBox bboxNE = computeBox( cellNE );
-        BoundingBox bboxSW = computeBox( cellSW );
-
-        double cellLatSpan = bboxSW.getNorth() - bboxSW.getSouth();
-        double cellLonSpan = bboxSW.getEast() - bboxSW.getWest();
-
-        double numCols = ( ( bboxNE.getEast() - bboxSW.getWest() ) / cellLonSpan );
-        double numRows = ( ( bboxNE.getNorth() - bboxSW.getSouth() ) / cellLatSpan );
-
-        double totalCols = numCols * numRows * 1.0;
-        if ( totalCols > Integer.MAX_VALUE ) {
-            return Integer.MAX_VALUE;
-        }
-        return ( int ) totalCols;
-    }
-
-
-    /**
-     * Calculates all of the given geocell's adjacent geocells.
-     *
-     * @param cell : The geocell string for which to calculate adjacent/neighboring cells.
-     *
-     * @return A list of 8 geocell strings and/or None values indicating adjacent cells.
-     */
-
-    public static List<String> allAdjacents( String cell ) {
-        List<String> result = new ArrayList<String>();
-        for ( int[] d : Arrays.asList( NORTHWEST, NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST ) ) {
-            result.add( adjacent( cell, d ) );
-        }
-        return result;
-    }
-
-
-    /**
-     * Calculates the geocell adjacent to the given cell in the given direction.
-     *
-     * @param cell : The geocell string whose neighbor is being calculated.
-     * @param dir : An (x, y) tuple indicating direction, where x and y can be -1, 0, or 1. -1 corresponds to West for x
-     * and South for y, and 1 corresponds to East for x and North for y. Available helper constants are NORTH, EAST,
-     * SOUTH, WEST, NORTHEAST, NORTHWEST, SOUTHEAST, and SOUTHWEST.
-     *
-     * @return The geocell adjacent to the given cell in the given direction, or None if there is no such cell.
-     */
-    public static String adjacent( String cell, int[] dir ) {
-        if ( cell == null ) {
-            return null;
-        }
-        int dx = dir[0];
-        int dy = dir[1];
-        char[] cellAdjArr = cell.toCharArray(); // Split the geocell string
-        // characters into a list.
-        int i = cellAdjArr.length - 1;
-
-        while ( i >= 0 && ( dx != 0 || dy != 0 ) ) {
-            int l[] = subdivXY( cellAdjArr[i] );
-            int x = l[0];
-            int y = l[1];
-
-            // Horizontal adjacency.
-            if ( dx == -1 ) { // Asking for left.
-                if ( x == 0 ) { // At left of parent cell.
-                    x = GEOCELL_GRID_SIZE - 1; // Becomes right edge of adjacent parent.
-                }
-                else {
-                    x--; // Adjacent, same parent.
-                    dx = 0; // Done with x.
-                }
-            }
-            else if ( dx == 1 ) { // Asking for right.
-                if ( x == GEOCELL_GRID_SIZE - 1 ) { // At right of parent cell.
-                    x = 0; // Becomes left edge of adjacent parent.
-                }
-                else {
-                    x++; // Adjacent, same parent.
-                    dx = 0; // Done with x.
-                }
-            }
-
-            // Vertical adjacency.
-            if ( dy == 1 ) { // Asking for above.
-                if ( y == GEOCELL_GRID_SIZE - 1 ) { // At top of parent cell.
-                    y = 0; // Becomes bottom edge of adjacent parent.
-                }
-                else {
-                    y++; // Adjacent, same parent.
-                    dy = 0; // Done with y.
-                }
-            }
-            else if ( dy == -1 ) { // Asking for below.
-                if ( y == 0 ) { // At bottom of parent cell.
-                    y = GEOCELL_GRID_SIZE - 1; // Becomes top edge of adjacent parent.
-                }
-                else {
-                    y--; // Adjacent, same parent.
-                    dy = 0; // Done with y.
-                }
-            }
-
-            int l2[] = { x, y };
-            cellAdjArr[i] = subdivChar( l2 );
-            i--;
-        }
-        // If we're not done with y then it's trying to wrap vertically,
-        // which is a failure.
-        if ( dy != 0 ) {
-            return null;
-        }
-
-        // At this point, horizontal wrapping is done inherently.
-        return new String( cellAdjArr );
-    }
-
-
-    /**
-     * Returns whether or not the given cell contains the given point.
-     *
-     * @return Returns whether or not the given cell contains the given point.
-     */
-    public static boolean containsPoint( String cell, Point point ) {
-        return compute( point, cell.length() ).equalsIgnoreCase( cell );
-    }
-
-
-    /**
-     * Returns the shortest distance between a point and a geocell bounding box.
-     * <p/>
-     * If the point is inside the cell, the shortest distance is always to a 'edge' of the cell rectangle. If the point
-     * is outside the cell, the shortest distance will be to either a 'edge' or 'corner' of the cell rectangle.
-     *
-     * @return The shortest distance from the point to the geocell's rectangle, in meters.
-     */
-    public static double pointDistance( String cell, Point point ) {
-        BoundingBox bbox = computeBox( cell );
-
-        boolean betweenWE = bbox.getWest() <= point.getLon() && point.getLon() <= bbox.getEast();
-        boolean betweenNS = bbox.getSouth() <= point.getLat() && point.getLat() <= bbox.getNorth();
-
-        if ( betweenWE ) {
-            if ( betweenNS ) {
-                // Inside the geocell.
-                return Math.min( Math.min( distance( point, new Point( bbox.getSouth(), point.getLon() ) ),
-                        distance( point, new Point( bbox.getNorth(), point.getLon() ) ) ),
-                        Math.min( distance( point, new Point( point.getLat(), bbox.getEast() ) ),
-                                distance( point, new Point( point.getLat(), bbox.getWest() ) ) ) );
-            }
-            else {
-                return Math.min( distance( point, new Point( bbox.getSouth(), point.getLon() ) ),
-                        distance( point, new Point( bbox.getNorth(), point.getLon() ) ) );
-            }
-        }
-        else {
-            if ( betweenNS ) {
-                return Math.min( distance( point, new Point( point.getLat(), bbox.getEast() ) ),
-                        distance( point, new Point( point.getLat(), bbox.getWest() ) ) );
-            }
-            else {
-                // TODO(romannurik): optimize
-                return Math.min( Math.min( distance( point, new Point( bbox.getSouth(), bbox.getEast() ) ),
-                        distance( point, new Point( bbox.getNorth(), bbox.getEast() ) ) ),
-                        Math.min( distance( point, new Point( bbox.getSouth(), bbox.getWest() ) ),
-                                distance( point, new Point( bbox.getNorth(), bbox.getWest() ) ) ) );
-            }
-        }
-    }
-
-
-    /**
-     * Computes the geocell containing the given point to the given resolution.
-     * <p/>
-     * This is a simple 16-tree lookup to an arbitrary depth (resolution).
-     *
-     * @param point : The geotypes.Point to compute the cell for.
-     * @param resolution : An int indicating the resolution of the cell to compute.
-     *
-     * @return The geocell string containing the given point, of length resolution.
-     */
-    public static String compute( Point point, int resolution ) {
-        float north = MAX_LATITUDE;
-        float south = MIN_LATITUDE;
-        float east = MAX_LONGITUDE;
-        float west = MIN_LONGITUDE;
-
-        StringBuilder cell = new StringBuilder();
-        while ( cell.length() < resolution ) {
-            float subcellLonSpan = ( east - west ) / GEOCELL_GRID_SIZE;
-            float subcellLatSpan = ( north - south ) / GEOCELL_GRID_SIZE;
-
-            int x = Math.min( ( int ) ( GEOCELL_GRID_SIZE * ( point.getLon() - west ) / ( east - west ) ),
-                    GEOCELL_GRID_SIZE - 1 );
-            int y = Math.min( ( int ) ( GEOCELL_GRID_SIZE * ( point.getLat() - south ) / ( north - south ) ),
-                    GEOCELL_GRID_SIZE - 1 );
-
-            int l[] = { x, y };
-            cell.append( subdivChar( l ) );
-
-            south += subcellLatSpan * y;
-            north = south + subcellLatSpan;
-
-            west += subcellLonSpan * x;
-            east = west + subcellLonSpan;
-        }
-        return cell.toString();
-    }
-
-
-    /**
-     * Computes the rectangular boundaries (bounding box) of the given geocell.
-     *
-     * @param cell_ : The geocell string whose boundaries are to be computed.
-     *
-     * @return A geotypes.Box corresponding to the rectangular boundaries of the geocell.
-     */
-    public static BoundingBox computeBox( String cell_ ) {
-        if ( cell_ == null ) {
-            return null;
-        }
-
-        BoundingBox bbox = new BoundingBox( 90.0, 180.0, -90.0, -180.0 );
-        StringBuilder cell = new StringBuilder( cell_ );
-        while ( cell.length() > 0 ) {
-            double subcellLonSpan = ( bbox.getEast() - bbox.getWest() ) / GEOCELL_GRID_SIZE;
-            double subcellLatSpan = ( bbox.getNorth() - bbox.getSouth() ) / GEOCELL_GRID_SIZE;
-
-            int l[] = subdivXY( cell.charAt( 0 ) );
-            int x = l[0];
-            int y = l[1];
-
-            bbox = new BoundingBox( bbox.getSouth() + subcellLatSpan * ( y + 1 ),
-                    bbox.getWest() + subcellLonSpan * ( x + 1 ), bbox.getSouth() + subcellLatSpan * y,
-                    bbox.getWest() + subcellLonSpan * x );
-
-            cell.deleteCharAt( 0 );
-        }
-
-        return bbox;
-    }
-
-
-    /**
-     * Returns whether or not the given geocell string defines a valid geocell.
-     *
-     * @return Returns whether or not the given geocell string defines a valid geocell.
-     */
-    public static boolean isValid( String cell ) {
-        if ( cell == null || cell.trim().length() == 0 ) {
-            return false;
-        }
-        for ( char c : cell.toCharArray() ) {
-            if ( GEOCELL_ALPHABET.indexOf( c ) < 0 ) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-
-    /**
-     * Returns the (x, y) of the geocell character in the 4x4 alphabet grid.
-     *
-     * @return Returns the (x, y) of the geocell character in the 4x4 alphabet grid.
-     */
-    public static int[] subdivXY( char char_ ) {
-        // NOTE: This only works for grid size 4.
-        int charI = GEOCELL_ALPHABET.indexOf( char_ );
-        return new int[] {
-                ( charI & 4 ) >> 1 | (charI & 1), ( charI & 8 ) >> 2 | ( charI & 2 ) >> 1
-        };
-    }
-
-
-    /**
-     * Returns the geocell character in the 4x4 alphabet grid at pos. (x, y).
-     *
-     * @return Returns the geocell character in the 4x4 alphabet grid at pos. (x, y).
-     */
-    public static char subdivChar( int[] pos ) {
-        // NOTE: This only works for grid size 4.
-        return GEOCELL_ALPHABET.charAt( ( pos[1] & 2 ) << 2 |
-                ( pos[0] & 2 ) << 1 |
-                ( pos[1] & 1 ) << 1 |
-                (pos[0] & 1));
-    }
-
-
-    /**
-     * Calculates the great circle distance between two points (law of cosines).
-     *
-     * @param p1 : indicating the first point.
-     * @param p2 : indicating the second point.
-     *
-     * @return The 2D great-circle distance between the two given points, in meters.
-     */
-    public static double distance( Point p1, Point p2 ) {
-        double p1lat = Math.toRadians( p1.getLat() );
-        double p1lon = Math.toRadians( p1.getLon() );
-        double p2lat = Math.toRadians( p2.getLat() );
-        double p2lon = Math.toRadians( p2.getLon() );
-        return RADIUS * Math.acos( makeDoubleInRange(
-                Math.sin( p1lat ) * Math.sin( p2lat ) + Math.cos( p1lat ) * Math.cos( p2lat ) * Math
-                        .cos( p2lon - p1lon ) ) );
-    }
-
-
-    /**
-     * This function is used to fix issue 10: GeocellUtils.distance(...) uses Math.acos(arg) method. In some cases arg >
-     * 1 (i.e 1.0000000002), so acos cannot be calculated and the method returns NaN.
-     *
-     * @return a double between -1 and 1
-     */
-    public static double makeDoubleInRange( double d ) {
-        double result = d;
-        if ( d > 1 ) {
-            result = 1;
-        }
-        else if ( d < -1 ) {
-            result = -1;
-        }
-        return result;
-    }
-
-
-    /**
-     * Returns the edges of the rectangular region containing all of the given geocells, sorted by distance from the
-     * given point, along with the actual distances from the point to these edges.
-     *
-     * @param cells : The cells (should be adjacent) defining the rectangular region whose edge distances are
-     * requested.
-     * @param point : The point that should determine the edge sort order.
-     *
-     * @return A list of (direction, distance) tuples, where direction is the edge and distance is the distance from the
-     *         point to that edge. A direction value of (0,-1), for example, corresponds to the South edge of the
-     *         rectangular region containing all of the given geocells.
-     *         <p/>
-     *         TODO(romannurik): Assert that lat,lon are actually inside the geocell.
-     */
-    public static List<Tuple<int[], Double>> distanceSortedEdges( List<String> cells, Point point ) {
-        List<BoundingBox> boxes = new ArrayList<BoundingBox>();
-        for ( String cell : cells ) {
-            boxes.add( computeBox( cell ) );
-        }
-        double maxNorth = Double.NEGATIVE_INFINITY;
-        double maxEast = Double.NEGATIVE_INFINITY;
-        double maxSouth = Double.POSITIVE_INFINITY;
-        double maxWest = Double.POSITIVE_INFINITY;
-        for ( BoundingBox box : boxes ) {
-            maxNorth = Math.max( maxNorth, box.getNorth() );
-            maxEast = Math.max( maxEast, box.getEast() );
-            maxSouth = Math.min( maxSouth, box.getSouth() );
-            maxWest = Math.min( maxWest, box.getWest() );
-        }
-        List<Tuple<int[], Double>> result = new ArrayList<Tuple<int[], Double>>();
-        result.add( new Tuple<int[], Double>( SOUTH, distance( new Point( maxSouth, point.getLon() ), point ) ) );
-        result.add( new Tuple<int[], Double>( NORTH, distance( new Point( maxNorth, point.getLon() ), point ) ) );
-        result.add( new Tuple<int[], Double>( WEST, distance( new Point( point.getLat(), maxWest ), point ) ) );
-        result.add( new Tuple<int[], Double>( EAST, distance( new Point( point.getLat(), maxEast ), point ) ) );
-        Collections.sort( result, new DoubleTupleComparator() );
-        return result;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/comparator/DoubleTupleComparator.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/comparator/DoubleTupleComparator.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/comparator/DoubleTupleComparator.java
deleted file mode 100644
index 92aca74..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/comparator/DoubleTupleComparator.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo.comparator;
-
-
-import java.util.Comparator;
-
-import org.apache.usergrid.persistence.geo.model.Tuple;
-
-
-public class DoubleTupleComparator implements Comparator<Tuple<int[], Double>> {
-
-    public int compare( Tuple<int[], Double> o1, Tuple<int[], Double> o2 ) {
-        if ( o1 == null && o2 == null ) {
-            return 0;
-        }
-        if ( o1 == null ) {
-            return -1;
-        }
-        if ( o2 == null ) {
-            return 1;
-        }
-        return o1.getSecond().compareTo( o2.getSecond() );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/BoundingBox.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/BoundingBox.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/BoundingBox.java
deleted file mode 100644
index 587bfd8..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/BoundingBox.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo.model;
-
-
-/** @author Alexandre Gellibert */
-public class BoundingBox {
-
-    private Point northEast;
-    private Point southWest;
-
-
-    public BoundingBox( double north, double east, double south, double west ) {
-        double north_, south_;
-        if ( south > north ) {
-            south_ = north;
-            north_ = south;
-        }
-        else {
-            south_ = south;
-            north_ = north;
-        }
-
-        // Don't swap east and west to allow disambiguation of
-        // antimeridian crossing.
-
-        northEast = new Point( north_, east );
-        southWest = new Point( south_, west );
-    }
-
-
-    public double getNorth() {
-        return northEast.getLat();
-    }
-
-
-    public double getSouth() {
-        return southWest.getLat();
-    }
-
-
-    public double getWest() {
-        return southWest.getLon();
-    }
-
-
-    public double getEast() {
-        return northEast.getLon();
-    }
-
-
-    public Point getNorthEast() {
-        return northEast;
-    }
-
-
-    public Point getSouthWest() {
-        return southWest;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/CostFunction.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/CostFunction.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/CostFunction.java
deleted file mode 100644
index 5ae4759..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/CostFunction.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo.model;
-
-
-/**
- * Interface to create a cost function used in geocells algorithm. This function will determine the cost of an operation
- * depending of number of cells and resolution. When the cost is going higher, the algorithm stops. The cost depends on
- * application use of geocells.
- *
- * @author Alexandre Gellibert
- */
-public interface CostFunction {
-
-    /**
-     * @param numCells number of cells found
-     * @param resolution resolution of those cells
-     *
-     * @return the cost of the operation
-     */
-    public double defaultCostFunction( int numCells, int resolution );
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/DefaultCostFunction.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/DefaultCostFunction.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/DefaultCostFunction.java
deleted file mode 100644
index 0d34ad5..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/DefaultCostFunction.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo.model;
-
-
-import org.apache.usergrid.persistence.geo.GeocellUtils;
-
-
-/**
- * Default cost function used if no cost function is specified in Geocell.bestBboxSearchCells method.
- *
- * @author Alexandre Gellibert
- */
-public class DefaultCostFunction implements CostFunction {
-
-    /*
-     * (non-Javadoc)
-     * @see com.beoui.utils.CostFunction#defaultCostFunction(int, int)
-     */
-    public double defaultCostFunction( int numCells, int resolution ) {
-        return numCells > Math.pow( GeocellUtils.GEOCELL_GRID_SIZE, 2 ) ? Double.MAX_VALUE : 0;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/Tuple.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/Tuple.java b/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/Tuple.java
deleted file mode 100644
index 753233a..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/geo/model/Tuple.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.geo.model;
-
-
-public class Tuple<A, B> {
-
-    private A first;
-    private B second;
-
-
-    public Tuple( A first, B second ) {
-        this.first = first;
-        this.second = second;
-    }
-
-
-    public A getFirst() {
-        return first;
-    }
-
-
-    public B getSecond() {
-        return second;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AllNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AllNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AllNode.java
deleted file mode 100644
index 11c39c9..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AllNode.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * Used to represent a "select all".  This will iterate over the entities by UUID
- *
- * @author tnine
- */
-public class AllNode extends QueryNode {
-
-
-    private final QuerySlice slice;
-    private final boolean forceKeepFirst;
-
-
-    /**
-     * Note that the slice isn't used on select, but is used when creating cursors
-     *
-     * @param id. The unique numeric id for this node
-     * @param forceKeepFirst True if we don't allow the iterator to skip the first result, regardless of cursor state.
-     * Used for startUUID paging
-     */
-    public AllNode( int id, boolean forceKeepFirst ) {
-        this.slice = new QuerySlice( "uuid", id );
-        this.forceKeepFirst = forceKeepFirst;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence.query.ir.NodeVisitor)
-     */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return 1;
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    @Override
-    public String toString() {
-        return "AllNode";
-    }
-
-
-    /** @return the slice */
-    public QuerySlice getSlice() {
-        return slice;
-    }
-
-
-    /** @return the skipFirstMatch */
-    public boolean isForceKeepFirst() {
-        return forceKeepFirst;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AndNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AndNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AndNode.java
deleted file mode 100644
index c2dea1f..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/AndNode.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * Node where the results need intersected.  Used instead of a SliceNode when one of the children is an operation other
- * than slices.  I.E OR, NOT etc
- *
- * @author tnine
- */
-public class AndNode extends BooleanNode {
-
-    /**
-     * @param left
-     * @param right
-     */
-    public AndNode( QueryNode left, QueryNode right ) {
-        super( left, right );
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence.query.ir.NodeVisitor)
-     */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/BooleanNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/BooleanNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/BooleanNode.java
deleted file mode 100644
index ee47946..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/BooleanNode.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/** @author tnine */
-public abstract class BooleanNode extends QueryNode {
-
-    protected QueryNode left;
-    protected QueryNode right;
-
-
-    public BooleanNode( QueryNode left, QueryNode right ) {
-        this.left = left;
-        this.right = right;
-    }
-
-
-    /** @return the left */
-    public QueryNode getLeft() {
-        return left;
-    }
-
-
-    /** @return the right */
-    public QueryNode getRight() {
-        return right;
-    }
-
-
-    @Override
-    public int getCount() {
-       return left.getCount()+ right.getCount();
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    @Override
-    public String toString() {
-        return "BooleanNode [left=" + left + ", right=" + right + "]";
-    }
-
-
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/EmailIdentifierNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/EmailIdentifierNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/EmailIdentifierNode.java
deleted file mode 100644
index 2fb1dcb..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/EmailIdentifierNode.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-import org.apache.usergrid.persistence.index.query.Identifier;
-
-
-/**
- * Class to represent a UUID based Identifier query
- *
- * @author tnine
- */
-public class EmailIdentifierNode extends QueryNode {
-
-    private final Identifier identifier;
-
-
-    public EmailIdentifierNode( Identifier identifier ) {
-        this.identifier = identifier;
-    }
-
-
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return 1;
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    public Identifier getIdentifier() {
-        return identifier;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NameIdentifierNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NameIdentifierNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NameIdentifierNode.java
deleted file mode 100644
index 75ba111..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NameIdentifierNode.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * Class to represent a UUID based Identifier query
- *
- * @author tnine
- */
-public class NameIdentifierNode extends QueryNode {
-
-    private final String name;
-
-
-    public NameIdentifierNode( String name ) {
-        this.name = name;
-    }
-
-
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return 1;
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    public String getName() {
-        return name;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NodeVisitor.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NodeVisitor.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NodeVisitor.java
deleted file mode 100644
index bfaeee3..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NodeVisitor.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/** @author tnine */
-public interface NodeVisitor {
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( AndNode node ) throws Exception;
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( NotNode node ) throws Exception;
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( OrNode node ) throws Exception;
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( SliceNode node ) throws Exception;
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( WithinNode node ) throws Exception;
-
-    /**
-     *
-     * @param node
-     * @throws Exception
-     */
-    public void visit( AllNode node ) throws Exception;
-
-    /** Visit the name identifier node */
-    public void visit( NameIdentifierNode nameIdentifierNode ) throws Exception;
-
-    /** Visit the uuid identifier node */
-    public void visit( UuidIdentifierNode uuidIdentifierNode );
-
-    /**
-     * @param orderByNode
-     * @throws Exception
-     */
-    public void visit( OrderByNode orderByNode ) throws Exception;
-
-    /** Visit the email id node */
-    public void visit( EmailIdentifierNode emailIdentifierNode ) throws Exception;
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NotNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NotNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NotNode.java
deleted file mode 100644
index 306eff3..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/NotNode.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/** @author tnine */
-public class NotNode extends QueryNode {
-
-    protected QueryNode subtractNode, keepNode;
-
-
-    /** @param keepNode may be null if there are parents to this */
-    public NotNode( QueryNode subtractNode, QueryNode keepNode ) {
-        this.subtractNode = subtractNode;
-        this.keepNode = keepNode;
-//        throw new RuntimeException( "I'm a not node" );
-    }
-
-
-    /** @return the child */
-    public QueryNode getSubtractNode() {
-        return subtractNode;
-    }
-
-
-    /** @return the all */
-    public QueryNode getKeepNode() {
-        return keepNode;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence
-     * .query.ir.NodeVisitor)
-     */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return subtractNode.getCount() + keepNode.getCount();
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    @Override
-    public String toString() {
-        return "NotNode [child=" + subtractNode + "]";
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrNode.java
deleted file mode 100644
index 2735f9b..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrNode.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * @author tnine
- */
-public class OrNode extends BooleanNode {
-
-    private final int id;
-
-
-    /**
-     * @param left
-     * @param right
-     */
-    public OrNode( QueryNode left, QueryNode right, int id ) {
-        super( left, right );
-        this.id = id;
-    }
-
-
-    /**
-     * Get the context id
-     */
-    public int getId() {
-        return this.id;
-    }
-
-
-    /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence.query.ir.NodeVisitor)
-     */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrderByNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrderByNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrderByNode.java
deleted file mode 100644
index 6364337..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/OrderByNode.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-import java.util.List;
-
-import org.apache.usergrid.persistence.index.query.Query.SortPredicate;
-
-
-/**
- * Intermediate representation of ordering operations
- *
- * @author tnine
- */
-public class OrderByNode extends QueryNode {
-
-
-    private final SliceNode firstPredicate;
-    private final List<SortPredicate> secondarySorts;
-    private final QueryNode queryOperations;
-
-
-    /**
-     * @param firstPredicate The first predicate that is in the order by statement
-     * @param secondarySorts Any subsequent terms
-     * @param queryOperations The subtree for boolean evaluation
-     */
-    public OrderByNode( SliceNode firstPredicate, List<SortPredicate> secondarySorts, QueryNode queryOperations ) {
-        this.firstPredicate = firstPredicate;
-        this.secondarySorts = secondarySorts;
-        this.queryOperations = queryOperations;
-    }
-
-
-    /** @return the sorts */
-    public List<SortPredicate> getSecondarySorts() {
-        return secondarySorts;
-    }
-
-
-    /** @return the firstPredicate */
-    public SliceNode getFirstPredicate() {
-        return firstPredicate;
-    }
-
-
-    public QueryNode getQueryOperations() {
-        return queryOperations;
-    }
-
-
-    /*
-       * (non-Javadoc)
-       *
-       * @see
-       * org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence
-       * .query.ir.NodeVisitor)
-       */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    /** Return true if this order has secondary sorts */
-    public boolean hasSecondarySorts() {
-        return secondarySorts != null && secondarySorts.size() > 0;
-    }
-
-
-    @Override
-    public int getCount() {
-        return firstPredicate.getCount() + secondarySorts.size();
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    /* (non-Javadoc)
-         * @see java.lang.Object#toString()
-         */
-    @Override
-    public String toString() {
-        return "OrderByNode [sorts=" + secondarySorts + "]";
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QueryNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QueryNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QueryNode.java
deleted file mode 100644
index 954c13f..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QueryNode.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-/**
- * The visit the node
- *
- * @author tnine
- */
-public abstract class QueryNode {
-
-    /** Visit this node */
-    public abstract void visit( NodeVisitor visitor ) throws Exception;
-
-
-    /**
-     * Get the count of the total number of slices in our tree from this node and it's children
-     */
-    public abstract int getCount();
-
-    /**
-     * True if this node should not be used in it's context in the AST, and should ignore it's hint size and always select the max
-     * @return
-     */
-    public abstract boolean ignoreHintSize();
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QuerySlice.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QuerySlice.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QuerySlice.java
deleted file mode 100644
index 77c0a6b..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/QuerySlice.java
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-import java.nio.ByteBuffer;
-
-import org.apache.usergrid.utils.NumberUtils;
-
-import me.prettyprint.hector.api.beans.AbstractComposite.ComponentEquality;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-
-import static org.apache.usergrid.utils.CompositeUtils.setEqualityFlag;
-
-
-/**
- * Node that represents a query slice operation
- *
- * @author tnine
- */
-public class QuerySlice {
-
-    private final String propertyName;
-    private final int nodeId;
-    // Object value;
-    private RangeValue start;
-    private RangeValue finish;
-    private ByteBuffer cursor;
-    private boolean reversed;
-
-
-    /**
-     * @param propertyName
-     * @param nodeId
-     */
-    public QuerySlice( String propertyName, int nodeId ) {
-        this.propertyName = propertyName;
-        this.nodeId = nodeId;
-    }
-
-
-    /** Reverse this slice. Flips the reversed switch and correctly changes the start and finish */
-    public void reverse() {
-        reversed = !reversed;
-
-        RangeValue oldStart = start;
-
-        start = finish;
-
-        finish = oldStart;
-    }
-
-
-    public String getPropertyName() {
-        return propertyName;
-    }
-
-
-    public RangeValue getStart() {
-        return start;
-    }
-
-
-    public void setStart( RangeValue start ) {
-        this.start = start;
-    }
-
-
-    public RangeValue getFinish() {
-        return finish;
-    }
-
-
-    public void setFinish( RangeValue finish ) {
-        this.finish = finish;
-    }
-
-
-    public ByteBuffer getCursor() {
-        return hasCursor() ? cursor.duplicate() : null;
-    }
-
-
-    public void setCursor( ByteBuffer cursor ) {
-        this.cursor = cursor;
-    }
-
-
-    /** True if a cursor has been set */
-    public boolean hasCursor() {
-        return this.cursor != null;
-    }
-
-
-    public boolean isReversed() {
-        return reversed;
-    }
-
-
-    /**
-     * Return true if we have a cursor and it's empty. This means that we've already returned all possible values from
-     * this slice range with our existing data in a previous invocation of search
-     */
-    public boolean isComplete() {
-        return cursor != null && cursor.remaining() == 0;
-    }
-
-
-    /**
-     * Get the slice range to be used during querying
-     *
-     * @return An array of dynamic composites to use. Index 0 is the start, index 1 is the finish. One or more could be
-     *         null
-     */
-    public DynamicComposite[] getRange() {
-        DynamicComposite startComposite = null;
-        DynamicComposite finishComposite = null;
-
-        // calc
-        if ( hasCursor() ) {
-            startComposite = DynamicComposite.fromByteBuffer( cursor.duplicate() );
-        }
-
-        else if ( start != null ) {
-            startComposite = new DynamicComposite( start.getCode(), start.getValue() );
-
-            // forward scanning from a >= 100 OR //reverse scanning from MAX to >= 100
-            if ( ( !reversed && !start.isInclusive() ) || ( reversed && start.isInclusive() ) ) {
-                setEqualityFlag( startComposite, ComponentEquality.GREATER_THAN_EQUAL );
-            }
-        }
-
-        if ( finish != null ) {
-            finishComposite = new DynamicComposite( finish.getCode(), finish.getValue() );
-
-            // forward scan to <= 100 OR reverse scan ININITY to > 100
-            if ( ( !reversed && finish.isInclusive() ) || reversed && !finish.isInclusive() ) {
-                setEqualityFlag( finishComposite, ComponentEquality.GREATER_THAN_EQUAL );
-            }
-        }
-
-        return new DynamicComposite[] { startComposite, finishComposite };
-    }
-
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + ( ( finish == null ) ? 0 : finish.hashCode() );
-        result = prime * result + ( ( propertyName == null ) ? 0 : propertyName.hashCode() );
-        result = prime * result + ( reversed ? 1231 : 1237 );
-        result = prime * result + ( ( start == null ) ? 0 : start.hashCode() );
-        result = prime * result + nodeId;
-        return result;
-    }
-
-
-    @Override
-    public boolean equals( Object obj ) {
-        if ( this == obj ) {
-            return true;
-        }
-        if ( obj == null ) {
-            return false;
-        }
-        if ( getClass() != obj.getClass() ) {
-            return false;
-        }
-        QuerySlice other = ( QuerySlice ) obj;
-        if ( finish == null ) {
-            if ( other.finish != null ) {
-                return false;
-            }
-        }
-        else if ( !finish.equals( other.finish ) ) {
-            return false;
-        }
-        if ( propertyName == null ) {
-            if ( other.propertyName != null ) {
-                return false;
-            }
-        }
-        else if ( !propertyName.equals( other.propertyName ) ) {
-            return false;
-        }
-        if ( reversed != other.reversed ) {
-            return false;
-        }
-        if ( start == null ) {
-            if ( other.start != null ) {
-                return false;
-            }
-        }
-        else if ( !start.equals( other.start ) ) {
-            return false;
-        }
-        return true;
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see java.lang.Object#toString()
-     */
-    @Override
-    public String toString() {
-        return "QuerySlice [propertyName=" + propertyName + ", start=" + start + ", finish=" + finish + ", cursor="
-                + cursor + ", reversed=" + reversed + ", nodeId=" + nodeId + "]";
-    }
-
-
-    public static class RangeValue {
-        final byte code;
-        final Object value;
-        final boolean inclusive;
-
-
-        public RangeValue( byte code, Object value, boolean inclusive ) {
-            this.code = code;
-            this.value = value;
-            this.inclusive = inclusive;
-        }
-
-
-        public byte getCode() {
-            return code;
-        }
-
-
-        public Object getValue() {
-            return value;
-        }
-
-
-        public boolean isInclusive() {
-            return inclusive;
-        }
-
-
-        @Override
-        public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + code;
-            result = prime * result + ( inclusive ? 1231 : 1237 );
-            result = prime * result + ( ( value == null ) ? 0 : value.hashCode() );
-            return result;
-        }
-
-
-        @Override
-        public boolean equals( Object obj ) {
-            if ( this == obj ) {
-                return true;
-            }
-            if ( obj == null ) {
-                return false;
-            }
-            if ( getClass() != obj.getClass() ) {
-                return false;
-            }
-            RangeValue other = ( RangeValue ) obj;
-            if ( code != other.code ) {
-                return false;
-            }
-            if ( inclusive != other.inclusive ) {
-                return false;
-            }
-            if ( value == null ) {
-                if ( other.value != null ) {
-                    return false;
-                }
-            }
-            else if ( !value.equals( other.value ) ) {
-                return false;
-            }
-            return true;
-        }
-
-
-        public int compareTo( RangeValue other, boolean finish ) {
-            if ( other == null ) {
-                return 1;
-            }
-            if ( code != other.code ) {
-                return NumberUtils.sign( code - other.code );
-            }
-            @SuppressWarnings({ "unchecked", "rawtypes" }) int c = ( ( Comparable ) value ).compareTo( other.value );
-            if ( c != 0 ) {
-                return c;
-            }
-            if ( finish ) {
-                // for finish values, inclusive means <= which is greater than <
-                if ( inclusive != other.inclusive ) {
-                    return inclusive ? 1 : -1;
-                }
-            }
-            else {
-                // for start values, inclusive means >= which is lest than >
-                if ( inclusive != other.inclusive ) {
-                    return inclusive ? -1 : 1;
-                }
-            }
-            return 0;
-        }
-
-
-        /*
-         * (non-Javadoc)
-         *
-         * @see java.lang.Object#toString()
-         */
-        @Override
-        public String toString() {
-            return "RangeValue [code=" + code + ", value=" + value + ", inclusive=" + inclusive + "]";
-        }
-
-
-        public static int compare( RangeValue v1, RangeValue v2, boolean finish ) {
-            if ( v1 == null ) {
-                if ( v2 == null ) {
-                    return 0;
-                }
-                return -1;
-            }
-            return v1.compareTo( v2, finish );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SearchVisitor.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SearchVisitor.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SearchVisitor.java
deleted file mode 100644
index f938e24..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SearchVisitor.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-import java.util.Stack;
-
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.cassandra.QueryProcessorImpl;
-import org.apache.usergrid.persistence.cassandra.index.IndexScanner;
-import org.apache.usergrid.persistence.cassandra.index.NoOpIndexScanner;
-import org.apache.usergrid.persistence.query.ir.result.EmptyIterator;
-import org.apache.usergrid.persistence.query.ir.result.IntersectionIterator;
-import org.apache.usergrid.persistence.query.ir.result.OrderByIterator;
-import org.apache.usergrid.persistence.query.ir.result.ResultIterator;
-import org.apache.usergrid.persistence.query.ir.result.SecondaryIndexSliceParser;
-import org.apache.usergrid.persistence.query.ir.result.SliceIterator;
-import org.apache.usergrid.persistence.query.ir.result.StaticIdIterator;
-import org.apache.usergrid.persistence.query.ir.result.SubtractionIterator;
-import org.apache.usergrid.persistence.query.ir.result.UnionIterator;
-
-
-/**
- * Simple search visitor that performs all the joining in memory for results.
- * <p/>
- * Subclasses will want to implement visiting SliceNode and WithinNode to actually perform the search on the Cassandra
- * indexes. This class can perform joins on all index entries that conform to the Results object
- *
- * @author tnine
- */
-public abstract class SearchVisitor implements NodeVisitor {
-
-    private static final SecondaryIndexSliceParser COLLECTION_PARSER = new SecondaryIndexSliceParser();
-
-    protected final Query query;
-
-    protected final QueryProcessorImpl queryProcessor;
-
-    protected final EntityManager em;
-
-    protected final Stack<ResultIterator> results = new Stack<ResultIterator>();
-
-
-    /**
-     * @param queryProcessor
-     */
-    public SearchVisitor( QueryProcessorImpl queryProcessor ) {
-        this.query = queryProcessor.getQuery();
-        this.queryProcessor = queryProcessor;
-        this.em = queryProcessor.getEntityManager();
-    }
-
-
-    /** Return the results if they exist, null otherwise */
-    public ResultIterator getResults() {
-        return results.isEmpty() ? null : results.pop();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.
-     * persistence.query.ir.AndNode)
-     */
-    @Override
-    public void visit( AndNode node ) throws Exception {
-        node.getLeft().visit( this );
-        node.getRight().visit( this );
-
-        ResultIterator right = results.pop();
-        ResultIterator left = results.pop();
-
-        /**
-         * NOTE: TN We should always maintain post order traversal of the tree. It
-         * is required for sorting to work correctly
-         */
-        IntersectionIterator intersection = new IntersectionIterator( queryProcessor.getPageSizeHint( node ) );
-        intersection.addIterator( left );
-        intersection.addIterator( right );
-
-        results.push( intersection );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.
-     * persistence.query.ir.NotNode)
-     */
-    @Override
-    public void visit( NotNode node ) throws Exception {
-        node.getSubtractNode().visit( this );
-        ResultIterator not = results.pop();
-
-        node.getKeepNode().visit( this );
-        ResultIterator keep = results.pop();
-
-        SubtractionIterator subtraction = new SubtractionIterator( queryProcessor.getPageSizeHint( node ) );
-        subtraction.setSubtractIterator( not );
-        subtraction.setKeepIterator( keep );
-
-        results.push( subtraction );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.
-     * persistence.query.ir.OrNode)
-     */
-    @Override
-    public void visit( OrNode node ) throws Exception {
-        node.getLeft().visit( this );
-        node.getRight().visit( this );
-
-        ResultIterator right = results.pop();
-        ResultIterator left = results.pop();
-
-        final int nodeId = node.getId();
-
-        UnionIterator union = new UnionIterator( queryProcessor.getPageSizeHint( node ), nodeId, queryProcessor.getCursorCache(nodeId  ) );
-
-        if ( left != null ) {
-            union.addIterator( left );
-        }
-        if ( right != null ) {
-            union.addIterator( right );
-        }
-
-        results.push( union );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.persistence
-     * .query.ir.OrderByNode)
-     */
-    @Override
-    public void visit( OrderByNode orderByNode ) throws Exception {
-
-        QuerySlice slice = orderByNode.getFirstPredicate().getAllSlices().iterator().next();
-
-        queryProcessor.applyCursorAndSort( slice );
-
-        QueryNode subOperations = orderByNode.getQueryOperations();
-
-        ResultIterator subResults = null;
-
-        if ( subOperations != null ) {
-            //visit our sub operation
-            subOperations.visit( this );
-
-            subResults = results.pop();
-        }
-
-        ResultIterator orderIterator;
-
-        /**
-         * We have secondary sorts, we need to evaluate the candidate results and sort them in memory
-         */
-        if ( orderByNode.hasSecondarySorts() ) {
-
-            //only order by with no query, start scanning the first field
-            if ( subResults == null ) {
-                QuerySlice firstFieldSlice = new QuerySlice( slice.getPropertyName(), -1 );
-                subResults =
-                        new SliceIterator( slice, secondaryIndexScan( orderByNode, firstFieldSlice ), COLLECTION_PARSER );
-            }
-
-            orderIterator = new OrderByIterator( slice, orderByNode.getSecondarySorts(), subResults, em,
-                    queryProcessor.getPageSizeHint( orderByNode ) );
-        }
-
-        //we don't have multi field sorting, we can simply do intersection with a single scan range
-        else {
-
-            IndexScanner scanner;
-
-            if ( slice.isComplete() ) {
-                scanner = new NoOpIndexScanner();
-            }
-            else {
-                scanner = secondaryIndexScan( orderByNode, slice );
-            }
-
-            SliceIterator joinSlice = new SliceIterator( slice, scanner, COLLECTION_PARSER);
-
-            IntersectionIterator union = new IntersectionIterator( queryProcessor.getPageSizeHint( orderByNode ) );
-            union.addIterator( joinSlice );
-
-            if ( subResults != null ) {
-                union.addIterator( subResults );
-            }
-
-            orderIterator = union;
-        }
-
-        // now create our intermediate iterator with our real results
-        results.push( orderIterator );
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.persistence
-     * .query.ir.SliceNode)
-     */
-    @Override
-    public void visit( SliceNode node ) throws Exception {
-        IntersectionIterator intersections = new IntersectionIterator( queryProcessor.getPageSizeHint( node ) );
-
-        for ( QuerySlice slice : node.getAllSlices() ) {
-            IndexScanner scanner = secondaryIndexScan( node, slice );
-
-            intersections.addIterator( new SliceIterator( slice, scanner, COLLECTION_PARSER) );
-        }
-
-        results.push( intersections );
-    }
-
-
-    /**
-     * Create a secondary index scan for the given slice node. DOES NOT apply to the "all" case. This should only
-     * generate a slice for secondary property scanning
-     */
-    protected abstract IndexScanner secondaryIndexScan( QueryNode node, QuerySlice slice ) throws Exception;
-
-
-    @Override
-    public void visit( UuidIdentifierNode uuidIdentifierNode ) {
-        this.results.push( new StaticIdIterator( uuidIdentifierNode.getUuid() ) );
-    }
-
-
-    @Override
-    public void visit( EmailIdentifierNode emailIdentifierNode ) throws Exception {
-        EntityRef user = queryProcessor.getEntityManager().getUserByIdentifier( emailIdentifierNode.getIdentifier() );
-
-        if ( user == null ) {
-            this.results.push( new EmptyIterator() );
-            return;
-        }
-
-        this.results.push( new StaticIdIterator( user.getUuid() ) );
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SliceNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SliceNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SliceNode.java
deleted file mode 100644
index fc6f53b..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/SliceNode.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.usergrid.persistence.query.ir.QuerySlice.RangeValue;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.indexValueCode;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.toIndexableValue;
-
-
-/**
- * A node which has 1 or more query Slices that can be unioned together. I.E and && operation with either 1 or more
- * children
- *
- * @author tnine
- */
-public class SliceNode extends QueryNode {
-
-    /**
-     * A context within a tree to allow for operand and range scan optimizations. In the event that the user enters a
-     * query in the following way
-     * <p/>
-     * (x > 5 and x < 15 and z > 10 and z < 20) or (y > 10 and y < 20)
-     * <p/>
-     * You will have 2 contexts. The first is for (x > 5 and x < 15 and z > 10 and z < 20), the second is for (y > 10
-     * and y < 20). This allows us to compress these operations into a single range scan per context.
-     */
-    // private class TreeContext {
-
-    private Map<String, QuerySlice> pairs = new LinkedHashMap<String, QuerySlice>();
-
-    private int id;
-
-
-    /**
-     * Set the id for construction. Just a counter. Used for creating tokens and things like tokens where the same
-     * property can be used in 2 different subtrees
-     */
-    public SliceNode( int id ) {
-        this.id = id;
-    }
-
-
-    @Override
-    public int getCount() {
-        return pairs.size();
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return pairs.size() > 1;
-    }
-
-
-    /**
-     * Set the start value. If the range pair doesn't exist, it's created
-     *
-     * @param start The start value. this will be processed and turned into an indexed value
-     */
-    public void setStart( String fieldName, Object start, boolean inclusive ) {
-        QuerySlice slice = getOrCreateSlice( fieldName );
-
-        // if the value is null don't set the range on the slice
-        if ( start == null ) {
-            return;
-        }
-
-        RangeValue existingStart = slice.getStart();
-
-        Object indexedValue = toIndexableValue( start );
-        byte code = indexValueCode( indexedValue );
-
-        RangeValue newStart = new RangeValue( code, indexedValue, inclusive );
-
-        if ( existingStart == null ) {
-            slice.setStart( newStart );
-            return;
-        }
-
-        // check if we're before the currently set start in this
-        // context. If so set the value to increase the range scan size;
-        if ( existingStart != null && newStart == null || ( existingStart != null
-                && existingStart.compareTo( newStart, false ) < 0 ) ) {
-            slice.setStart( newStart );
-        }
-    }
-
-
-    /** Set the finish. If finish value is greater than the existing, I.E. null or higher comparison, then */
-    public void setFinish( String fieldName, Object finish, boolean inclusive ) {
-        QuerySlice slice = getOrCreateSlice( fieldName );
-
-        // if the value is null don't set the range on the slice
-        if ( finish == null ) {
-            return;
-        }
-
-        RangeValue existingFinish = slice.getFinish();
-
-        Object indexedValue = toIndexableValue( finish );
-        byte code = indexValueCode( indexedValue );
-
-        RangeValue newFinish = new RangeValue( code, indexedValue, inclusive );
-
-        if ( existingFinish == null ) {
-            slice.setFinish( newFinish );
-            return;
-        }
-
-        // check if we're before the currently set start in this
-        // context. If so set the value to increase the range scan size;
-        if ( existingFinish != null && newFinish == null || ( existingFinish != null
-                && existingFinish.compareTo( newFinish, false ) < 0 ) ) {
-            slice.setFinish( newFinish );
-        }
-    }
-
-
-    /** Lazy instanciate a field pair if required. Otherwise return the existing pair */
-    private QuerySlice getOrCreateSlice( String fieldName ) {
-        QuerySlice pair = this.pairs.get( fieldName );
-
-        if ( pair == null ) {
-            pair = new QuerySlice( fieldName, id );
-            this.pairs.put( fieldName, pair );
-        }
-
-        return pair;
-    }
-
-
-    /** Get the slice by field name if it exists. Null otherwise */
-    public QuerySlice getSlice( String fieldName ) {
-        return this.pairs.get( fieldName );
-    }
-
-
-    /** Get all slices in our context */
-    public Collection<QuerySlice> getAllSlices() {
-        return this.pairs.values();
-    }
-
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.usergrid.persistence.query.ir.QueryNode#visit(org.apache.usergrid.persistence
-     * .query.ir.NodeVisitor)
-     */
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public String toString() {
-        return "SliceNode [pairs=" + pairs + ", id=" + id + "]";
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/UuidIdentifierNode.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/UuidIdentifierNode.java b/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/UuidIdentifierNode.java
deleted file mode 100644
index 79a6217..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/persistence/query/ir/UuidIdentifierNode.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.usergrid.persistence.query.ir;
-
-
-import java.util.UUID;
-
-
-/**
- * Class to represent a UUID based Identifier query
- *
- * @author tnine
- */
-public class UuidIdentifierNode extends QueryNode {
-
-
-    private final UUID uuid;
-
-
-    public UuidIdentifierNode( UUID uuid ) {
-        this.uuid = uuid;
-    }
-
-
-    @Override
-    public void visit( NodeVisitor visitor ) throws Exception {
-        visitor.visit( this );
-    }
-
-
-    @Override
-    public int getCount() {
-        return 1;
-    }
-
-
-    @Override
-    public boolean ignoreHintSize() {
-        return false;
-    }
-
-
-    public UUID getUuid() {
-        return uuid;
-    }
-}


[10/10] incubator-usergrid git commit: First pass at removing unnecessary 1.0 files.

Posted by to...@apache.org.
First pass at removing unnecessary 1.0 files.


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/bd743734
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/bd743734
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/bd743734

Branch: refs/heads/USERGRID-390
Commit: bd7437344122d060806b2c038d84c871a6196db1
Parents: 1203f55
Author: Todd Nine <tn...@apigee.com>
Authored: Thu Feb 12 16:15:54 2015 -0700
Committer: Todd Nine <tn...@apigee.com>
Committed: Thu Feb 12 16:15:54 2015 -0700

----------------------------------------------------------------------
 .../corepersistence/CpEntityManager.java        |   63 +-
 .../corepersistence/CpQueryProcessor.java       |   94 -
 .../corepersistence/CpRelationManager.java      | 1076 +------
 .../HybridEntityManagerFactory.java             |  218 --
 .../usergrid/corepersistence/HybridSetup.java   |   72 -
 .../usergrid/persistence/EntityManager.java     |  150 +-
 .../persistence/MultiQueryIterator.java         |    6 +-
 .../usergrid/persistence/RelationManager.java   |   23 +-
 .../apache/usergrid/persistence/Results.java    |   23 +-
 .../persistence/cassandra/CassandraService.java |   42 -
 .../cassandra/EntityManagerFactoryImpl.java     |  490 ---
 .../cassandra/EntityManagerImpl.java            | 2937 ------------------
 .../persistence/cassandra/GeoIndexManager.java  |  330 --
 .../persistence/cassandra/IndexUpdate.java      |    2 +
 .../persistence/cassandra/QueryProcessor.java   |   60 -
 .../cassandra/QueryProcessorImpl.java           |  727 -----
 .../cassandra/RelationManagerImpl.java          | 2338 --------------
 .../persistence/cassandra/SetupImpl.java        |  191 --
 .../cassandra/index/ConnectedIndexScanner.java  |  282 --
 .../cassandra/index/IndexBucketScanner.java     |  240 --
 .../index/IndexMultiBucketSetLoader.java        |  139 -
 .../cassandra/index/IndexScanner.java           |   40 -
 .../cassandra/index/NoOpIndexScanner.java       |   95 -
 .../persistence/geo/CollectionGeoSearch.java    |   68 -
 .../persistence/geo/ConnectionGeoSearch.java    |   67 -
 .../persistence/geo/EntityLocationRef.java      |  227 --
 .../EntityLocationRefDistanceComparator.java    |   78 -
 .../persistence/geo/GeoIndexSearcher.java       |  370 ---
 .../persistence/geo/GeocellManager.java         |  195 --
 .../usergrid/persistence/geo/GeocellUtils.java  |  543 ----
 .../geo/comparator/DoubleTupleComparator.java   |   39 -
 .../persistence/geo/model/BoundingBox.java      |   74 -
 .../persistence/geo/model/CostFunction.java     |   36 -
 .../geo/model/DefaultCostFunction.java          |   37 -
 .../usergrid/persistence/geo/model/Tuple.java   |   40 -
 .../usergrid/persistence/query/ir/AllNode.java  |   82 -
 .../usergrid/persistence/query/ir/AndNode.java  |   44 -
 .../persistence/query/ir/BooleanNode.java       |   65 -
 .../query/ir/EmailIdentifierNode.java           |   58 -
 .../query/ir/NameIdentifierNode.java            |   56 -
 .../persistence/query/ir/NodeVisitor.java       |   79 -
 .../usergrid/persistence/query/ir/NotNode.java  |   75 -
 .../usergrid/persistence/query/ir/OrNode.java   |   53 -
 .../persistence/query/ir/OrderByNode.java       |  105 -
 .../persistence/query/ir/QueryNode.java         |   41 -
 .../persistence/query/ir/QuerySlice.java        |  345 --
 .../persistence/query/ir/SearchVisitor.java     |  270 --
 .../persistence/query/ir/SliceNode.java         |  180 --
 .../query/ir/UuidIdentifierNode.java            |   60 -
 .../persistence/query/ir/WithinNode.java        |  109 -
 .../query/ir/result/AbstractScanColumn.java     |   83 -
 .../result/CollectionResultsLoaderFactory.java  |   41 -
 .../ir/result/ConnectionIndexSliceParser.java   |   87 -
 .../query/ir/result/ConnectionRefLoader.java    |   81 -
 .../result/ConnectionResultsLoaderFactory.java  |   50 -
 .../ir/result/ConnectionTypesIterator.java      |  190 --
 .../query/ir/result/EmptyIterator.java          |   63 -
 .../query/ir/result/EntityRefLoader.java        |   51 -
 .../query/ir/result/EntityResultsLoader.java    |   47 -
 .../query/ir/result/GeoIterator.java            |  351 ---
 .../persistence/query/ir/result/IDLoader.java   |   40 -
 .../query/ir/result/IntersectionIterator.java   |  170 -
 .../query/ir/result/MergeIterator.java          |  150 -
 .../query/ir/result/MultiIterator.java          |   55 -
 .../query/ir/result/OrderByIterator.java        |  250 --
 .../query/ir/result/ResultIterator.java         |   44 -
 .../query/ir/result/ResultsLoader.java          |   30 -
 .../query/ir/result/ResultsLoaderFactory.java   |   37 -
 .../persistence/query/ir/result/ScanColumn.java |   32 -
 .../query/ir/result/ScanColumnTransformer.java  |   39 -
 .../ir/result/SecondaryIndexSliceParser.java    |   61 -
 .../query/ir/result/SliceIterator.java          |  242 --
 .../query/ir/result/SliceParser.java            |   32 -
 .../query/ir/result/StaticIdIterator.java       |   82 -
 .../query/ir/result/SubtractionIterator.java    |  113 -
 .../query/ir/result/UUIDIndexSliceParser.java   |   47 -
 .../query/ir/result/UnionIterator.java          |  264 --
 .../main/resources/usergrid-core-context.xml    |   38 +-
 .../apache/usergrid/persistence/IndexIT.java    |   43 +-
 .../cassandra/EntityManagerFactoryImplIT.java   |  214 --
 .../cassandra/QueryProcessorTest.java           |  823 -----
 .../SimpleIndexShardLocatorImplTest.java        |  177 --
 .../cassandra/util/TraceTagUnitTest.java        |   54 -
 ...EntityLocationRefDistanceComparatorTest.java |  103 -
 .../query/IntersectionUnionPagingIT.java        |   10 +-
 .../persistence/query/NotSubPropertyIT.java     |    5 +-
 .../query/ir/result/AbstractScanColumnTest.java |  100 -
 .../query/ir/result/InOrderIterator.java        |  131 -
 .../ir/result/IntersectionIteratorTest.java     |  307 --
 .../query/ir/result/IteratorHelper.java         |   34 -
 .../ir/result/SubtractionIteratorTest.java      |  181 --
 .../query/ir/result/UnionIteratorTest.java      |  467 ---
 .../test/resources/usergrid-test-context.xml    |    5 +-
 stack/pom.xml                                   |    3 +-
 .../test/resources/usergrid-test-context.xml    |    3 +-
 95 files changed, 130 insertions(+), 18334 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
index a0a9283..ad61a1f 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpEntityManager.java
@@ -56,8 +56,6 @@ import org.apache.usergrid.persistence.RelationManager;
 import org.apache.usergrid.persistence.Results;
 import org.apache.usergrid.persistence.Schema;
 import org.apache.usergrid.persistence.SimpleEntityRef;
-import static org.apache.usergrid.persistence.SimpleEntityRef.getUuid;
-
 import org.apache.usergrid.persistence.SimpleRoleRef;
 import org.apache.usergrid.persistence.TypedEntity;
 import org.apache.usergrid.persistence.cassandra.ApplicationCF;
@@ -65,7 +63,6 @@ import org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils;
 import org.apache.usergrid.persistence.cassandra.CassandraService;
 import org.apache.usergrid.persistence.cassandra.ConnectionRefImpl;
 import org.apache.usergrid.persistence.cassandra.CounterUtils;
-import org.apache.usergrid.persistence.cassandra.GeoIndexManager;
 import org.apache.usergrid.persistence.cassandra.util.TraceParticipant;
 import org.apache.usergrid.persistence.collection.CollectionScope;
 import org.apache.usergrid.persistence.collection.EntityCollectionManager;
@@ -660,13 +657,6 @@ public class CpEntityManager implements EntityManager {
 
 
     @Override
-    public GeoIndexManager getGeoIndexManager() {
-
-        throw new UnsupportedOperationException( "GeoIndexManager no longer supported." );
-    }
-
-
-    @Override
     public EntityRef getApplicationRef() {
         return new SimpleEntityRef( TYPE_APPLICATION, applicationId );
     }
@@ -689,13 +679,6 @@ public class CpEntityManager implements EntityManager {
 
 
     @Override
-    public void updateApplication( Map<String, Object> properties ) throws Exception {
-        this.updateProperties( new SimpleEntityRef( Application.ENTITY_TYPE, applicationId ), properties );
-        this.application = get( applicationId, Application.class );
-    }
-
-
-    @Override
     public RelationManager getRelationManager( EntityRef entityRef ) {
         CpRelationManager rmi = new CpRelationManager();
         rmi.init( this, emf, applicationId, entityRef, null );
@@ -882,13 +865,6 @@ public class CpEntityManager implements EntityManager {
 
 
     @Override
-    public List<Entity> getPartialEntities(
-            Collection<UUID> ids, Collection<String> properties ) throws Exception {
-        throw new UnsupportedOperationException( "Not supported yet." );
-    }
-
-
-    @Override
     public Map<String, Object> getProperties( EntityRef entityRef ) throws Exception {
 
         Entity entity = get( entityRef );
@@ -1272,11 +1248,6 @@ public class CpEntityManager implements EntityManager {
     }
 
 
-    @Override
-    public Map<String, Map<UUID, Set<String>>> getOwners( EntityRef entityRef ) throws Exception {
-
-        return getRelationManager( entityRef ).getOwners();
-    }
 
 
     @Override
@@ -1802,12 +1773,6 @@ public class CpEntityManager implements EntityManager {
     }
 
 
-    @Override
-    public Map<String, String> getUserGroupRoles( UUID userId, UUID groupId ) throws Exception {
-        // TODO this never returns anything - write path not invoked
-        EntityRef userRef = userRef( userId );
-        return cast( getDictionaryAsMap( userRef, DICTIONARY_ROLENAMES ) );
-    }
 
 
     @Override
@@ -1821,16 +1786,6 @@ public class CpEntityManager implements EntityManager {
     }
 
 
-    @Override
-    public void removeUserFromGroupRole( UUID userId, UUID groupId, String roleName ) throws Exception {
-        roleName = roleName.toLowerCase();
-        EntityRef memberRef = userRef( userId );
-        EntityRef roleRef = getRoleRef( roleName );
-        removeFromDictionary( memberRef, DICTIONARY_ROLENAMES, roleName );
-        removeFromCollection( memberRef, COLLECTION_ROLES, roleRef );
-        removeFromCollection( roleRef, COLLECTION_USERS, userRef( userId ) );
-    }
-
 
     @Override
     public Results getUsersInGroupRole( UUID groupId, String roleName, Level level ) throws Exception {
@@ -2625,11 +2580,7 @@ public class CpEntityManager implements EntityManager {
         boolean entityHasDictionary = Schema.getDefaultSchema()
                 .hasDictionary( entity.getType(), dictionaryName );
 
-        // Don't index dynamic dictionaries not defined by the schema
-        if ( entityHasDictionary ) {
-            getRelationManager( entity ).batchUpdateSetIndexes(
-                    batch, dictionaryName, elementValue, removeFromDictionary, timestampUuid );
-        }
+
 
         ApplicationCF dictionary_cf = entityHasDictionary
                 ? ENTITY_DICTIONARIES : ENTITY_COMPOSITE_DICTIONARIES;
@@ -2674,12 +2625,6 @@ public class CpEntityManager implements EntityManager {
     }
 
 
-    @Override
-    public Mutator<ByteBuffer> batchUpdateProperties( Mutator<ByteBuffer> batch, EntityRef entity,
-            Map<String, Object> properties, UUID timestampUuid ) throws Exception {
-
-        throw new UnsupportedOperationException( "Not supported yet." );
-    }
 
 
     //TODO: ask what the difference is.
@@ -2708,12 +2653,6 @@ public class CpEntityManager implements EntityManager {
     }
 
 
-    @Override
-    public void insertEntity( EntityRef ref ) throws Exception {
-
-        throw new UnsupportedOperationException( "Not supported yet." );
-    }
-
 
     @Override
     public UUID getApplicationId() {

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpQueryProcessor.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpQueryProcessor.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpQueryProcessor.java
deleted file mode 100644
index 16d4085..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpQueryProcessor.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2014 The Apache Software Foundation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.usergrid.corepersistence;
-
-import java.nio.ByteBuffer;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityRef;
-import org.apache.usergrid.persistence.index.query.Query;
-import org.apache.usergrid.persistence.Results;
-import org.apache.usergrid.persistence.cassandra.QueryProcessor;
-import org.apache.usergrid.persistence.query.ir.QueryNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
-import org.apache.usergrid.persistence.schema.CollectionInfo;
-
-
-public class CpQueryProcessor implements QueryProcessor {
-
-    Query query;
-    EntityManager em;
-    EntityRef entityRef;
-    String collectionName;
-
-
-    public CpQueryProcessor( 
-            EntityManager em, Query query, EntityRef entityRef, String collectionName ) {
-
-        this.em = em;
-        this.query = query;
-        this.entityRef = entityRef;
-        this.collectionName = collectionName;
-    }
-
-    @Override
-    public void applyCursorAndSort(QuerySlice slice) {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-
-    @Override
-    public CollectionInfo getCollectionInfo() {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-
-    @Override
-    public ByteBuffer getCursorCache(int nodeId) {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-
-    @Override
-    public EntityManager getEntityManager() {
-        return em;
-    }
-
-    @Override
-    public QueryNode getFirstNode() {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-
-    @Override
-    public int getPageSizeHint(QueryNode node) {
-        throw new UnsupportedOperationException("Not supported yet."); 
-    }
-
-    @Override
-    public Query getQuery() {
-        return query;
-    }
-
-    @Override
-    public Results getResults(SearchVisitor visitor) throws Exception {
-        return em.searchCollection( entityRef, collectionName, query);
-    }
-
-    @Override
-    public void setQuery(Query query) {
-        this.query = query;
-    }
-
-    
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
index 5039a41..ecdc845 100644
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
+++ b/stack/core/src/main/java/org/apache/usergrid/corepersistence/CpRelationManager.java
@@ -17,10 +17,7 @@
 package org.apache.usergrid.corepersistence;
 
 
-import java.nio.ByteBuffer;
-import java.util.AbstractMap;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -33,7 +30,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.util.Assert;
 
-import org.apache.usergrid.corepersistence.results.ResultsLoader;
 import org.apache.usergrid.corepersistence.results.ResultsLoaderFactory;
 import org.apache.usergrid.corepersistence.results.ResultsLoaderFactoryImpl;
 import org.apache.usergrid.corepersistence.util.CpEntityMapUtils;
@@ -44,7 +40,6 @@ import org.apache.usergrid.persistence.Entity;
 import org.apache.usergrid.persistence.EntityManager;
 import org.apache.usergrid.persistence.EntityRef;
 import org.apache.usergrid.persistence.IndexBucketLocator;
-import org.apache.usergrid.persistence.PagingResultsIterator;
 import org.apache.usergrid.persistence.RelationManager;
 import org.apache.usergrid.persistence.Results;
 import org.apache.usergrid.persistence.RoleRef;
@@ -53,19 +48,10 @@ import org.apache.usergrid.persistence.SimpleEntityRef;
 import org.apache.usergrid.persistence.SimpleRoleRef;
 import org.apache.usergrid.persistence.cassandra.CassandraService;
 import org.apache.usergrid.persistence.cassandra.ConnectionRefImpl;
-import org.apache.usergrid.persistence.cassandra.IndexUpdate;
-import org.apache.usergrid.persistence.cassandra.QueryProcessorImpl;
-import org.apache.usergrid.persistence.cassandra.index.ConnectedIndexScanner;
-import org.apache.usergrid.persistence.cassandra.index.IndexBucketScanner;
-import org.apache.usergrid.persistence.cassandra.index.IndexScanner;
-import org.apache.usergrid.persistence.cassandra.index.NoOpIndexScanner;
 import org.apache.usergrid.persistence.collection.CollectionScope;
 import org.apache.usergrid.persistence.core.scope.ApplicationScope;
 import org.apache.usergrid.persistence.entities.Group;
 import org.apache.usergrid.persistence.entities.User;
-import org.apache.usergrid.persistence.geo.ConnectionGeoSearch;
-import org.apache.usergrid.persistence.geo.EntityLocationRef;
-import org.apache.usergrid.persistence.geo.model.Point;
 import org.apache.usergrid.persistence.graph.Edge;
 import org.apache.usergrid.persistence.graph.GraphManager;
 import org.apache.usergrid.persistence.graph.SearchByEdgeType;
@@ -85,22 +71,7 @@ import org.apache.usergrid.persistence.index.query.Query;
 import org.apache.usergrid.persistence.index.query.Query.Level;
 import org.apache.usergrid.persistence.model.entity.Id;
 import org.apache.usergrid.persistence.model.entity.SimpleId;
-import org.apache.usergrid.persistence.model.util.UUIDGenerator;
-import org.apache.usergrid.persistence.query.ir.AllNode;
-import org.apache.usergrid.persistence.query.ir.NameIdentifierNode;
-import org.apache.usergrid.persistence.query.ir.QueryNode;
-import org.apache.usergrid.persistence.query.ir.QuerySlice;
-import org.apache.usergrid.persistence.query.ir.SearchVisitor;
-import org.apache.usergrid.persistence.query.ir.WithinNode;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionIndexSliceParser;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionResultsLoaderFactory;
-import org.apache.usergrid.persistence.query.ir.result.ConnectionTypesIterator;
-import org.apache.usergrid.persistence.query.ir.result.EmptyIterator;
-import org.apache.usergrid.persistence.query.ir.result.GeoIterator;
-import org.apache.usergrid.persistence.query.ir.result.SliceIterator;
-import org.apache.usergrid.persistence.query.ir.result.StaticIdIterator;
 import org.apache.usergrid.persistence.schema.CollectionInfo;
-import org.apache.usergrid.utils.IndexUtils;
 import org.apache.usergrid.utils.MapUtils;
 import org.apache.usergrid.utils.UUIDUtils;
 
@@ -108,53 +79,22 @@ import com.google.common.base.Preconditions;
 import com.yammer.metrics.annotation.Metered;
 
 import me.prettyprint.hector.api.Keyspace;
-import me.prettyprint.hector.api.beans.DynamicComposite;
-import me.prettyprint.hector.api.beans.HColumn;
-import me.prettyprint.hector.api.mutation.Mutator;
 import rx.Observable;
 import rx.functions.Action1;
 import rx.functions.Func1;
 
-import static java.util.Arrays.asList;
-
-import static me.prettyprint.hector.api.factory.HFactory.createMutator;
 import static org.apache.usergrid.corepersistence.util.CpNamingUtils.getCollectionScopeNameFromEntityType;
 import static org.apache.usergrid.persistence.Schema.COLLECTION_ROLES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTED_ENTITIES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTED_TYPES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTING_ENTITIES;
-import static org.apache.usergrid.persistence.Schema.DICTIONARY_CONNECTING_TYPES;
-import static org.apache.usergrid.persistence.Schema.INDEX_CONNECTIONS;
 import static org.apache.usergrid.persistence.Schema.PROPERTY_CREATED;
 import static org.apache.usergrid.persistence.Schema.PROPERTY_INACTIVITY;
 import static org.apache.usergrid.persistence.Schema.PROPERTY_NAME;
 import static org.apache.usergrid.persistence.Schema.PROPERTY_TITLE;
-import static org.apache.usergrid.persistence.Schema.TYPE_APPLICATION;
 import static org.apache.usergrid.persistence.Schema.TYPE_ENTITY;
 import static org.apache.usergrid.persistence.Schema.TYPE_ROLE;
 import static org.apache.usergrid.persistence.Schema.getDefaultSchema;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_COMPOSITE_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_DICTIONARIES;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX;
-import static org.apache.usergrid.persistence.cassandra.ApplicationCF.ENTITY_INDEX_ENTRIES;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addDeleteToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.addInsertToMutator;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.batchExecute;
-import static org.apache.usergrid.persistence.cassandra.CassandraPersistenceUtils.key;
-import static org.apache.usergrid.persistence.cassandra.CassandraService.INDEX_ENTRY_LIST_COUNT;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchDeleteLocationInConnectionsIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchRemoveLocationFromCollectionIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchStoreLocationInCollectionIndex;
-import static org.apache.usergrid.persistence.cassandra.GeoIndexManager.batchStoreLocationInConnectionsIndex;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.indexValueCode;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.toIndexableValue;
-import static org.apache.usergrid.persistence.cassandra.IndexUpdate.validIndexableValue;
-import static org.apache.usergrid.persistence.cassandra.Serializers.be;
 import static org.apache.usergrid.utils.ClassUtils.cast;
-import static org.apache.usergrid.utils.CompositeUtils.setGreaterThanEqualityFlag;
 import static org.apache.usergrid.utils.InflectionUtils.singularize;
 import static org.apache.usergrid.utils.MapUtils.addMapSet;
-import static org.apache.usergrid.utils.UUIDUtils.getTimestampInMicros;
 
 
 /**
@@ -409,14 +349,14 @@ public class CpRelationManager implements RelationManager {
                         if ( CpNamingUtils.isCollectionEdgeType( edge.getType() ) ) {
 
                             String collName = CpNamingUtils.getCollectionName( edge.getType() );
-                            indexScope = new IndexScopeImpl( 
+                            indexScope = new IndexScopeImpl(
                                 new SimpleId( sourceEntity.getUuid(), sourceEntity.getType()),
                                 CpNamingUtils.getCollectionScopeNameFromCollectionName( collName ));
                         }
                         else {
 
                             String connName = CpNamingUtils.getConnectionType( edge.getType() );
-                            indexScope = new IndexScopeImpl( 
+                            indexScope = new IndexScopeImpl(
                                 new SimpleId( sourceEntity.getUuid(), sourceEntity.getType() ),
                                 CpNamingUtils.getConnectionScopeName( connName ) );
                         }
@@ -600,14 +540,14 @@ public class CpRelationManager implements RelationManager {
     }
 
 
-    public Entity addToCollection( String collName, EntityRef itemRef, boolean connectBack ) 
+    public Entity addToCollection( String collName, EntityRef itemRef, boolean connectBack )
             throws Exception {
 
         CollectionScope memberScope = getCollectionScopeNameFromEntityType(
                 applicationScope.getApplication(), itemRef.getType());
 
-        Id entityId = new SimpleId( itemRef.getUuid(), itemRef.getType() ); 
-        org.apache.usergrid.persistence.model.entity.Entity memberEntity = 
+        Id entityId = new SimpleId( itemRef.getUuid(), itemRef.getType() );
+        org.apache.usergrid.persistence.model.entity.Entity memberEntity =
             ((CpEntityManager)em).load( new CpEntityManager.EntityScope( memberScope, entityId));
 
         return addToCollection(collName, itemRef, memberEntity, connectBack);
@@ -697,7 +637,7 @@ public class CpRelationManager implements RelationManager {
         //            headEntityScope.getName()});
 
         if ( connectBack && collection != null && collection.getLinkedCollection() != null ) {
-            getRelationManager( itemEntity ).addToCollection( 
+            getRelationManager( itemEntity ).addToCollection(
                     collection.getLinkedCollection(), headEntity, cpHeadEntity, false );
             getRelationManager( itemEntity ).addToCollection(
                     collection.getLinkedCollection(), headEntity, false );
@@ -819,7 +759,7 @@ public class CpRelationManager implements RelationManager {
 
         batch.deindex( indexScope, memberEntity );
 
-        // remove collection from item index 
+        // remove collection from item index
         IndexScope itemScope = new IndexScopeImpl(
             memberEntity.getId(),
             CpNamingUtils.getCollectionScopeNameFromCollectionName(
@@ -830,7 +770,7 @@ public class CpRelationManager implements RelationManager {
 
         batch.execute();
 
-        // remove edge from collection to item 
+        // remove edge from collection to item
         GraphManager gm = managerCache.getGraphManager( applicationScope );
         Edge collectionToItemEdge = new SimpleEdge(
                 cpHeadEntity.getId(),
@@ -938,8 +878,8 @@ public class CpRelationManager implements RelationManager {
         query.setEntityType( collection.getType() );
         query = adjustQuery( query );
 
-        // Because of possible stale entities, which are filtered out by buildResults(), 
-        // we loop until the we've got enough results to satisfy the query limit. 
+        // Because of possible stale entities, which are filtered out by buildResults(),
+        // we loop until the we've got enough results to satisfy the query limit.
 
         int maxQueries = 10; // max re-queries to satisfy query limit
 
@@ -1048,131 +988,13 @@ public class CpRelationManager implements RelationManager {
 
         batch.index( indexScope, targetEntity );
 
-        // Index the new connection in app|scope|all-types context
-        //TODO REMOVE INDEX CODE
-//        IndexScope allTypesIndexScope = new IndexScopeImpl( cpHeadEntity.getId(), CpNamingUtils.ALL_TYPES, entityType );
-//        batch.index( allTypesIndexScope, targetEntity );
-
-
         batch.execute();
 
-        Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = createMutator( ko, be );
-        batchUpdateEntityConnection( m, false, connection, UUIDGenerator.newTimeUUID() );
-        batchExecute( m, CassandraService.RETRY_COUNT );
 
         return connection;
     }
 
 
-    @SuppressWarnings( "unchecked" )
-    @Metered( group = "core", name = "CpRelationManager_batchUpdateEntityConnection" )
-    public Mutator<ByteBuffer> batchUpdateEntityConnection(
-            Mutator<ByteBuffer> batch,
-            boolean disconnect,
-            ConnectionRefImpl conn,
-            UUID timestampUuid ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        Entity connectedEntity = em.get(new SimpleEntityRef(
-                conn.getConnectedEntityType(), conn.getConnectedEntityId() ) );
-
-        if ( connectedEntity == null ) {
-            return batch;
-        }
-
-        // Create connection for requested params
-
-        if ( disconnect ) {
-
-            addDeleteToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
-                key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                        conn.getConnectionType() ),
-                asList(conn.getConnectedEntityId(), conn.getConnectedEntityType() ), timestamp );
-
-            addDeleteToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
-                key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                        conn.getConnectionType() ),
-                asList(conn.getConnectingEntityId(), conn.getConnectingEntityType() ), timestamp );
-
-            // delete the connection path if there will be no connections left
-
-            // check out outbound edges of the given type.  If we have more than the 1 specified,
-            // we shouldn't delete the connection types from our outbound index
-            if ( !moreThanOneOutboundConnection(conn.getConnectingEntity(), conn.getConnectionType() ) ) {
-
-                addDeleteToMutator(batch, ENTITY_DICTIONARIES,
-                        key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                        conn.getConnectionType(), timestamp );
-            }
-
-            //check out inbound edges of the given type.  If we have more than the 1 specified,
-            // we shouldn't delete the connection types from our outbound index
-            if ( !moreThanOneInboundConnection(conn.getConnectingEntity(), conn.getConnectionType() ) ) {
-
-                addDeleteToMutator(batch, ENTITY_DICTIONARIES,
-                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                    conn.getConnectionType(), timestamp );
-        }
-        }
-        else {
-
-            addInsertToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_ENTITIES,
-                            conn.getConnectionType() ),
-                    asList(conn.getConnectedEntityId(), conn.getConnectedEntityType() ), timestamp,
-                    timestamp );
-
-            addInsertToMutator(batch, ENTITY_COMPOSITE_DICTIONARIES,
-                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_ENTITIES,
-                            conn.getConnectionType() ),
-                    asList(conn.getConnectingEntityId(), conn.getConnectingEntityType() ), timestamp,
-                    timestamp );
-
-            // Add connection type to connections set
-            addInsertToMutator(batch, ENTITY_DICTIONARIES,
-                    key(conn.getConnectingEntityId(), DICTIONARY_CONNECTED_TYPES ),
-                    conn.getConnectionType(), null, timestamp );
-
-            // Add connection type to connections set
-            addInsertToMutator(batch, ENTITY_DICTIONARIES,
-                    key(conn.getConnectedEntityId(), DICTIONARY_CONNECTING_TYPES ),
-                    conn.getConnectionType(), null, timestamp );
-        }
-
-        // Add indexes for the connected entity's list properties
-
-        // Get the names of the list properties in the connected entity
-        Set<String> dictionaryNames = em.getDictionaryNames( connectedEntity );
-
-        // For each list property, get the values in the list and
-        // update the index with those values
-
-        Schema schema = getDefaultSchema();
-
-        for ( String dictionaryName : dictionaryNames ) {
-
-            boolean has_dictionary = schema.hasDictionary(
-                    connectedEntity.getType(), dictionaryName );
-
-            boolean dictionary_indexed = schema.isDictionaryIndexedInConnections(
-                    connectedEntity.getType(), dictionaryName );
-
-            if ( dictionary_indexed || !has_dictionary ) {
-                Set<Object> elementValues = em.getDictionaryAsSet( connectedEntity, dictionaryName );
-                for ( Object elementValue : elementValues ) {
-                    IndexUpdate indexUpdate = batchStartIndexUpdate(
-                            batch, connectedEntity, dictionaryName, elementValue,
-                            timestampUuid, has_dictionary, true, disconnect, false );
-                    batchUpdateConnectionIndex(indexUpdate, conn );
-                }
-            }
-        }
-
-        return batch;
-    }
-
 
     @Override
     @Metered( group = "core", name = "RelationManager_createConnection_paired_connection_type" )
@@ -1194,15 +1016,6 @@ public class CpRelationManager implements RelationManager {
     }
 
 
-    @Override
-    public ConnectionRef connectionRef(
-            String connectionType, EntityRef connectedEntityRef ) throws Exception {
-
-        ConnectionRef connection = new ConnectionRefImpl( headEntity, connectionType, connectedEntityRef );
-
-        return connection;
-    }
-
 
     @Override
     public ConnectionRef connectionRef(
@@ -1227,10 +1040,7 @@ public class CpRelationManager implements RelationManager {
 
         // First, clean up the dictionary records of the connection
         Keyspace ko = cass.getApplicationKeyspace( applicationId );
-        Mutator<ByteBuffer> m = createMutator( ko, be );
-        batchUpdateEntityConnection(
-                m, true, ( ConnectionRefImpl ) connectionRef, UUIDGenerator.newTimeUUID() );
-        batchExecute( m, CassandraService.RETRY_COUNT );
+
 
         EntityRef connectingEntityRef = connectionRef.getConnectingEntity();  // source
         EntityRef connectedEntityRef = connectionRef.getConnectedEntity();  // target
@@ -1289,11 +1099,6 @@ public class CpRelationManager implements RelationManager {
     }
 
 
-    @Override
-    public Set<String> getConnectionTypes( UUID connectedEntityId ) throws Exception {
-        throw new UnsupportedOperationException( "Cannot specify entity by UUID alone." );
-    }
-
 
     @Override
     public Set<String> getConnectionTypes() throws Exception {
@@ -1336,7 +1141,7 @@ public class CpRelationManager implements RelationManager {
             headEntity = em.validate( headEntity );
 
 
-            IndexScope indexScope = new IndexScopeImpl( 
+            IndexScope indexScope = new IndexScopeImpl(
                     cpHeadEntity.getId(), CpNamingUtils.getConnectionScopeName( connectionType ) );
 
             final SearchTypes searchTypes = SearchTypes.fromNullableTypes( connectedEntityType );
@@ -1458,7 +1263,7 @@ public class CpRelationManager implements RelationManager {
 
                 Identifier ident = query.getSingleIdentifier();
 
-                // an email was specified.  An edge case that only applies to users.  
+                // an email was specified.  An edge case that only applies to users.
                 // This is fulgy to put here, but required.
                 if ( query.getEntityType().equals( User.ENTITY_TYPE ) && ident.isEmail() ) {
 
@@ -1568,846 +1373,33 @@ public class CpRelationManager implements RelationManager {
      * @param crs Candidates to be considered for results
      * @param collName Name of collection or null if querying all types
      */
-    private Results buildResults( final IndexScope indexScope, final Query query, 
+    private Results buildResults( final IndexScope indexScope, final Query query,
             final CandidateResults crs, final String collName ) {
 
-        logger.debug( "buildResults() for {} from {} candidates", collName, crs.size() );
-
-        //get an instance of our results loader
-        final ResultsLoader resultsLoader = this.resultsLoaderFactory.getLoader(
-                applicationScope, indexScope, query.getResultsLevel() );
-
-        //load the results
-        final Results results = resultsLoader.loadResults( crs );
-
-        //signal for post processing
-        resultsLoader.postProcess();
-
-
-        results.setCursor( crs.getCursor() );
-        results.setQueryProcessor( new CpQueryProcessor( em, query, headEntity, collName ) );
-
-        logger.debug( "Returning results size {}", results.size() );
-
-        return results;
-    }
-
-
-    @Override
-    public void batchUpdateSetIndexes( Mutator<ByteBuffer> batch, String setName, Object elementValue,
-                                       boolean removeFromSet, UUID timestampUuid ) throws Exception {
-
-        Entity entity = getHeadEntity();
-
-        elementValue = getDefaultSchema()
-                .validateEntitySetValue( entity.getType(), setName, elementValue );
-
-        IndexUpdate indexUpdate = batchStartIndexUpdate( batch, entity, setName, elementValue,
-                timestampUuid, true, true, removeFromSet, false );
-
-        // Update collections 
-
-        Map<String, Set<CollectionInfo>> containers =
-                getDefaultSchema().getContainersIndexingDictionary( entity.getType(), setName );
-
-        if ( containers != null ) {
-            Map<EntityRef, Set<String>> containerEntities = getContainers();
-            for ( EntityRef containerEntity : containerEntities.keySet() ) {
-                if ( containerEntity.getType().equals( TYPE_APPLICATION )
-                        && Schema.isAssociatedEntityType( entity.getType() ) ) {
-                    logger.debug( "Extended properties for {} not indexed by application", entity.getType() );
-                    continue;
-                }
-                Set<String> collectionNames = containerEntities.get( containerEntity );
-                Set<CollectionInfo> collections = containers.get( containerEntity.getType() );
-
-                if ( collections != null ) {
-
-                    for ( CollectionInfo collection : collections ) {
-                        if ( collectionNames.contains( collection.getName() ) ) {
-                            batchUpdateCollectionIndex( indexUpdate, containerEntity, collection.getName() );
-                        }
-                    }
-                }
-            }
-        }
-
-        batchUpdateBackwardConnectionsDictionaryIndexes( indexUpdate );
-    }
-
-
-    /**
-     * Batch update collection index.
-     *
-     * @param indexUpdate The update to apply
-     * @param owner The entity that is the owner context of this entity update. Can either be an
-     * application, or another * entity
-     * @param collectionName the collection name
-     *
-     * @return The indexUpdate with batch mutations
-     * @throws Exception the exception
-     */
-    @Metered( group = "core", name = "RelationManager_batchUpdateCollectionIndex" )
-    public IndexUpdate batchUpdateCollectionIndex(
-            IndexUpdate indexUpdate, EntityRef owner, String collectionName )
-            throws Exception {
-
-        logger.debug( "batchUpdateCollectionIndex" );
-
-        Entity indexedEntity = indexUpdate.getEntity();
-
-        String bucketId = indexBucketLocator
-                .getBucket( applicationId, IndexBucketLocator.IndexType.COLLECTION, indexedEntity.getUuid(),
-                        indexedEntity.getType(), indexUpdate.getEntryName() );
-
-        // the root name without the bucket
-        // entity_id,collection_name,prop_name,
-        Object index_name = null;
-        // entity_id,collection_name,prop_name, bucketId
-        Object index_key = null;
-
-        // entity_id,collection_name,collected_entity_id,prop_name
-
-        for ( IndexUpdate.IndexEntry entry : indexUpdate.getPrevEntries() ) {
-
-            if ( entry.getValue() != null ) {
-
-                index_name = key( owner.getUuid(), collectionName, entry.getPath() );
-
-                index_key = key( index_name, bucketId );
-
-                addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key,
-                        entry.getIndexComposite(), indexUpdate.getTimestamp() );
-
-                if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef( indexUpdate.getEntity(),
-                            entry.getTimestampUuid(), entry.getValue().toString() );
-                    batchRemoveLocationFromCollectionIndex( indexUpdate.getBatch(),
-                            indexBucketLocator, applicationId, index_name, loc );
-                }
-            }
-            else {
-                logger.error( "Unexpected condition - deserialized property value is null" );
-            }
-        }
-
-        if ( ( indexUpdate.getNewEntries().size() > 0 )
-                && ( !indexUpdate.isMultiValue()
-                || ( indexUpdate.isMultiValue() && !indexUpdate.isRemoveListEntry() ) ) ) {
-
-            for ( IndexUpdate.IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
-
-                // byte valueCode = indexEntry.getValueCode();
-
-                index_name = key( owner.getUuid(), collectionName, indexEntry.getPath() );
-
-                index_key = key( index_name, bucketId );
-
-                // int i = 0;
-
-                addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, index_key,
-                        indexEntry.getIndexComposite(), null, indexUpdate.getTimestamp() );
-
-                if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc = new EntityLocationRef(
-                            indexUpdate.getEntity(),
-                            indexEntry.getTimestampUuid(),
-                            indexEntry.getValue().toString() );
-                    batchStoreLocationInCollectionIndex(
-                            indexUpdate.getBatch(),
-                            indexBucketLocator,
-                            applicationId,
-                            index_name,
-                            indexedEntity.getUuid(),
-                            loc );
-                }
-
-                // i++;
-            }
-        }
-
-        for ( String index : indexUpdate.getIndexesSet() ) {
-            addInsertToMutator( indexUpdate.getBatch(), ENTITY_DICTIONARIES,
-                    key( owner.getUuid(), collectionName, Schema.DICTIONARY_INDEXES ), index, null,
-                    indexUpdate.getTimestamp() );
-        }
-
-        return indexUpdate;
-    }
-
-
-    public IndexUpdate batchStartIndexUpdate(
-            Mutator<ByteBuffer> batch, Entity entity, String entryName,
-            Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-             boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed )
-            throws Exception {
-        return batchStartIndexUpdate( batch, entity, entryName, entryValue, timestampUuid,
-                schemaHasProperty, isMultiValue, removeListEntry, fulltextIndexed, false );
-    }
-
-
-    @Metered(group = "core", name = "RelationManager_batchStartIndexUpdate")
-    public IndexUpdate batchStartIndexUpdate(
-        Mutator<ByteBuffer> batch, Entity entity, String entryName,
-        Object entryValue, UUID timestampUuid, boolean schemaHasProperty,
-        boolean isMultiValue, boolean removeListEntry, boolean fulltextIndexed,
-            boolean skipRead ) throws Exception {
-
-        long timestamp = getTimestampInMicros( timestampUuid );
-
-        IndexUpdate indexUpdate = new IndexUpdate( batch, entity, entryName, entryValue,
-                schemaHasProperty, isMultiValue, removeListEntry, timestampUuid );
-
-        // entryName = entryName.toLowerCase();
-
-        // entity_id,connection_type,connected_entity_id,prop_name
-
-        if ( !skipRead ) {
-
-            List<HColumn<ByteBuffer, ByteBuffer>> entries = null;
-
-            if ( isMultiValue && validIndexableValue( entryValue ) ) {
-                entries = cass.getColumns(
-                    cass.getApplicationKeyspace( applicationId ),
-                        ENTITY_INDEX_ENTRIES,
-                        entity.getUuid(),
-                        new DynamicComposite(
-                            entryName,
-                            indexValueCode( entryValue ),
-                            toIndexableValue( entryValue ) ),
-                        setGreaterThanEqualityFlag(
-                            new DynamicComposite(
-                                entryName, indexValueCode( entryValue ),
-                                toIndexableValue( entryValue ) ) ),
-                        INDEX_ENTRY_LIST_COUNT,
-                        false );
-            }
-            else {
-                entries = cass.getColumns(
-                    cass.getApplicationKeyspace( applicationId ),
-                    ENTITY_INDEX_ENTRIES,
-                    entity.getUuid(),
-                    new DynamicComposite( entryName ),
-                    setGreaterThanEqualityFlag( new DynamicComposite( entryName ) ),
-                    INDEX_ENTRY_LIST_COUNT,
-                    false );
-            }
-
-            if ( logger.isDebugEnabled() ) {
-                logger.debug( "Found {} previous index entries for {} of entity {}", new Object[] {
-                        entries.size(), entryName, entity.getUuid()
-                } );
-            }
-
-            // Delete all matching entries from entry list
-            for ( HColumn<ByteBuffer, ByteBuffer> entry : entries ) {
-                UUID prev_timestamp = null;
-                Object prev_value = null;
-                String prev_obj_path = null;
-
-                // new format:
-                // composite(entryName,
-                // value_code,prev_value,prev_timestamp,prev_obj_path) = null
-                DynamicComposite composite =
-                        DynamicComposite.fromByteBuffer( entry.getName().duplicate() );
-                prev_value = composite.get( 2 );
-                prev_timestamp = ( UUID ) composite.get( 3 );
-                if ( composite.size() > 4 ) {
-                    prev_obj_path = ( String ) composite.get( 4 );
-                }
-
-                if ( prev_value != null ) {
-
-                    String entryPath = entryName;
-                    if ( ( prev_obj_path != null ) && ( prev_obj_path.length() > 0 ) ) {
-                        entryPath = entryName + "." + prev_obj_path;
-                    }
-
-                    indexUpdate.addPrevEntry(
-                            entryPath, prev_value, prev_timestamp, entry.getName().duplicate() );
-
-                    // composite(property_value,connected_entity_id,entry_timestamp)
-                    // addDeleteToMutator(batch, ENTITY_INDEX_ENTRIES,
-                    // entity.getUuid(), entry.getName(), timestamp);
-
-                }
-                else {
-                    logger.error( "Unexpected condition - deserialized property value is null" );
-                }
-            }
-        }
-
-        if ( !isMultiValue || ( isMultiValue && !removeListEntry ) ) {
-
-            List<Map.Entry<String, Object>> list =
-                    IndexUtils.getKeyValueList( entryName, entryValue, fulltextIndexed );
-
-            if ( entryName.equalsIgnoreCase( "location" ) && ( entryValue instanceof Map ) ) {
-                @SuppressWarnings( "rawtypes" ) double latitude =
-                        MapUtils.getDoubleValue( ( Map ) entryValue, "latitude" );
-                @SuppressWarnings( "rawtypes" ) double longitude =
-                        MapUtils.getDoubleValue( ( Map ) entryValue, "longitude" );
-                list.add( new AbstractMap.SimpleEntry<String, Object>( "location.coordinates",
-                        latitude + "," + longitude ) );
-            }
-
-            for ( Map.Entry<String, Object> indexEntry : list ) {
-
-                if ( validIndexableValue( indexEntry.getValue() ) ) {
-                    indexUpdate.addNewEntry(
-                            indexEntry.getKey(), toIndexableValue( indexEntry.getValue() ) );
-                }
-            }
-
-            if ( isMultiValue ) {
-                addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                        asList( entryName,
-                            indexValueCode( entryValue ),
-                            toIndexableValue( entryValue ),
-                            indexUpdate.getTimestampUuid() ),
-                        null, timestamp );
-            }
-            else {
-                // int i = 0;
-
-                for ( Map.Entry<String, Object> indexEntry : list ) {
-
-                    String name = indexEntry.getKey();
-                    if ( name.startsWith( entryName + "." ) ) {
-                        name = name.substring( entryName.length() + 1 );
-                    }
-                    else if ( name.startsWith( entryName ) ) {
-                        name = name.substring( entryName.length() );
-                    }
-
-                    byte code = indexValueCode( indexEntry.getValue() );
-                    Object val = toIndexableValue( indexEntry.getValue() );
-                    addInsertToMutator( batch, ENTITY_INDEX_ENTRIES, entity.getUuid(),
-                            asList( entryName, code, val, indexUpdate.getTimestampUuid(), name ),
-                            null, timestamp );
-
-                    indexUpdate.addIndex( indexEntry.getKey() );
-                }
-            }
-
-            indexUpdate.addIndex( entryName );
-        }
-
-        return indexUpdate;
-    }
-
-
-    /**
-     * Batch update backward connections set indexes.
-     *
-     * @param indexUpdate The index to update in the dictionary
-     *
-     * @return The index update
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateBackwardConnectionsDictionaryIndexes")
-    public IndexUpdate batchUpdateBackwardConnectionsDictionaryIndexes(
-            IndexUpdate indexUpdate ) throws Exception {
-
-        logger.debug( "batchUpdateBackwardConnectionsListIndexes" );
-
-        boolean entityHasDictionary = getDefaultSchema()
-                .isDictionaryIndexedInConnections(
-                        indexUpdate.getEntity().getType(), indexUpdate.getEntryName() );
-
-        if ( !entityHasDictionary ) {
-            return indexUpdate;
-        }
-
-
-        return doBackwardConnectionsUpdate( indexUpdate );
-    }
-
-
-    /**
-     * Search each reverse connection type in the graph for connections.
-     * If one is found, update the index appropriately
-     *
-     * @param indexUpdate The index update to use
-     *
-     * @return The updated index update
-     */
-    private IndexUpdate doBackwardConnectionsUpdate( IndexUpdate indexUpdate ) throws Exception {
-        final Entity targetEntity = indexUpdate.getEntity();
-
-        logger.debug( "doBackwardConnectionsUpdate" );
-
-        final ConnectionTypesIterator connectionTypes =
-                new ConnectionTypesIterator( cass, applicationId, targetEntity.getUuid(), false, 100 );
-
-        for ( String connectionType : connectionTypes ) {
-
-            PagingResultsIterator itr =
-                    getReversedConnectionsIterator( targetEntity, connectionType );
-
-            for ( Object connection : itr ) {
-
-                final ConnectedEntityRef sourceEntity = ( ConnectedEntityRef ) connection;
-
-                //we need to create a connection ref from the source entity (found via reverse edge) 
-                // to the entity we're about to update.  This is the index that needs updated
-                final ConnectionRefImpl connectionRef =
-                        new ConnectionRefImpl( sourceEntity, connectionType, indexUpdate.getEntity() );
-
-                batchUpdateConnectionIndex( indexUpdate, connectionRef );
-            }
-        }
-
-        return indexUpdate;
-    }
-
-
-    /**
-     * Batch update connection index.
-     *
-     * @param indexUpdate The update operation to perform
-     * @param connection The connection to update
-     *
-     * @return The index with the batch mutation udpated
-     *
-     * @throws Exception the exception
-     */
-    @Metered(group = "core", name = "RelationManager_batchUpdateConnectionIndex")
-    public IndexUpdate batchUpdateConnectionIndex(
-            IndexUpdate indexUpdate, ConnectionRefImpl connection ) throws Exception {
-
-        logger.debug( "batchUpdateConnectionIndex" );
-
-        // UUID connection_id = connection.getUuid();
-
-        UUID[] index_keys = connection.getIndexIds();
-
-        // Delete all matching entries from entry list
-        for ( IndexUpdate.IndexEntry entry : indexUpdate.getPrevEntries() ) {
-
-            if ( entry.getValue() != null ) {
-
-                batchDeleteConnectionIndexEntries( indexUpdate, entry, connection, index_keys );
-
-                if ( "location.coordinates".equals( entry.getPath() ) ) {
-                    EntityLocationRef loc =
-                        new EntityLocationRef( indexUpdate.getEntity(), entry.getTimestampUuid(),
-                        entry.getValue().toString() );
-                    batchDeleteLocationInConnectionsIndex(
-                        indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                        index_keys, entry.getPath(), loc );
-                }
-            }
-            else {
-                logger.error( "Unexpected condition - deserialized property value is null" );
-            }
-        }
-
-        if ( ( indexUpdate.getNewEntries().size() > 0 )
-                && ( !indexUpdate.isMultiValue() || ( indexUpdate.isMultiValue()
-                && !indexUpdate.isRemoveListEntry() ) ) ) {
-
-            for ( IndexUpdate.IndexEntry indexEntry : indexUpdate.getNewEntries() ) {
-
-                batchAddConnectionIndexEntries( indexUpdate, indexEntry, connection, index_keys );
-
-                if ( "location.coordinates".equals( indexEntry.getPath() ) ) {
-                    EntityLocationRef loc =
-                            new EntityLocationRef(
-                        indexUpdate.getEntity(),
-                        indexEntry.getTimestampUuid(),
-                        indexEntry.getValue().toString() );
-                    batchStoreLocationInConnectionsIndex(
-                            indexUpdate.getBatch(), indexBucketLocator, applicationId,
-                            index_keys, indexEntry.getPath(), loc );
-                }
-            }
-
-      /*
-       * addInsertToMutator(batch, EntityCF.SETS, key(connection_id,
-       * Schema.INDEXES_SET), indexEntry.getKey(), null, false, timestamp); }
-       *
-       * addInsertToMutator(batch, EntityCF.SETS, key(connection_id,
-       * Schema.INDEXES_SET), entryName, null, false, timestamp);
-       */
-        }
-
-        for ( String index : indexUpdate.getIndexesSet() ) {
-            addInsertToMutator( indexUpdate.getBatch(), ENTITY_DICTIONARIES,
-                    key( connection.getConnectingIndexId(), Schema.DICTIONARY_INDEXES), index, null,
-                    indexUpdate.getTimestamp() );
-        }
-
-        return indexUpdate;
-    }
-
-
-    /**
-     * Get a paging results iterator.  Should return an iterator for all results
-     *
-     * @param targetEntity The target entity search connections from
-     *
-     * @return connectionType The name of the edges to search
-     */
-    private PagingResultsIterator getReversedConnectionsIterator(
-            EntityRef targetEntity, String connectionType ) throws Exception {
-
-        return new PagingResultsIterator(
-                getConnectingEntities( targetEntity, connectionType, null, Level.REFS ) );
-    }
-
-
-    /**
-     * Get all edges that are to the targetEntity
-     *
-     * @param targetEntity The target entity to search edges in
-     * @param connectionType The type of connection.  If not specified, all connections are returned
-     * @param connectedEntityType The connected entity type, if not specified all types are returned
-     * @param resultsLevel The results level to return
-     */
-    private Results getConnectingEntities(
-            EntityRef targetEntity, String connectionType, String connectedEntityType,
-            Level resultsLevel ) throws Exception {
-
-        return getConnectingEntities(
-                targetEntity, connectionType, connectedEntityType, resultsLevel, 0);
-    }
-
-
-    /**
-     * Get all edges that are to the targetEntity
-     *
-     * @param targetEntity The target entity to search edges in
-     * @param connectionType The type of connection.  If not specified, all connections are returned
-     * @param connectedEntityType The connected entity type, if not specified all types are returned
-     * @param count result limit
-     */
-    private Results getConnectingEntities( EntityRef targetEntity, String connectionType,
-            String connectedEntityType, Level level, int count) throws Exception {
-
-        Query query = new Query();
-        query.setResultsLevel( level );
-        query.setLimit( count );
-
-        final ConnectionRefImpl connectionRef = new ConnectionRefImpl(
-                new SimpleEntityRef( connectedEntityType, null ), connectionType, targetEntity );
-        final ConnectionResultsLoaderFactory factory =
-                new ConnectionResultsLoaderFactory( connectionRef );
-
-        QueryProcessorImpl qp = new QueryProcessorImpl( query, null, em, factory );
-        SearchConnectionVisitor visitor = new SearchConnectionVisitor( qp, connectionRef, false );
-
-        return qp.getResults( visitor );
-    }
-
-
-    @Metered( group = "core", name = "RelationManager_batchDeleteConnectionIndexEntries" )
-    public Mutator<ByteBuffer> batchDeleteConnectionIndexEntries(
-            IndexUpdate indexUpdate,
-            IndexUpdate.IndexEntry entry,
-            ConnectionRefImpl connection,
-            UUID[] index_keys ) throws Exception {
-
-        logger.debug( "batchDeleteConnectionIndexEntries" );
-
-        // entity_id,prop_name
-        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL], INDEX_CONNECTIONS, entry.getPath(),
-                indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                        index_keys[ConnectionRefImpl.ALL], entry.getPath() ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], entry.getPath() ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, property_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType(),
-                        connection.getConnectedEntityType() ), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectionType() ),
-                indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId(), connection.getConnectedEntityType() ),
-                indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        addDeleteToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_and_entity_type_prop_index_key,
-                entry.getIndexComposite( connection.getConnectedEntityId() ), indexUpdate.getTimestamp() );
-
-        return indexUpdate.getBatch();
-    }
-
-
-    @Metered( group = "core", name = "RelationManager_batchAddConnectionIndexEntries" )
-    public Mutator<ByteBuffer> batchAddConnectionIndexEntries( IndexUpdate indexUpdate, IndexUpdate.IndexEntry entry,
-                                                               ConnectionRefImpl conn, UUID[] index_keys ) {
-
-        logger.debug( "batchAddConnectionIndexEntries" );
-
-        // entity_id,prop_name
-        Object property_index_key = key( index_keys[ConnectionRefImpl.ALL],
-                INDEX_CONNECTIONS, entry.getPath(),
-                indexBucketLocator.getBucket( applicationId,
-                        IndexBucketLocator.IndexType.CONNECTION, index_keys[ConnectionRefImpl.ALL],
-                        entry.getPath() ) );
-
-        // entity_id,entity_type,prop_name
-        Object entity_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_ENTITY_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,prop_name
-        Object connection_type_prop_index_key =
-                key( index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_TYPE], entry.getPath() ) );
-
-        // entity_id,connection_type,entity_type,prop_name
-        Object connection_type_and_entity_type_prop_index_key =
-            key( index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE],
-                INDEX_CONNECTIONS, entry.getPath(),
-                        indexBucketLocator.getBucket( applicationId, IndexBucketLocator.IndexType.CONNECTION,
-                                index_keys[ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE], entry.getPath() ) );
-
-        // composite(property_value,connected_entity_id,connection_type,entity_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, property_index_key,
-                entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectionType(),
-                        conn.getConnectedEntityType() ), conn.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,connection_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, entity_type_prop_index_key,
-            entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectionType() ),
-            conn.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entity_type,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX, connection_type_prop_index_key,
-            entry.getIndexComposite( conn.getConnectedEntityId(), conn.getConnectedEntityType() ),
-            conn.getUuid(), indexUpdate.getTimestamp() );
-
-        // composite(property_value,connected_entity_id,entry_timestamp)
-        addInsertToMutator( indexUpdate.getBatch(), ENTITY_INDEX,
-            connection_type_and_entity_type_prop_index_key,
-            entry.getIndexComposite( conn.getConnectedEntityId() ), conn.getUuid(),
-            indexUpdate.getTimestamp() );
-
-        return indexUpdate.getBatch();
-    }
-
-
-    /**
-     * Simple search visitor that performs all the joining
-     *
-     * @author tnine
-     */
-    private class SearchConnectionVisitor extends SearchVisitor {
-
-        private final ConnectionRefImpl connection;
-
-        /** True if we should search from source->target edges.
-         * False if we should search from target<-source edges */
-        private final boolean outgoing;
-
-
-        /**
-         * @param queryProcessor They query processor to use
-         * @param connection The connection refernce
-         * @param outgoing The direction to search.  True if we should search from source->target
-         * edges.  False if we * should search from target<-source edges
-         */
-        public SearchConnectionVisitor( QueryProcessorImpl queryProcessor, ConnectionRefImpl connection,
-                                        boolean outgoing ) {
-            super( queryProcessor );
-            this.connection = connection;
-            this.outgoing = outgoing;
-        }
-
-
-        /* (non-Javadoc)
-     * @see org.apache.usergrid.persistence.query.ir.SearchVisitor#secondaryIndexScan(org.apache.usergrid.persistence
-     * .query.ir
-     * .QueryNode, org.apache.usergrid.persistence.query.ir.QuerySlice)
-     */
-        @Override
-        protected IndexScanner secondaryIndexScan( QueryNode node, QuerySlice slice ) throws Exception {
-
-            UUID id = ConnectionRefImpl.getIndexId(
-                    ConnectionRefImpl.BY_CONNECTION_AND_ENTITY_TYPE,
-                    headEntity,
-                    connection.getConnectionType(),
-                    connection.getConnectedEntityType(),
-                    new ConnectedEntityRef[0] );
-
-            Object key = key( id, INDEX_CONNECTIONS );
-
-            // update the cursor and order before we perform the slice
-            // operation
-            queryProcessor.applyCursorAndSort( slice );
-
-            IndexScanner columns = null;
-
-            if ( slice.isComplete() ) {
-                columns = new NoOpIndexScanner();
-            }
-            else {
-                columns = searchIndex( key, slice, queryProcessor.getPageSizeHint( node ) );
-            }
-
-            return columns;
-        }
-
-
-        /*
-     * (non-Javadoc)
-     *
-     * @see org.apache.usergrid.persistence.query.ir.NodeVisitor#visit(org.apache.usergrid.
-     * persistence.query.ir.WithinNode)
-     */
-        @Override
-        public void visit( WithinNode node ) throws Exception {
-
-            QuerySlice slice = node.getSlice();
-
-            queryProcessor.applyCursorAndSort( slice );
-
-            GeoIterator itr = new GeoIterator(
-                new ConnectionGeoSearch( em, indexBucketLocator, cass, connection.getIndexId() ),
-                query.getLimit(),
-                slice,
-                node.getPropertyName(),
-                new Point( node.getLattitude(), node.getLongitude() ),
-                node.getDistance() );
-
-            results.push( itr );
-        }
-
 
-        @Override
-        public void visit( AllNode node ) throws Exception {
-            QuerySlice slice = node.getSlice();
-
-            queryProcessor.applyCursorAndSort( slice );
-
-            int size = queryProcessor.getPageSizeHint( node );
-
-            ByteBuffer start = null;
-
-            if ( slice.hasCursor() ) {
-                start = slice.getCursor();
-            }
-
-
-            boolean skipFirst = !node.isForceKeepFirst() && slice.hasCursor();
-
-            UUID entityIdToUse;
-
-            //change our type depending on which direction we're loading
-            String dictionaryType;
-
-            //the target type
-            String targetType;
-
-            //this is on the "source" side of the edge
-            if ( outgoing ) {
-                entityIdToUse = connection.getConnectingEntityId();
-                dictionaryType = DICTIONARY_CONNECTED_ENTITIES;
-                targetType = connection.getConnectedEntityType();
-            }
-
-            //we're on the target side of the edge
-            else {
-                entityIdToUse = connection.getConnectedEntityId();
-                dictionaryType = DICTIONARY_CONNECTING_ENTITIES;
-                targetType = connection.getConnectingEntityType();
-            }
-
-            final String connectionType = connection.getConnectionType();
-
-            final ConnectionIndexSliceParser connectionParser = new ConnectionIndexSliceParser( targetType );
-
-            final Iterator<String> connectionTypes;
-
-            //use the provided connection type
-            if ( connectionType != null ) {
-                connectionTypes = Collections.singleton( connectionType ).iterator();
-            }
-
-            //we need to iterate all connection types
-            else {
-                connectionTypes = new ConnectionTypesIterator(
-                        cass, applicationId, entityIdToUse, outgoing, size );
-            }
-
-            IndexScanner connectionScanner = new ConnectedIndexScanner(
-                    cass,
-                    dictionaryType,
-                    applicationId,
-                    entityIdToUse,
-                    connectionTypes,
-                    start,
-                    slice.isReversed(),
-                    size,
-                    skipFirst );
-
-            this.results.push( new SliceIterator( slice, connectionScanner, connectionParser ) );
-        }
-
-
-        @Override
-        public void visit( NameIdentifierNode nameIdentifierNode ) throws Exception {
-
-            //TODO T.N. USERGRID-1919 actually validate this is connected
-            EntityRef ref = em.getAlias( connection.getConnectedEntityType(), nameIdentifierNode.getName() );
-
-            if ( ref == null ) {
-                this.results.push( new EmptyIterator() );
-                return;
-            }
-
-            this.results.push( new StaticIdIterator( ref.getUuid() ) );
-        }
+        throw new UnsupportedOperationException( "Use a reducer to create results" );
+//        logger.debug( "buildResults() for {} from {} candidates", collName, crs.size() );
+//
+//        //get an instance of our results loader
+//        final ResultsLoader resultsLoader = this.resultsLoaderFactory.getLoader(
+//                applicationScope, indexScope, query.getResultsLevel() );
+//
+//        //load the results
+//        final Results results = resultsLoader.loadResults( crs );
+//
+//        //signal for post processing
+//        resultsLoader.postProcess();
+//
+//
+//        results.setCursor( crs.getCursor() );
+//        results.setQueryProcessor( new CpQueryProcessor( em, query, headEntity, collName ) );
+//
+//        logger.debug( "Returning results size {}", results.size() );
+//
+//        return results;
     }
 
 
-    private IndexScanner searchIndex( Object indexKey, QuerySlice slice, int pageSize ) throws Exception {
-
-        DynamicComposite[] range = slice.getRange();
 
-        Object keyPrefix = key( indexKey, slice.getPropertyName() );
 
-        IndexScanner scanner = new IndexBucketScanner(
-                cass,
-                indexBucketLocator,
-                ENTITY_INDEX,
-                applicationId,
-                IndexBucketLocator.IndexType.CONNECTION,
-                keyPrefix,
-                range[0],
-                range[1],
-                slice.isReversed(),
-                pageSize,
-                slice.hasCursor(),
-                slice.getPropertyName() );
-
-        return scanner;
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridEntityManagerFactory.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridEntityManagerFactory.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridEntityManagerFactory.java
deleted file mode 100644
index a8dd404..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridEntityManagerFactory.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright 2014 The Apache Software Foundation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.usergrid.corepersistence;
-
-import java.util.Map;
-import java.util.UUID;
-import org.apache.usergrid.persistence.EntityManager;
-import org.apache.usergrid.persistence.EntityManagerFactory;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.cassandra.CounterUtils;
-import org.apache.usergrid.persistence.cassandra.EntityManagerFactoryImpl;
-import org.apache.usergrid.persistence.core.util.Health;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.BeansException;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.ApplicationContextAware;
-
-
-/**
- * Can read from either old EntityManagerImpl or new CpEntityManager, can write to either or both.
- */
-public class HybridEntityManagerFactory implements EntityManagerFactory, ApplicationContextAware {
-    private static final Logger logger = LoggerFactory.getLogger( CpEntityManagerFactory.class );
-    private final EntityManagerFactory factory;
-
-
-    public HybridEntityManagerFactory( 
-            CassandraService cass, CounterUtils counterUtils, boolean skipAggCounters ) {
-
-        boolean useCP = cass.getPropertiesMap().get("usergrid.persistence").equals("CP");
-        if ( useCP ) {
-            logger.info("HybridEntityManagerFactory: configured for New Core Persistence engine");
-            factory = new CpEntityManagerFactory(cass, counterUtils );
-        } else {
-            logger.info("HybridEntityManagerFactory: configured for Classic Usergrid persistence");
-            factory = new EntityManagerFactoryImpl( cass, counterUtils, skipAggCounters );
-        }
-    }
-
-    public EntityManagerFactory getImplementation() {
-        return factory; 
-    }
-
-    @Override
-    public String getImplementationDescription() throws Exception {
-        return factory.getImplementationDescription();
-    }
-
-    @Override
-    public EntityManager getEntityManager(UUID applicationId) {
-        return factory.getEntityManager(applicationId);
-    }
-
-    @Override
-    public UUID createApplication(String organizationName, String name) throws Exception {
-        return factory.createApplication(organizationName, name);
-    }
-
-    @Override
-    public UUID createApplication(String organizationName, String name, 
-            Map<String, Object> properties) throws Exception {
-        return factory.createApplication(organizationName, name, properties);
-    }
-
-    @Override
-    public void deleteApplication(UUID applicationId) throws Exception {
-        factory.deleteApplication( applicationId );
-    }
-
-    @Override
-    public UUID importApplication(String organization, UUID applicationId, String name, 
-            Map<String, Object> properties) throws Exception {
-        return factory.importApplication(organization, applicationId, name, properties);
-    }
-
-    @Override
-    public UUID lookupApplication(String name) throws Exception {
-        return factory.lookupApplication(name);
-    }
-
-    @Override
-    public Map<String, UUID> getApplications() throws Exception {
-        return factory.getApplications();
-    }
-
-    @Override
-    public void setup() throws Exception {
-        factory.setup();
-    }
-
-    @Override
-    public Map<String, String> getServiceProperties() {
-        return factory.getServiceProperties();
-    }
-
-    @Override
-    public boolean updateServiceProperties(Map<String, String> properties) {
-        return factory.updateServiceProperties(properties);
-    }
-
-    @Override
-    public boolean setServiceProperty(String name, String value) {
-        return factory.setServiceProperty(name, value);
-    }
-
-    @Override
-    public boolean deleteServiceProperty(String name) {
-        return factory.deleteServiceProperty(name);
-    }
-
-    @Override
-    public UUID initializeApplication(String orgName, UUID appId, String appName, 
-            Map<String, Object> props) throws Exception {
-        return factory.initializeApplication(orgName, appId, appName, props);
-    }
-
-    @Override
-    public UUID getManagementAppId() {
-        return factory.getManagementAppId();
-    }
-
-    @Override
-    public UUID getDefaultAppId() {
-        return factory.getDefaultAppId();
-    }
-
-    @Override
-    public void refreshIndex() {
-        factory.refreshIndex();
-    }
-
-    @Override
-    public void setApplicationContext(ApplicationContext ac) throws BeansException {
-        factory.setApplicationContext(ac);
-    }
-
-
-    @Override
-    public long performEntityCount() {
-        return factory.performEntityCount();
-    }
-
-
-    @Override
-    public void flushEntityManagerCaches() {
-        factory.flushEntityManagerCaches();
-    }
-
-    @Override
-    public void rebuildInternalIndexes(ProgressObserver po) throws Exception {
-        factory.rebuildInternalIndexes(po);
-    }
-
-    @Override
-    public void rebuildAllIndexes(ProgressObserver po) throws Exception {
-        factory.rebuildAllIndexes(po);
-    }
-
-    @Override
-    public void rebuildApplicationIndexes(UUID appId, ProgressObserver po) throws Exception {
-        factory.rebuildApplicationIndexes(appId, po);
-    }
-
-
-    @Override
-    public void migrateData() throws Exception {
-        factory.migrateData();
-    }
-
-
-    @Override
-    public String getMigrateDataStatus() {
-        return factory.getMigrateDataStatus();
-    }
-
-
-    @Override
-    public int getMigrateDataVersion() {
-        return factory.getMigrateDataVersion();
-    }
-
-
-    @Override
-    public void setMigrationVersion( final int version ) {
-        factory.setMigrationVersion( version );
-    }
-
-
-    @Override
-    public void rebuildCollectionIndex(UUID appId, String collection, ProgressObserver po) {
-        factory.rebuildCollectionIndex(appId, collection, po);
-    }
-
-    @Override
-    public void addIndex(UUID appId, String suffix,final int shards,final int replicas) {
-        factory.addIndex(appId,suffix,shards,replicas);
-    }
-
-    @Override
-    public Health getEntityStoreHealth() {
-        return factory.getEntityStoreHealth();
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/bd743734/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridSetup.java
----------------------------------------------------------------------
diff --git a/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridSetup.java b/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridSetup.java
deleted file mode 100644
index 963f8db..0000000
--- a/stack/core/src/main/java/org/apache/usergrid/corepersistence/HybridSetup.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright 2014 The Apache Software Foundation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.usergrid.corepersistence;
-
-import java.util.Properties;
-import java.util.UUID;
-import org.apache.usergrid.persistence.EntityManagerFactory;
-import org.apache.usergrid.persistence.cassandra.CassandraService;
-import org.apache.usergrid.persistence.cassandra.Setup;
-import org.apache.usergrid.persistence.cassandra.SetupImpl;
-
-
-public class HybridSetup implements Setup {
-
-    private final Setup setup;
-
-    public HybridSetup(Properties props, EntityManagerFactory emf, CassandraService cass ) {
-        
-        boolean useCP = cass.getPropertiesMap().get("usergrid.persistence").equals("CP");
-        if ( useCP ) {
-            setup = new CpSetup( emf, cass);
-        } else {
-            setup = new SetupImpl( emf, cass );
-        }
-
-    }
-
-    @Override
-    public void init() throws Exception {
-        setup.init();
-    }
-
-    @Override
-    public void setupSystemKeyspace() throws Exception {
-        setup.setupSystemKeyspace();
-    }
-
-    @Override
-    public void setupStaticKeyspace() throws Exception {
-        setup.setupStaticKeyspace();
-    }
-
-    @Override
-    public boolean keyspacesExist() {
-        return setup.keyspacesExist();
-    }
-
-    @Override
-    public void createDefaultApplications() throws Exception {
-        setup.createDefaultApplications();
-    }
-
-    @Override
-    public void setupApplicationKeyspace(UUID applicationId, String appName) throws Exception {
-        setup.setupApplicationKeyspace(applicationId, appName);
-    }
-    
-}