You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@rya.apache.org by ca...@apache.org on 2017/04/24 15:06:19 UTC

[1/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Repository: incubator-rya
Updated Branches:
  refs/heads/master be9ea9a37 -> c941aea8b


http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/functions/geo/GeoFunctionsIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/functions/geo/GeoFunctionsIT.java b/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/functions/geo/GeoFunctionsIT.java
index 97873bf..319e5b9 100644
--- a/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/functions/geo/GeoFunctionsIT.java
+++ b/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/functions/geo/GeoFunctionsIT.java
@@ -18,53 +18,56 @@
  */
 package org.apache.rya.indexing.pcj.functions.geo;
 
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.api.domain.RyaType;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
-import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
-import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
+import javax.xml.datatype.DatatypeFactory;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Test;
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
 import org.openrdf.model.Value;
 import org.openrdf.model.ValueFactory;
-import org.openrdf.model.impl.LiteralImpl;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException;
 import org.openrdf.query.algebra.evaluation.function.Function;
 import org.openrdf.query.algebra.evaluation.function.FunctionRegistry;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
 /**
 * Performs integration tests of PCJ Geospatial functions in SPARQL.
 * Each test starts an Accumulo/Rya/Fluo single node stack and runs a continuous query, checking results.
  */
-public class GeoFunctionsIT extends ITBase {
+public class GeoFunctionsIT extends RyaExportITBase {
 
     @Test
     public void verifySpiLoadedGeoFunctions() {
-        final String functions[] = { "distance", //
-                "convexHull", "boundary", "envelope", "union", "intersection", "symDifference", "difference", //
-                "relate", /* "equals", */ "sfDisjoint", "sfIntersects", "sfTouches", "sfCrosses", //
-                "sfWithin", "sfContains", "sfOverlaps", "ehDisjoint", "ehMeet", "ehOverlap", //
-                "ehCovers", "ehCoveredBy", "ehInside", "ehContains", "rcc8dc", "rcc8ec", //
-                "rcc8po", "rcc8tppi", "rcc8tpp", "rcc8ntpp", "rcc8ntppi" }; //
-        HashSet<String> functionsCheckList = new HashSet<String>();
+        final String functions[] = { "distance", "convexHull", "boundary", "envelope", "union", "intersection",
+                "symDifference", "difference", "relate", "sfDisjoint", "sfIntersects", "sfTouches", "sfCrosses",
+                "sfWithin", "sfContains", "sfOverlaps", "ehDisjoint", "ehMeet", "ehOverlap", "ehCovers", "ehCoveredBy",
+                "ehInside", "ehContains", "rcc8dc", "rcc8ec", "rcc8po", "rcc8tppi", "rcc8tpp", "rcc8ntpp", "rcc8ntppi" };
+        final HashSet<String> functionsCheckList = new HashSet<>();
         functionsCheckList.addAll(Arrays.asList(functions));
-        for (String f : FunctionRegistry.getInstance().getKeys()) {
-            String functionShortName = f.replaceFirst("^.*/geosparql/(.*)", "$1");
-            // System.out.println("Registered function: " + f + " shortname: " + functionShortName);
+        for (final String f : FunctionRegistry.getInstance().getKeys()) {
+            final String functionShortName = f.replaceFirst("^.*/geosparql/(.*)", "$1");
             functionsCheckList.remove(functionShortName);
         }
         assertTrue("Missed loading these functions via SPI: " + functionsCheckList, functionsCheckList.isEmpty());
@@ -72,122 +75,104 @@ public class GeoFunctionsIT extends ITBase {
 
     @Test
     public void withGeoFilters() throws Exception {
-        final String geoWithinSelect = "PREFIX geo: <http://www.opengis.net/ont/geosparql#> "//
-                        + "PREFIX ryageo: <tag:rya.apache.org,2017:function/geo#> "//
-                        + "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> "//
-                        + "SELECT ?feature ?point ?wkt " //
-                        + "{" //
-                        + " ?feature a geo:Feature . "//
-                        + " ?feature geo:hasGeometry ?point . "//
-                        + " ?point a geo:Point . "//
-                        + " ?point geo:asWKT ?wkt . "//
-                        + " FILTER(ryageo:ehContains(?wkt, \"POLYGON((-77 39, -76 39, -76 38, -77 38, -77 39))\"^^geo:wktLiteral)) " //
-                        + "}";//
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(makeRyaStatement("tag:rya.apache.org,2017:ex#feature", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "http://www.opengis.net/ont/geosparql#Feature"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#feature", "http://www.opengis.net/ont/geosparql#hasGeometry", "tag:rya.apache.org,2017:ex#test_point"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#test_point", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "http://www.opengis.net/ont/geosparql#Point"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#test_point", "http://www.opengis.net/ont/geosparql#asWKT", new RyaType(new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral"), "Point(-77.03524 38.889468)")) //
-        );
-
-        Function fooFunction = new Function() {
+        final String sparql =
+                "PREFIX geo: <http://www.opengis.net/ont/geosparql#> " +
+                "PREFIX ryageo: <tag:rya.apache.org,2017:function/geo#> " +
+                "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> " +
+                "SELECT ?feature ?point ?wkt {" +
+                    " ?feature a geo:Feature . " +
+                    " ?feature geo:hasGeometry ?point . " +
+                    " ?point a geo:Point . " +
+                    " ?point geo:asWKT ?wkt . " +
+                    " FILTER(ryageo:ehContains(?wkt, \"POLYGON((-77 39, -76 39, -76 38, -77 38, -77 39))\"^^geo:wktLiteral)) " +
+                "}";
+
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#feature"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.opengis.net/ont/geosparql#Feature")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#feature"), vf.createURI("http://www.opengis.net/ont/geosparql#hasGeometry"), vf.createURI("tag:rya.apache.org,2017:ex#test_point")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#test_point"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.opengis.net/ont/geosparql#Point")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#test_point"), vf.createURI("http://www.opengis.net/ont/geosparql#asWKT"), vf.createLiteral("Point(-77.03524 38.889468)", vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral"))));
+
+        // Create a Geo function.
+        final Function geoFunction = new Function() {
             @Override
             public String getURI() {
                 return "tag:rya.apache.org,2017:function/geo#ehContains";
             }
 
             @Override
-            public Value evaluate(ValueFactory valueFactory, Value... args) throws ValueExprEvaluationException {
-
+            public Value evaluate(final ValueFactory valueFactory, final Value... args) throws ValueExprEvaluationException {
                 if (args.length != 2) {
                     throw new ValueExprEvaluationException(getURI() + " requires exactly 2 arguments, got " + args.length);
                 }
-                // SpatialContext spatialContext = (new SpatialContextFactory()).newSpatialContext();
-                // Shape shape1 = org.eclipse.rdf4j.query.algebra.evaluation.function.geosparql.FunctionArguments() .getShape(this, args[0], spatialContext);
-                // Shape shape2 = FunctionArguments.getShape(this, args[1], spatialContext);
-                // //https://github.com/eclipse/rdf4j/blob/master/core/queryalgebra/geosparql/src/main/java/org/eclipse/rdf4j/query/algebra/evaluation/function/geosparql/SpatialSupport.java
-                // boolean result = SpatialSupport.getSpatialAlgebra().ehContains(shape1, shape2);
-                // return valueFactory.createLiteral(result);
                 return valueFactory.createLiteral(true);
             }
         };
 
         // Add our new function to the registry
-        FunctionRegistry.getInstance().add(fooFunction);
+        FunctionRegistry.getInstance().add(geoFunction);
 
         // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(//
-                        new BindingImpl("wkt", new LiteralImpl("Point(-77.03524 38.889468)", new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral"))), //
-                        new BindingImpl("feature", new URIImpl("tag:rya.apache.org,2017:ex#feature")), //
-                        new BindingImpl("point", new URIImpl("tag:rya.apache.org,2017:ex#test_point"))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(geoWithinSelect);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
-
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, geoWithinSelect);
-        assertEquals(expected, results);
+        final Set<BindingSet> expectedResults = new HashSet<>();
+        final MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("wkt", vf.createLiteral("Point(-77.03524 38.889468)", vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral")));
+        bs.addBinding("feature", vf.createURI("tag:rya.apache.org,2017:ex#feature"));
+        bs.addBinding("point", vf.createURI("tag:rya.apache.org,2017:ex#test_point"));
+        expectedResults.add(bs);
+
+        runTest(sparql, statements, expectedResults);
     }
 
     @Test
     public void GeoDistance() throws Exception {
-        String geoCitySelect = "PREFIX geo: <http://www.opengis.net/ont/geosparql#> " //
-                        + "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> " //
-                        + "PREFIX uom: <http://www.opengis.net/def/uom/OGC/1.0/> " //
-                        + "SELECT ?cityA ?cityB " //
-                        // + "SELECT ?cityA ?cityB ?dist " //
-                        + "WHERE { ?cityA geo:asWKT ?coord1 . " //
-                        + "        ?cityB geo:asWKT ?coord2 . " //
-                        // + " BIND( (geof:distance(?coord1, ?coord2, uom:metre) / 1000) as ?dist) . " // currently not supported
-                        + " FILTER ( 500000 > geof:distance(?coord1, ?coord2, uom:metre)  ) . " // from brussels 173km to amsterdam
-                        + " FILTER ( !sameTerm (?cityA, ?cityB) ) }"; //
-
-        final URIImpl wktTypeUri = new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral");
-        final String asWKT = "http://www.opengis.net/ont/geosparql#asWKT";
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(//
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#dakar", asWKT, new RyaType(wktTypeUri, "Point(-17.45 14.69)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#dakar2", asWKT, new RyaType(wktTypeUri, "Point(-17.45 14.69)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#canberra", asWKT, new RyaType(wktTypeUri, "Point(149.12 -35.31)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#brussels", asWKT, new RyaType(wktTypeUri, "Point(4.35 50.85)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#amsterdam", asWKT, new RyaType(wktTypeUri, "Point(4.9 52.37)")) //
-        );
+        final String sparql =
+                "PREFIX geo: <http://www.opengis.net/ont/geosparql#> " +
+                "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> " +
+                "PREFIX uom: <http://www.opengis.net/def/uom/OGC/1.0/> " +
+                "SELECT ?cityA ?cityB " +
+                "WHERE { " +
+                    "?cityA geo:asWKT ?coord1 . " +
+                    "?cityB geo:asWKT ?coord2 . " +
+                    // from brussels 173km to amsterdam
+                    " FILTER ( 500000 > geof:distance(?coord1, ?coord2, uom:metre)  ) . " +
+                    " FILTER ( !sameTerm (?cityA, ?cityB) ) " +
+                "}";
+
+        final ValueFactory vf = new ValueFactoryImpl();
+        final URI wktTypeUri = vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral");
+        final URI asWKT = vf.createURI("http://www.opengis.net/ont/geosparql#asWKT");
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#dakar"), asWKT, vf.createLiteral("Point(-17.45 14.69)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#dakar2"), asWKT, vf.createLiteral("Point(-17.45 14.69)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#canberra"), asWKT, vf.createLiteral("Point(149.12 -35.31)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#brussels"), asWKT, vf.createLiteral("Point(4.35 50.85)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"), asWKT, vf.createLiteral("Point(4.9 52.37)", wktTypeUri)));
 
         // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#dakar")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#dakar2"))));
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#dakar2")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#dakar"))));
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#brussels")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam"))));
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#brussels"))));
+        final Set<BindingSet> expectedResults = new HashSet<>();
 
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(geoCitySelect);
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#dakar"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#dakar2"));
+        expectedResults.add(bs);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#dakar2"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#dakar"));
+        expectedResults.add(bs);
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
+        bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#brussels"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"));
+        expectedResults.add(bs);
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, geoCitySelect);
+        bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#brussels"));
+        expectedResults.add(bs);
 
-        results.forEach(res -> {
-            // System.out.println(res.getValue("cityA").stringValue() + " - " + res.getValue("cityB").stringValue() + " : "
-            // /* + res.getValue("dist").stringValue() + "km" */
-            // );
-        });
-
-        assertEquals(expected, results);
+        runTest(sparql, statements, expectedResults);
     }
 
     /**
@@ -203,167 +188,169 @@ public class GeoFunctionsIT extends ITBase {
      * Then add a bit of code to replace the default one that comes with RDF4J:
      * SpatialSupportInitializer.java
      * Here is one: https://bitbucket.org/pulquero/sesame-geosparql-jts
-     *
-     * @throws Exception
      */
-    // @Ignore("needs JTS initializer, see comments.")
     @Test
     public void withGeoSpatialSupportInitializer() throws Exception {
-        final String geoWithinSelect = "PREFIX geo: <http://www.opengis.net/ont/geosparql#> "//
-                        + "PREFIX ryageo: <tag:rya.apache.org,2017:function/geo#> "//
-                        + "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> "//
-                        + "SELECT ?feature ?point ?wkt " //
-                        + "{" //
-                        + " ?feature a geo:Feature . "//
-                        + " ?feature geo:hasGeometry ?point . "//
-                        + " ?point a geo:Point . "//
-                        + " ?point geo:asWKT ?wkt . "//
-                        + " FILTER(geof:sfWithin(?wkt, \"POLYGON((-78 39, -76 39, -76 38, -78 38, -78 39))\"^^geo:wktLiteral)) " //
-                        + "}";//
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(//
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#feature", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "http://www.opengis.net/ont/geosparql#Feature"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#feature", "http://www.opengis.net/ont/geosparql#hasGeometry", "tag:rya.apache.org,2017:ex#test_point"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#test_point", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "http://www.opengis.net/ont/geosparql#Point"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#test_point", "http://www.opengis.net/ont/geosparql#asWKT", new RyaType(new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral") //
-                                        , "Point(-77.03524 38.889468)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#skip_point", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "http://www.opengis.net/ont/geosparql#Point"), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#skip_point", "http://www.opengis.net/ont/geosparql#asWKT", new RyaType(new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral")//
-                                        , "Point(-10 10)")) //
-        );
+        final String sparql =
+                "PREFIX geo: <http://www.opengis.net/ont/geosparql#> " +
+                "PREFIX ryageo: <tag:rya.apache.org,2017:function/geo#> " +
+                "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> " +
+                "SELECT ?feature ?point ?wkt { " +
+                    "?feature a geo:Feature . " +
+                    "?feature geo:hasGeometry ?point . " +
+                    "?point a geo:Point . " +
+                    "?point geo:asWKT ?wkt . " +
+                    "FILTER(geof:sfWithin(?wkt, \"POLYGON((-78 39, -76 39, -76 38, -78 38, -78 39))\"^^geo:wktLiteral)) " +
+                "}";
+
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#feature"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.opengis.net/ont/geosparql#Feature")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#feature"), vf.createURI("http://www.opengis.net/ont/geosparql#hasGeometry"), vf.createURI("tag:rya.apache.org,2017:ex#test_point")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#test_point"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.opengis.net/ont/geosparql#Point")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#test_point"), vf.createURI("http://www.opengis.net/ont/geosparql#asWKT"), vf.createLiteral("Point(-77.03524 38.889468)", vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral"))),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#skip_point"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.opengis.net/ont/geosparql#Point")),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#skip_point"), vf.createURI("http://www.opengis.net/ont/geosparql#asWKT"), vf.createLiteral("Point(-10 10)", vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral"))));
+
         // Register geo functions from RDF4J is done automatically via SPI.
         // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("wkt", (new LiteralImpl("Point(-77.03524 38.889468)", new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral")))), new BindingImpl("feature", new URIImpl("tag:rya.apache.org,2017:ex#feature")), new BindingImpl("point", new URIImpl("tag:rya.apache.org,2017:ex#test_point"))));
-        // expected.add(makeBindingSet(new BindingImpl("wkt", (new LiteralImpl("Point(-77.03524 38.889468)", new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral"))))));
-        // expected.add(makeBindingSet(new BindingImpl("wkt", new URIImpl("\"Point(-77.03524 38.889468)\"^^<http://www.opengis.net/ont/geosparql#wktLiteral>")), new BindingImpl("feature", new
-        // URIImpl("tag:rya.apache.org,2017:ex#feature")), new BindingImpl("point", new URIImpl("tag:rya.apache.org,2017:ex#test_point"))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(geoWithinSelect);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
-
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, geoWithinSelect);
-        assertEquals(expected, results);
+        final Set<BindingSet> expectedResults = new HashSet<>();
+        final MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("wkt", vf.createLiteral("Point(-77.03524 38.889468)", vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral")));
+        bs.addBinding("feature", vf.createURI("tag:rya.apache.org,2017:ex#feature"));
+        bs.addBinding("point", vf.createURI("tag:rya.apache.org,2017:ex#test_point"));
+        expectedResults.add(bs);
+
+        runTest(sparql, statements, expectedResults);
     }
 
     /**
      * This test does not rely on geoTools. The default implementation in RDF4J handles point intersections.
-     * 
-     * @throws Exception
      */
     @Test
     public void withGeoIntersectsPoint() throws Exception {
-        String geoCitySelect = "PREFIX geo: <http://www.opengis.net/ont/geosparql#> " //
-                        + "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> " //
-                        + "PREFIX uom: <http://www.opengis.net/def/uom/OGC/1.0/> " //
-                        + "SELECT ?cityA ?cityB " //
-                        + "WHERE { ?cityA geo:asWKT ?coord1 . " //
-                        + "        ?cityB geo:asWKT ?coord2 . " //
-                        + " FILTER ( geof:sfIntersects(?coord1, ?coord2) ) " //
-                        + " FILTER ( !sameTerm (?cityA, ?cityB) ) }"; //
-
-        final URIImpl wktTypeUri = new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral");
-        final String asWKT = "http://www.opengis.net/ont/geosparql#asWKT";
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(//
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#dakar", asWKT, new RyaType(wktTypeUri, "Point(-17.45 14.69)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#canberra", asWKT, new RyaType(wktTypeUri, "Point(149.12 -35.31)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#brussels", asWKT, new RyaType(wktTypeUri, "Point(4.35 50.85)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#amsterdam", asWKT, new RyaType(wktTypeUri, "Point(4.9 52.37)")), //
-                        makeRyaStatement("tag:rya.apache.org,2017:ex#amsterdam2", asWKT, new RyaType(wktTypeUri, "Point(4.9 52.37)")) //
-        );
+        final String sparql =
+                "PREFIX geo: <http://www.opengis.net/ont/geosparql#> "  +
+                "PREFIX geof: <http://www.opengis.net/def/function/geosparql/> "  +
+                "PREFIX uom: <http://www.opengis.net/def/uom/OGC/1.0/> "  +
+                "SELECT ?cityA ?cityB { "  +
+                    "?cityA geo:asWKT ?coord1 . " +
+                    "?cityB geo:asWKT ?coord2 . " +
+                    " FILTER ( geof:sfIntersects(?coord1, ?coord2) ) " +
+                    " FILTER ( !sameTerm (?cityA, ?cityB) ) " +
+                "}";
+
+        final ValueFactory vf = new ValueFactoryImpl();
+        final URI wktTypeUri = vf.createURI("http://www.opengis.net/ont/geosparql#wktLiteral");
+        final URI asWKT = vf.createURI("http://www.opengis.net/ont/geosparql#asWKT");
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#dakar"), asWKT, vf.createLiteral("Point(-17.45 14.69)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#canberra"), asWKT, vf.createLiteral("Point(149.12 -35.31)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#brussels"), asWKT, vf.createLiteral("Point(4.35 50.85)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"), asWKT, vf.createLiteral("Point(4.9 52.37)", wktTypeUri)),
+                vf.createStatement(vf.createURI("tag:rya.apache.org,2017:ex#amsterdam2"), asWKT, vf.createLiteral("Point(4.9 52.37)", wktTypeUri)));
 
         // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam2"))));
-        expected.add(makeBindingSet(new BindingImpl("cityA", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam2")), new BindingImpl("cityB", new URIImpl("tag:rya.apache.org,2017:ex#amsterdam"))));
-
-        // Register geo functions from RDF4J is done automatically via SPI.
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(geoCitySelect);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
+        final Set<BindingSet> expectedResults = new HashSet<>();
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, geoCitySelect);
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam2"));
+        expectedResults.add(bs);
 
-        results.forEach(res -> {
-            // System.out.println(res.getValue("cityA").stringValue() + " - " + res.getValue("cityB").stringValue() + " : "
-            // /* + res.getValue("dist").stringValue() + "km" */
-            // );
-        });
+        bs = new MapBindingSet();
+        bs.addBinding("cityA", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam2"));
+        bs.addBinding("cityB", vf.createURI("tag:rya.apache.org,2017:ex#amsterdam"));
+        expectedResults.add(bs);
 
-        assertEquals(expected, results);
+        runTest(sparql, statements, expectedResults);
     }
 
     @Test
     public void withTemporal() throws Exception {
-        final String dtPredUri = "http://www.w3.org/2006/time#inXSDDateTime";
-        final String xmlDateTime = "http://www.w3.org/2001/XMLSchema#dateTime";
         // Find all stored dates.
-        String selectQuery = "PREFIX time: <http://www.w3.org/2006/time#> \n"//
-                        + "PREFIX xml: <http://www.w3.org/2001/XMLSchema#> \n" //
-                        + "PREFIX tempo: <tag:rya-rdf.org,2015:temporal#> \n"//
-                        + "SELECT ?event ?time \n" //
-                        + "WHERE { \n" //
-                        + "  ?event time:inXSDDateTime ?time . \n"//
-                        // + " FILTER(?time > '2000-01-01T01:00:00Z'^^xml:dateTime) \n"// all
-                        // + " FILTER(?time < '2007-01-01T01:01:03-08:00'^^xml:dateTime) \n"// after 2007
-                        + " FILTER(?time > '2001-01-01T01:01:03-08:00'^^xml:dateTime) \n"// after 3 seconds
-                        + " FILTER('2007-01-01T01:01:01+09:00'^^xml:dateTime > ?time ) \n"// 2006/12/31 include 2006, not 2007,8
-                        + "}";//
+        final String sparql =
+                "PREFIX time: <http://www.w3.org/2006/time#> " +
+                "PREFIX xml: <http://www.w3.org/2001/XMLSchema#> " +
+                "PREFIX tempo: <tag:rya-rdf.org,2015:temporal#> " +
+                "SELECT ?event ?time { " +
+                    "?event time:inXSDDateTime ?time . " +
+                    "FILTER(?time > '2001-01-01T01:01:03-08:00'^^xml:dateTime) " + // after 3 seconds
+                    "FILTER('2007-01-01T01:01:01+09:00'^^xml:dateTime > ?time ) " + // 2006/12/31 include 2006, not 2007,8
+                "}";
 
         // create some resources and literals to make statements out of
-        String eventz = "<http://eventz>";
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(//
-                        makeRyaStatement(eventz, "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "<http://www.w3.org/2006/time#Instant>"), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:01-08:00")), // one second
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T04:01:02.000-05:00")), // 2 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:03-08:00")), // 3 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:03.999-08:00")), // 4 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T09:01:05Z")), // 5 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2006-01-01TZ")), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2007-01-01TZ")), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2008-01-01TZ")));
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+
+        final URI dtPredUri = vf.createURI("http://www.w3.org/2006/time#inXSDDateTime");
+        final URI eventz = vf.createURI("<http://eventz>");
+
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(eventz, vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("<http://www.w3.org/2006/time#Instant>")),
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:01-08:00"))), // 1 second
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T04:01:02.000-05:00"))), // 2 seconds
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:03-08:00"))), // 3 seconds
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:03.999-08:00"))), // 4 seconds
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:05Z"))), // 5 seconds
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2006-01-01T05:00:00.000Z"))),
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2007-01-01T05:00:00.000Z"))),
+                vf.createStatement(eventz, dtPredUri, vf.createLiteral(dtf.newXMLGregorianCalendar("2008-01-01T05:00:00.000Z"))));
+
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:05.000Z")));
+        bs.addBinding("event", eventz);
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2006-01-01T05:00:00.000Z")));
+        bs.addBinding("event", eventz);
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:03.999Z")));
+        bs.addBinding("event", eventz);
+        expectedResults.add(bs);
+
+        runTest(sparql, statements, expectedResults);
+    }
 
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("time", new LiteralImpl("2001-01-01T09:01:03.999Z", new URIImpl(xmlDateTime))), new BindingImpl("event", new URIImpl(eventz)))); //
-        expected.add(makeBindingSet(new BindingImpl("time", new LiteralImpl("2001-01-01T09:01:05.000Z", new URIImpl(xmlDateTime))), new BindingImpl("event", new URIImpl(eventz)))); //
-        expected.add(makeBindingSet(new BindingImpl("time", new LiteralImpl("2006-01-01T00:00:00.000Z", new URIImpl(xmlDateTime))), new BindingImpl("event", new URIImpl(eventz)))); //
-        expected.add(makeBindingSet(new BindingImpl("time", new LiteralImpl("2006-01-01T00:00:00.000Z", new URIImpl(xmlDateTime))), new BindingImpl("event", new URIImpl(eventz)))); //
-        // expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2007-01-01T05:00:00.000Z", new URIImpl(xmlDateTime))))); //
-        // expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2008-01-01T05:00:00.000Z", new URIImpl(xmlDateTime)))));
+    public void runTest(final String sparql, final Collection<Statement> statements, final Collection<BindingSet> expectedResults) throws Exception {
+        requireNonNull(sparql);
+        requireNonNull(statements);
+        requireNonNull(expectedResults);
 
-        // Register geo functions from RDF4J is done automatically via SPI.
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(selectQuery);
+        // Register the PCJ with Rya.
+        final Instance accInstance = super.getAccumuloConnector().getInstance();
+        final Connector accumuloConn = super.getAccumuloConnector();
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
+                ACCUMULO_USER,
+                ACCUMULO_PASSWORD.toCharArray(),
+                accInstance.getInstanceName(),
+                accInstance.getZooKeepers()), accumuloConn);
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
+        ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, selectQuery);
-        assertEquals(expected, results);
-    }
+        // Write the data to Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
+        ryaConn.begin();
+        ryaConn.add(statements);
+        ryaConn.commit();
+        ryaConn.close();
 
-}
+        // Wait for the Fluo application to finish computing the end result.
+        super.getMiniFluo().waitForObservers();
 
+        // Fetch the value that is stored within the PCJ table.
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME)) {
+            final String pcjId = pcjStorage.listPcjs().get(0);
+            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
+
+            // Ensure the result of the query matches the expected result.
+            assertEquals(expectedResults, results);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4f7148a..1a27b8f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -578,6 +578,22 @@ under the License.
                 <artifactId>fluo-mini</artifactId>
                 <version>${fluo.version}</version>
             </dependency>
+            <dependency>
+                <groupId>org.apache.fluo</groupId>
+                <artifactId>fluo-recipes-core</artifactId>
+                <version>${fluo.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.fluo</groupId>
+                <artifactId>fluo-recipes-accumulo</artifactId>
+                <version>${fluo.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.fluo</groupId>
+                <artifactId>fluo-recipes-test</artifactId>
+                <version>${fluo.version}</version>
+                <scope>test</scope>
+            </dependency>
 
             <dependency>
                 <groupId>org.mockito</groupId>



[6/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
index 20d3005..dc4b3b4 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
@@ -20,25 +20,25 @@ package org.apache.rya.indexing.pcj.fluo.app.observers;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
-import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.observer.AbstractObserver;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.AggregationResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.FilterResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
 import org.apache.rya.indexing.pcj.fluo.app.QueryResultUpdater;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.Column;
-import org.apache.fluo.api.observer.AbstractObserver;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Notified when the results of a node have been updated to include a new Binding
@@ -47,6 +47,7 @@ import org.apache.fluo.api.observer.AbstractObserver;
  */
 @DefaultAnnotation(NonNull.class)
 public abstract class BindingSetUpdater extends AbstractObserver {
+    private static final Logger log = Logger.getLogger(BindingSetUpdater.class);
 
     // DAO
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
@@ -55,6 +56,7 @@ public abstract class BindingSetUpdater extends AbstractObserver {
     private final JoinResultUpdater joinUpdater = new JoinResultUpdater();
     private final FilterResultUpdater filterUpdater = new FilterResultUpdater();
     private final QueryResultUpdater queryUpdater = new QueryResultUpdater();
+    private final AggregationResultUpdater aggregationUpdater = new AggregationResultUpdater();
 
     @Override
     public abstract ObservedColumn getObservedColumn();
@@ -63,10 +65,11 @@ public abstract class BindingSetUpdater extends AbstractObserver {
      * Create an {@link Observation} that defines the work that needs to be done.
      *
      * @param tx - The Fluo transaction being used for the observer notification. (not null)
-     * @param parsedRow - The RowID parsed into a Binding Set and Node ID. (not null)
+     * @param row - The row that triggered the notification. (not null)
      * @return An {@link Observation} that defines the work that needs to be done.
+     * @throws Exception A problem caused this method to fail.
      */
-    public abstract Observation parseObservation(TransactionBase tx, final BindingSetRow parsedRow);
+    public abstract Observation parseObservation(TransactionBase tx, Bytes row) throws Exception;
 
     @Override
     public final void process(final TransactionBase tx, final Bytes row, final Column col) {
@@ -74,8 +77,15 @@ public abstract class BindingSetUpdater extends AbstractObserver {
         checkNotNull(row);
         checkNotNull(col);
 
-        final String bindingSetString = tx.get(row, col).toString();
-        final Observation observation = parseObservation( tx, new BindingSetRow(BindingSetRow.make(row).getNodeId(), bindingSetString) );
+        final Observation observation;
+        try {
+            observation = parseObservation(tx, row);
+        } catch (final Exception e) {
+            log.error("Unable to parse an Observation from a Row and Column pair, so this notification will be skipped. " +
+                    "Row: " + row + " Column: " + col, e);
+            return;
+        }
+
         final String observedNodeId = observation.getObservedNodeId();
         final VisibilityBindingSet observedBindingSet = observation.getObservedBindingSet();
         final String parentNodeId = observation.getParentId();
@@ -85,7 +95,11 @@ public abstract class BindingSetUpdater extends AbstractObserver {
         switch(parentNodeType) {
             case QUERY:
                 final QueryMetadata parentQuery = queryDao.readQueryMetadata(tx, parentNodeId);
-                queryUpdater.updateQueryResults(tx, observedBindingSet, parentQuery);
+                try {
+                    queryUpdater.updateQueryResults(tx, observedBindingSet, parentQuery);
+                } catch (final Exception e) {
+                    throw new RuntimeException("Could not process a Query node.", e);
+                }
                 break;
 
             case FILTER:
@@ -101,11 +115,20 @@ public abstract class BindingSetUpdater extends AbstractObserver {
                 final JoinMetadata parentJoin = queryDao.readJoinMetadata(tx, parentNodeId);
                 try {
                     joinUpdater.updateJoinResults(tx, observedNodeId, observedBindingSet, parentJoin);
-                } catch (final BindingSetConversionException e) {
+                } catch (final Exception e) {
                     throw new RuntimeException("Could not process a Join node.", e);
                 }
                 break;
 
+            case AGGREGATION:
+                final AggregationMetadata parentAggregation = queryDao.readAggregationMetadata(tx, parentNodeId);
+                try {
+                    aggregationUpdater.updateAggregateResults(tx, observedBindingSet, parentAggregation);
+                } catch (final Exception e) {
+                    throw new RuntimeException("Could not process an Aggregation node.", e);
+                }
+                break;
+
             default:
                 throw new IllegalArgumentException("The parent node's NodeType must be of type Filter, Join, or Query, but was " + parentNodeType);
         }

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
index 36af898..f5c7177 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
@@ -18,19 +18,18 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.observers;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 import org.openrdf.query.BindingSet;
 
-import org.apache.fluo.api.client.TransactionBase;
-
 /**
  * Notified when the results of a Filter have been updated to include a new
  * {@link BindingSet}. This observer updates its parent if the new Binding Set
@@ -38,7 +37,7 @@ import org.apache.fluo.api.client.TransactionBase;
  */
 public class FilterObserver extends BindingSetUpdater {
 
-    private final VisibilityBindingSetStringConverter converter = new VisibilityBindingSetStringConverter();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
 
@@ -48,17 +47,17 @@ public class FilterObserver extends BindingSetUpdater {
     }
 
     @Override
-    public Observation parseObservation(final TransactionBase tx, final BindingSetRow parsedRow) {
-        checkNotNull(tx);
-        checkNotNull(parsedRow);
+    public Observation parseObservation(final TransactionBase tx, final Bytes row) throws Exception {
+        requireNonNull(tx);
+        requireNonNull(row);
 
         // Read the Filter metadata.
-        final String filterNodeId = parsedRow.getNodeId();
+        final String filterNodeId = BindingSetRow.make(row).getNodeId();
         final FilterMetadata filterMetadata = queryDao.readFilterMetadata(tx, filterNodeId);
 
-        // Read the Binding Set that was just emmitted by the Filter.
-        final VariableOrder filterVarOrder = filterMetadata.getVariableOrder();
-        final VisibilityBindingSet filterBindingSet = (VisibilityBindingSet) converter.convert(parsedRow.getBindingSetString(), filterVarOrder);
+        // Read the Visibility Binding Set from the value.
+        final Bytes valueBytes = tx.get(row, FluoQueryColumns.FILTER_BINDING_SET);
+        final VisibilityBindingSet filterBindingSet = BS_SERDE.deserialize(valueBytes);
 
         // Figure out which node needs to handle the new metadata.
         final String parentNodeId = filterMetadata.getParentNodeId();

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
index 6933096..141ccc7 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
@@ -18,19 +18,18 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.observers;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 import org.openrdf.query.BindingSet;
 
-import org.apache.fluo.api.client.TransactionBase;
-
 /**
  * Notified when the results of a Join have been updated to include a new
  * {@link BindingSet}. This observer updates its parent if the new Binding Set
@@ -38,7 +37,7 @@ import org.apache.fluo.api.client.TransactionBase;
  */
 public class JoinObserver extends BindingSetUpdater {
 
-    private final VisibilityBindingSetStringConverter converter = new VisibilityBindingSetStringConverter();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
 
@@ -48,16 +47,17 @@ public class JoinObserver extends BindingSetUpdater {
     }
 
     @Override
-    public Observation parseObservation(final TransactionBase tx, final BindingSetRow parsedRow) {
-        checkNotNull(parsedRow);
+    public Observation parseObservation(final TransactionBase tx, final Bytes row) throws Exception {
+        requireNonNull(tx);
+        requireNonNull(row);
 
         // Read the Join metadata.
-        final String joinNodeId = parsedRow.getNodeId();
+        final String joinNodeId = BindingSetRow.make(row).getNodeId();
         final JoinMetadata joinMetadata = queryDao.readJoinMetadata(tx, joinNodeId);
 
-        // Read the Binding Set that was just emmitted by the Join.
-        final VariableOrder joinVarOrder = joinMetadata.getVariableOrder();
-        final VisibilityBindingSet joinBindingSet = (VisibilityBindingSet) converter.convert(parsedRow.getBindingSetString(), joinVarOrder);
+        // Read the Visibility Binding Set from the value.
+        final Bytes valueBytes = tx.get(row, FluoQueryColumns.JOIN_BINDING_SET);
+        final VisibilityBindingSet joinBindingSet = BS_SERDE.deserialize(valueBytes);
 
         // Figure out which node needs to handle the new metadata.
         final String parentNodeId = joinMetadata.getParentNodeId();

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
index 1238c18..28c92af 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
@@ -29,6 +29,7 @@ import org.apache.fluo.api.data.Column;
 import org.apache.fluo.api.observer.AbstractObserver;
 import org.apache.log4j.Logger;
 import org.apache.rya.accumulo.utils.VisibilitySimplifier;
+import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporter;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporter.ResultExportException;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporterFactory;
@@ -36,11 +37,7 @@ import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporterFact
 import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaResultExporterFactory;
 import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaResultExporterFactory;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
-import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
-import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableSet;
@@ -51,13 +48,7 @@ import com.google.common.collect.ImmutableSet;
 public class QueryResultObserver extends AbstractObserver {
     private static final Logger log = Logger.getLogger(QueryResultObserver.class);
 
-    private static final FluoQueryMetadataDAO QUERY_DAO = new FluoQueryMetadataDAO();
-    private static final VisibilityBindingSetStringConverter CONVERTER = new VisibilityBindingSetStringConverter();
-
-    /**
-     * Simplifies Visibility expressions prior to exporting PCJ results.
-     */
-    private static final VisibilitySimplifier SIMPLIFIER = new VisibilitySimplifier();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     /**
      * We expect to see the same expressions a lot, so we cache the simplified forms.
@@ -91,9 +82,9 @@ public class QueryResultObserver extends AbstractObserver {
         final ImmutableSet.Builder<IncrementalResultExporter> exportersBuilder = ImmutableSet.builder();
 
         for(final IncrementalResultExporterFactory builder : factories) {
-            try {
-                log.debug("QueryResultObserver.init(): for each exportersBuilder=" + builder);
+        	log.debug("QueryResultObserver.init(): for each exportersBuilder=" + builder);
 
+            try {
                 final Optional<IncrementalResultExporter> exporter = builder.build(context);
                 if(exporter.isPresent()) {
                     exportersBuilder.add(exporter.get());
@@ -107,28 +98,22 @@ public class QueryResultObserver extends AbstractObserver {
     }
 
     @Override
-    public void process(final TransactionBase tx, final Bytes brow, final Column col) {
+    public void process(final TransactionBase tx, final Bytes brow, final Column col) throws Exception {
         final String row = brow.toString();
-        
-        // Read the SPARQL query and it Binding Set from the row id.
-        final String[] queryAndBindingSet = row.split(NODEID_BS_DELIM);
-        final String queryId = queryAndBindingSet[0];
-        final String bindingSetString = tx.gets(row, col);
 
-        // Fetch the query's Variable Order from the Fluo table.
-        final QueryMetadata queryMetadata = QUERY_DAO.readQueryMetadata(tx, queryId);
-        final VariableOrder varOrder = queryMetadata.getVariableOrder();
+        // Read the SPARQL query and it Binding Set from the row id.
+        final String queryId = row.split(NODEID_BS_DELIM)[0];
 
-        // Create the result that will be exported.
-        final VisibilityBindingSet result = CONVERTER.convert(bindingSetString, varOrder);
+        // Read the Child Binding Set that will be exported.
+        final Bytes valueBytes = tx.get(brow, col);
+        final VisibilityBindingSet result = BS_SERDE.deserialize(valueBytes);
 
         // Simplify the result's visibilities.
         final String visibility = result.getVisibility();
         if(!simplifiedVisibilities.containsKey(visibility)) {
-            final String simplified = SIMPLIFIER.simplify( visibility );
+            final String simplified = VisibilitySimplifier.simplify( visibility );
             simplifiedVisibilities.put(visibility, simplified);
         }
-
         result.setVisibility( simplifiedVisibilities.get(visibility) );
 
         // Export the result using each of the provided exporters.
@@ -136,8 +121,21 @@ public class QueryResultObserver extends AbstractObserver {
             try {
                 exporter.export(tx, queryId, result);
             } catch (final ResultExportException e) {
-                log.error("Could not export a binding set for query '" + queryId + "'. Binding Set: " + bindingSetString);
+                log.error("Could not export a binding set for query '" + queryId + "'. Binding Set: " + result, e);
+            }
+        }
+    }
+
+    @Override
+    public void close() {
+        if(exporters != null) {
+            for(final IncrementalResultExporter exporter : exporters) {
+                try {
+                    exporter.close();
+                } catch(final Exception e) {
+                    log.warn("Problem encountered while closing one of the exporters.", e);
+                }
             }
         }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
index 5956634..b0548b4 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
@@ -18,19 +18,18 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.observers;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 import org.openrdf.query.BindingSet;
 
-import org.apache.fluo.api.client.TransactionBase;
-
 /**
  * Notified when the results of a Statement Pattern have been updated to include
  * a new {@link BindingSet}. This observer updates its parent if the new
@@ -38,7 +37,7 @@ import org.apache.fluo.api.client.TransactionBase;
  */
 public class StatementPatternObserver extends BindingSetUpdater {
 
-    private static final VisibilityBindingSetStringConverter CONVERTER = new VisibilityBindingSetStringConverter();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     // DAO
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
@@ -49,17 +48,17 @@ public class StatementPatternObserver extends BindingSetUpdater {
     }
 
     @Override
-    public Observation parseObservation(final TransactionBase tx, final BindingSetRow parsedRow) {
-        checkNotNull(tx);
+    public Observation parseObservation(final TransactionBase tx, final Bytes row) throws Exception {
+        requireNonNull(tx);
+        requireNonNull(row);
 
         // Read the Statement Pattern metadata.
-        final String spNodeId = parsedRow.getNodeId();
+        final String spNodeId = BindingSetRow.make(row).getNodeId();
         final StatementPatternMetadata spMetadata = queryDao.readStatementPatternMetadata(tx, spNodeId);
-        final String bindingSetValue = parsedRow.getBindingSetString();
 
-        // Read the Binding Set that was just emmitted by the Statement Pattern.
-        final VariableOrder spVarOrder = spMetadata.getVariableOrder();
-        final VisibilityBindingSet spBindingSet = (VisibilityBindingSet) CONVERTER.convert(bindingSetValue, spVarOrder);
+        // Read the Visibility Binding Set from the value.
+        final Bytes valueBytes = tx.get(row, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET);
+        final VisibilityBindingSet spBindingSet = BS_SERDE.deserialize(valueBytes);
 
         // Figure out which node needs to handle the new metadata.
         final String parentNodeId = spMetadata.getParentNodeId();

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
index 70f1cbd..3c43885 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
@@ -21,11 +21,20 @@ package org.apache.rya.indexing.pcj.fluo.app.observers;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.DELIM;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.NODEID_BS_DELIM;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.SP_PREFIX;
-import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.VAR_DELIM;
 
 import java.util.Map;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.Span;
+import org.apache.fluo.api.observer.AbstractObserver;
+import org.apache.log4j.Logger;
+import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.indexing.pcj.fluo.app.IncUpdateDAO;
+import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
@@ -33,15 +42,8 @@ import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 
+import com.google.common.base.Charsets;
 import com.google.common.collect.Maps;
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.client.scanner.ColumnScanner;
-import org.apache.fluo.api.client.scanner.RowScanner;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.Column;
-import org.apache.fluo.api.data.ColumnValue;
-import org.apache.fluo.api.data.Span;
-import org.apache.fluo.api.observer.AbstractObserver;
 
 /**
  * An observer that matches new Triples to the Statement Patterns that are part
@@ -49,9 +51,11 @@ import org.apache.fluo.api.observer.AbstractObserver;
  * the new result is stored as a binding set for the pattern.
  */
 public class TripleObserver extends AbstractObserver {
+    private static final Logger log = Logger.getLogger(TripleObserver.class);
 
-    private static final FluoQueryMetadataDAO QUERY_DAO = new FluoQueryMetadataDAO();
-    private static final VisibilityBindingSetStringConverter CONVERTER = new VisibilityBindingSetStringConverter();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+    private static final FluoQueryMetadataDAO QUERY_METADATA_DAO = new FluoQueryMetadataDAO();
+    private static final VisibilityBindingSetStringConverter VIS_BS_CONVERTER = new VisibilityBindingSetStringConverter();
 
     public TripleObserver() {}
 
@@ -62,85 +66,113 @@ public class TripleObserver extends AbstractObserver {
 
     @Override
     public void process(final TransactionBase tx, final Bytes brow, final Column column) {
-        //get string representation of triple
-        String row = brow.toString();
-        final String triple = IncUpdateDAO.getTripleString(brow);
-        String visibility = tx.gets(row, FluoQueryColumns.TRIPLES, "");
-       
-        //get variable metadata for all SP in table
-        RowScanner rscanner = tx.scanner().over(Span.prefix(SP_PREFIX)).fetch(FluoQueryColumns.STATEMENT_PATTERN_VARIABLE_ORDER).byRow().build();
-       
+        // Get string representation of triple.
+        final RyaStatement ryaStatement = IncUpdateDAO.deserializeTriple(brow);
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Rya Statement: " + ryaStatement + "\n");
 
-        //see if triple matches conditions of any of the SP
+        final String triple = IncUpdateDAO.getTripleString(ryaStatement);
 
-        for (ColumnScanner colScanner : rscanner) {
+        // Iterate over each of the Statement Patterns that are being matched against.
+        final RowScanner spScanner = tx.scanner()
+                .over(Span.prefix(SP_PREFIX))
+
+                // Only fetch rows that have the pattern in them. There will only be a single row with a pattern per SP.
+                .fetch(FluoQueryColumns.STATEMENT_PATTERN_PATTERN)
+                .byRow()
+                .build();
+
+        //see if triple matches conditions of any of the SP
+        for (final ColumnScanner colScanner : spScanner) {
+            // Get the Statement Pattern's node id.
             final String spID = colScanner.getsRow();
 
-            final StatementPatternMetadata spMetadata = QUERY_DAO.readStatementPatternMetadata(tx, spID);
+            // Fetch its metadata.
+            final StatementPatternMetadata spMetadata = QUERY_METADATA_DAO.readStatementPatternMetadata(tx, spID);
+
+            // Attempt to match the triple against the pattern.
             final String pattern = spMetadata.getStatementPattern();
-            
-            for (ColumnValue cv : colScanner) {
-                final String varOrders = cv.getsValue();
-                final VariableOrder varOrder = new VariableOrder(varOrders);
-                final String bindingSetString = getBindingSet(triple, pattern, varOrders);
-
-                //Statement matches to a binding set
-                if(bindingSetString.length() != 0) {
-                    final VisibilityBindingSet bindingSet = new VisibilityBindingSet(
-                        CONVERTER.convert(bindingSetString, varOrder),
-                        visibility);
-                    final String valueString = CONVERTER.convert(bindingSet, varOrder);
-                    tx.set(spID + NODEID_BS_DELIM + bindingSetString, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET, valueString);
+            final VariableOrder varOrder = spMetadata.getVariableOrder();
+            final String bindingSetString = getBindingSet(triple, pattern, varOrder);
+
+            // Statement matches to a binding set.
+            if(bindingSetString.length() != 0) {
+                // Fetch the triple's visibility label.
+                final String visibility = tx.gets(brow.toString(), FluoQueryColumns.TRIPLES, "");
+
+                // Create the Row ID for the emitted binding set. It does not contain visibilities.
+                final String row = spID + NODEID_BS_DELIM + bindingSetString;
+                final Bytes rowBytes = Bytes.of( row.getBytes(Charsets.UTF_8) );
+
+                // If this is a new Binding Set, then emit it.
+                if(tx.get(rowBytes, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET) == null) {
+                    // Create the Binding Set that goes in the Node Value. It does contain visibilities.
+                    final VisibilityBindingSet visBindingSet = VIS_BS_CONVERTER.convert(bindingSetString, varOrder);
+                    visBindingSet.setVisibility(visibility);
+
+                    try {
+                        final Bytes valueBytes = BS_SERDE.serialize(visBindingSet);
+
+                        log.trace(
+                                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                                        "Matched Statement Pattern: " + spID + "\n" +
+                                        "Binding Set: " + visBindingSet + "\n");
+
+                        tx.set(rowBytes, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET, valueBytes);
+                    } catch(final Exception e) {
+                        log.error("Couldn't serialize a Binding Set. This value will be skipped.", e);
+                    }
                 }
-			}
-		}
+            }
+        }
 
         // Once the triple has been handled, it may be deleted.
-        tx.delete(row, column);
+        tx.delete(brow, column);
     }
 
     /**
-     * Determines whether triple matches Statement Pattern ID conditions if
-     * so, generates a string representation of a BindingSet whose order
-     * is determined by varOrder.
+     * Determines whether a triple matches a Statement Pattern. If so, it generates a string representation of a
+     * BindingSet whose order is determined by varOrder.
+     *
      * @param triple - The triple to consider.
-     * @param spID - The statement pattern ID
-     * @param varOrder - The variable order
-     * @return The string representation of the BindingSet or an empty string,
-     * signifying the triple did not match the statement pattern ID.
+     * @param pattern - The pattern the triple must match.
+     * @param varOrder - The variable order of the Binding Set String that is produced by this method.
+     * @return The string representation of a Binding Set that is generated by matching the triple to the pattern;
+     *   otherwise an empty string if the pattern couldn't be matched.
      */
-    private static String getBindingSet(final String triple, final String spID, final String varOrder) {
-        final String[] spIdArray = spID.split(DELIM);
+    private static String getBindingSet(final String triple, final String pattern, final VariableOrder varOrder) {
+        final String[] patternArray = pattern.split(DELIM);
         final String[] tripleArray = triple.split(DELIM);
-        final String[] varOrderArray = varOrder.split(VAR_DELIM);
-        final Map<String,String> varMap = Maps.newHashMap();
+        final String[] varOrderArray = varOrder.toArray();
+        final Map<String,String> bindingValues = Maps.newHashMap();
 
-        if(spIdArray.length != 3 || tripleArray.length != 3) {
+        if(patternArray.length != 3 || tripleArray.length != 3) {
             throw new IllegalArgumentException("Invald number of components");
         }
 
+        // Extract the binding names and values.
         for(int i = 0; i < 3; i ++) {
-
-            if(spIdArray[i].startsWith("-const-")) {
-                if(!spIdArray[i].substring(7).equals(tripleArray[i])) {
+            if(patternArray[i].startsWith("-const-")) {
+                // If a constant value does not match, then the triple does not match the pattern.
+                if(!patternArray[i].substring(7).equals(tripleArray[i])) {
                     return "";
                 }
             } else{
-                varMap.put(spIdArray[i], tripleArray[i]);
+                bindingValues.put(patternArray[i], tripleArray[i]);
             }
-
         }
 
-        String bindingSet = "";
-
-        for (final String element : varOrderArray) {
-            if(bindingSet.length() == 0) {
-                bindingSet = varMap.get(element);
+        // Create the returned binding set string from the extracted values.
+        String bindingSetString = "";
+        for (final String bindingName : varOrderArray) {
+            if(bindingSetString.length() == 0) {
+                bindingSetString = bindingValues.get(bindingName);
             } else {
-                bindingSet = bindingSet + DELIM + varMap.get(element);
+                bindingSetString = bindingSetString + DELIM + bindingValues.get(bindingName);
             }
         }
 
-        return bindingSet;
+        return bindingSetString;
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
new file mode 100644
index 0000000..3bc8da6
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.query;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.query.algebra.AggregateOperator;
+import org.openrdf.query.algebra.Avg;
+import org.openrdf.query.algebra.Count;
+import org.openrdf.query.algebra.Max;
+import org.openrdf.query.algebra.Min;
+import org.openrdf.query.algebra.Sum;
+
+import com.google.common.collect.ImmutableMap;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import net.jcip.annotations.Immutable;
+
+/**
+ * Metadata that is relevant to Aggregate nodes.
+ */
+@Immutable
+@DefaultAnnotation(NonNull.class)
+public class AggregationMetadata extends CommonNodeMetadata {
+
+    /**
+     * The different types of Aggregation functions that an aggregate node may perform.
+     */
+    public static enum AggregationType {
+        MIN(Min.class),
+        MAX(Max.class),
+        COUNT(Count.class),
+        SUM(Sum.class),
+        AVERAGE(Avg.class);
+
+        private final Class<? extends AggregateOperator> operatorClass;
+
+        private AggregationType(final Class<? extends AggregateOperator> operatorClass) {
+            this.operatorClass = requireNonNull(operatorClass);
+        }
+
+        private static final ImmutableMap<Class<? extends AggregateOperator>, AggregationType> byOperatorClass;
+        static {
+            final ImmutableMap.Builder<Class<? extends AggregateOperator>, AggregationType> builder = ImmutableMap.builder();
+            for(final AggregationType type : AggregationType.values()) {
+                builder.put(type.operatorClass, type);
+            }
+            byOperatorClass = builder.build();
+        }
+
+        public static Optional<AggregationType> byOperatorClass(final Class<? extends AggregateOperator> operatorClass) {
+            return Optional.ofNullable( byOperatorClass.get(operatorClass) );
+        }
+    }
+
+    /**
+     * Represents all of the metadata require to perform an Aggregation that is part of a SPARQL query.
+     * <p>
+     * For example, if you have the following in SPARQL:
+     * <pre>
+     * SELECT (avg(?price) as ?avgPrice) {
+     *     ...
+     * }
+     * </pre>
+     * You would construct an instance of this object like so:
+     * <pre>
+     * new AggregationElement(AggregationType.AVERAGE, "price", "avgPrice");
+     * </pre>
+     */
+    @Immutable
+    @DefaultAnnotation(NonNull.class)
+    public static final class AggregationElement implements Serializable {
+        private static final long serialVersionUID = 1L;
+
+        private final AggregationType aggregationType;
+        private final String aggregatedBindingName;
+        private final String resultBindingName;
+
+        /**
+         * Constructs an instance of {@link AggregationElement}.
+         *
+         * @param aggregationType - Defines how the binding values will be aggregated. (not null)
+         * @param aggregatedBindingName - The name of the binding whose value is aggregated. This binding must
+         *   appear within the child node's emitted binding sets. (not null)
+         * @param resultBindingName - The name of the binding this aggregation's results are written to. This binding
+         *   must appear within the AggregationMetadata's variable order. (not null)
+         */
+        public AggregationElement(
+                final AggregationType aggregationType,
+                final String aggregatedBindingName,
+                final String resultBindingName) {
+            this.aggregationType = requireNonNull(aggregationType);
+            this.aggregatedBindingName = requireNonNull(aggregatedBindingName);
+            this.resultBindingName = requireNonNull(resultBindingName);
+        }
+
+        /**
+         * @return Defines how the binding values will be aggregated.
+         */
+        public AggregationType getAggregationType() {
+            return aggregationType;
+        }
+
+        /**
+         * @return The name of the binding whose value is aggregated. This binding must appear within the child node's emitted binding sets.
+         */
+        public String getAggregatedBindingName() {
+            return aggregatedBindingName;
+        }
+
+        /**
+         * @return The name of the binding this aggregation's results are written to. This binding must appear within the AggregationMetadata's variable order.
+         */
+        public String getResultBindingName() {
+            return resultBindingName;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(aggregationType, aggregatedBindingName, resultBindingName);
+        }
+
+        @Override
+        public boolean equals(final Object o ) {
+            if(o instanceof AggregationElement) {
+                final AggregationElement agg = (AggregationElement) o;
+                return Objects.equals(aggregationType, agg.aggregationType) &&
+                        Objects.equals(aggregatedBindingName, agg.aggregatedBindingName) &&
+                        Objects.equals(resultBindingName, agg.resultBindingName);
+            }
+            return false;
+        }
+    }
+
+    private final String parentNodeId;
+    private final String childNodeId;
+    private final Collection<AggregationElement> aggregations;
+    private final VariableOrder groupByVariables;
+
+    /**
+     * Constructs an instance of {@link AggregationMetadata}.
+     *
+     * @param nodeId - The ID the Fluo app uses to reference this node. (not null)
+     * @param varOrder - The variable order of binding sets that are emitted by this node. This may only contain a
+     *   single variable because aggregations are only able to emit the aggregated value. (not null)
+     * @param parentNodeId - The Node ID of this node's parent. This is the node that will consume the results of the aggregations. (not null)
+     * @param childNodeId - The Node ID of this node's child. This is the node that will feed binding sets into the aggregations. (not null)
+     * @param aggregations - The aggregations that will be performed over the BindingSets that are emitted from the child node. (not null)
+     * @param groupByVariables - Defines how the data is grouped for the aggregation function. (not null, may be empty if no grouping is required)
+     */
+    public AggregationMetadata(
+            final String nodeId,
+            final VariableOrder varOrder,
+            final String parentNodeId,
+            final String childNodeId,
+            final Collection<AggregationElement> aggregations,
+            final VariableOrder groupByVariables) {
+        super(nodeId, varOrder);
+        this.parentNodeId = requireNonNull(parentNodeId);
+        this.childNodeId = requireNonNull(childNodeId);
+        this.aggregations = requireNonNull(aggregations);
+        this.groupByVariables = requireNonNull(groupByVariables);
+    }
+
+    /**
+     * @return The Node ID of this node's parent. This is the node that will consume the results of the aggregations.
+     */
+    public String getParentNodeId() {
+        return parentNodeId;
+    }
+
+    /**
+     * @return The Node ID of this node's child. This is the node that will feed binding sets into the aggregations.
+     */
+    public String getChildNodeId() {
+        return childNodeId;
+    }
+
+    /**
+     * @return The aggregations that will be performed over the BindingSets that are emitted from the child node.
+     */
+    public Collection<AggregationElement> getAggregations() {
+        return aggregations;
+    }
+
+    /**
+     * @return Defines how the data is grouped for the aggregation function.
+     */
+    public VariableOrder getGroupByVariableOrder() {
+        return groupByVariables;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                super.getNodeId(),
+                super.getVariableOrder(),
+                parentNodeId,
+                childNodeId,
+                aggregations,
+                groupByVariables);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if(o instanceof AggregationMetadata) {
+            final AggregationMetadata metadata = (AggregationMetadata) o;
+            return Objects.equals(getNodeId(), metadata.getNodeId()) &&
+                    Objects.equals(super.getVariableOrder(), metadata.getVariableOrder()) &&
+                    Objects.equals(parentNodeId, metadata.parentNodeId) &&
+                    Objects.equals(childNodeId, metadata.childNodeId) &&
+                    Objects.equals(aggregations, metadata.aggregations) &&
+                    Objects.equals(groupByVariables, metadata.groupByVariables);
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        final StringBuilder string = new StringBuilder()
+                .append("AggregationMetadata {\n")
+                .append("    Node ID: " + super.getNodeId() + "\n")
+                .append("    Variable Order: " + super.getVariableOrder() + "\n")
+                .append("    Parent Node ID: " + parentNodeId + "\n")
+                .append("    Child Node ID: " + childNodeId + "\n");
+
+        // Only print the group by names if they're present.
+        if(!groupByVariables.getVariableOrders().isEmpty()) {
+            string.append("    GroupBy Variable Order: " + groupByVariables + "\n");
+        }
+
+        // Print each of the AggregationElements.
+        string.append("    Aggregations: {\n");
+        final Iterator<AggregationElement> it = aggregations.iterator();
+        while(it.hasNext()) {
+            final AggregationElement agg = it.next();
+            string.append("        Type: " + agg.getAggregationType() + "\n");
+            string.append("        Aggregated Binding Name: " + agg.getAggregatedBindingName() + "\n");
+            string.append("        Result Binding Name: " + agg.getResultBindingName() + "\n");
+
+            if(it.hasNext()) {
+                string.append("\n");
+            }
+        }
+        string.append("    }\n");
+        string.append("}");
+
+        return string.toString();
+    }
+
+    /**
+     * @param nodeId - The ID the Fluo app uses to reference this node. (not null)
+     * @return A new {@link Builder} initialized with the provided nodeId.
+     */
+    public static Builder builder(final String nodeId) {
+        return new Builder(nodeId);
+    }
+
+    /**
+     * Builds instances of {@link AggregationMetadata}.
+     */
+    @DefaultAnnotation(NonNull.class)
+    public static final class Builder {
+
+        private final String nodeId;
+        private VariableOrder varOrder;
+        private String parentNodeId;
+        private String childNodeId;
+        private final List<AggregationElement> aggregations = new ArrayList<>();
+        private VariableOrder groupByVariables = new VariableOrder();
+
+        /**
+         * Constructs an instance of {@link Builder}.
+         *
+         * @param nodeId - This node's Node ID. (not null)
+         */
+        public Builder(final String nodeId) {
+            this.nodeId = requireNonNull(nodeId);
+        }
+
+        /**
+         * @return This node's Node ID.
+         */
+        public String getNodeId() {
+            return nodeId;
+        }
+
+        /**
+         * @param varOrder - The variable order of binding sets that are emitted by this node. This may only contain a
+         *   single variable because aggregations are only able to emit the aggregated value.
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setVariableOrder(@Nullable final VariableOrder varOrder) {
+            this.varOrder = varOrder;
+            return this;
+        }
+
+        /**
+         * @param parentNodeId - The Node ID of this node's parent.
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setParentNodeId(@Nullable final String parentNodeId) {
+            this.parentNodeId = parentNodeId;
+            return this;
+        }
+
+        /**
+         * @param childNodeId - The Node ID of this node's child.
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setChildNodeId(@Nullable final String childNodeId) {
+            this.childNodeId = childNodeId;
+            return this;
+        }
+
+        /**
+         * @param aggregation - An aggregation that will be performed over the BindingSets that are emitted from the child node.
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder addAggregation(@Nullable final AggregationElement aggregation) {
+            if(aggregation != null) {
+                this.aggregations.add(aggregation);
+            }
+            return this;
+        }
+
+        /**
+         * @param groupByVariables - Defines how the data is grouped for the aggregation function. (not null, may be
+         *   empty if no grouping is required)
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setGroupByVariableOrder(@Nullable final VariableOrder groupByVariables) {
+            this.groupByVariables = groupByVariables;
+            return this;
+        }
+
+        /**
+         * @return An instance of {@link AggregationMetadata} build using this builder's values.
+         */
+        public AggregationMetadata build() {
+            return new AggregationMetadata(nodeId, varOrder, parentNodeId, childNodeId, aggregations, groupByVariables);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
index 263db7e..3230a5d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
@@ -18,24 +18,24 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import edu.umd.cs.findbugs.annotations.Nullable;
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-import net.jcip.annotations.Immutable;
-
 import org.apache.commons.lang3.builder.EqualsBuilder;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableMap;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import net.jcip.annotations.Immutable;
+
 /**
  * Metadata for every node of a query that is being updated by the Fluo application.
  */
@@ -47,6 +47,7 @@ public class FluoQuery {
     private final ImmutableMap<String, StatementPatternMetadata> statementPatternMetadata;
     private final ImmutableMap<String, FilterMetadata> filterMetadata;
     private final ImmutableMap<String, JoinMetadata> joinMetadata;
+    private final ImmutableMap<String, AggregationMetadata> aggregationMetadata;
 
     /**
      * Constructs an instance of {@link FluoQuery}. Private because applications
@@ -55,20 +56,24 @@ public class FluoQuery {
      * @param queryMetadata - The root node of a query that is updated in Fluo. (not null)
      * @param statementPatternMetadata - A map from Node ID to Statement Pattern metadata as
      *   it is represented within the Fluo app. (not null)
-     * @param filterMetadata A map from Node ID to Filter metadata as it is represented
+     * @param filterMetadata - A map from Node ID to Filter metadata as it is represented
      *   within the Fluo app. (not null)
-     * @param joinMetadata A map from Node ID to Join metadata as it is represented
+     * @param joinMetadata - A map from Node ID to Join metadata as it is represented
      *   within the Fluo app. (not null)
+     * @param aggregationMetadata - A map from Node ID to Aggregation metadata as it is
+     *   represented within the Fluo app. (not null)
      */
     private FluoQuery(
             final QueryMetadata queryMetadata,
             final ImmutableMap<String, StatementPatternMetadata> statementPatternMetadata,
             final ImmutableMap<String, FilterMetadata> filterMetadata,
-            final ImmutableMap<String, JoinMetadata> joinMetadata) {
-        this.queryMetadata = checkNotNull(queryMetadata);
-        this.statementPatternMetadata = checkNotNull(statementPatternMetadata);
-        this.filterMetadata = checkNotNull(filterMetadata);
-        this.joinMetadata = checkNotNull(joinMetadata);
+            final ImmutableMap<String, JoinMetadata> joinMetadata,
+            final ImmutableMap<String, AggregationMetadata> aggregationMetadata) {
+        this.queryMetadata = requireNonNull(queryMetadata);
+        this.statementPatternMetadata = requireNonNull(statementPatternMetadata);
+        this.filterMetadata = requireNonNull(filterMetadata);
+        this.joinMetadata = requireNonNull(joinMetadata);
+        this.aggregationMetadata = requireNonNull(aggregationMetadata);
     }
 
     /**
@@ -85,7 +90,7 @@ public class FluoQuery {
      * @return The StatementPattern metadata if it could be found; otherwise absent.
      */
     public Optional<StatementPatternMetadata> getStatementPatternMetadata(final String nodeId) {
-        checkNotNull(nodeId);
+        requireNonNull(nodeId);
         return Optional.fromNullable( statementPatternMetadata.get(nodeId) );
     }
 
@@ -103,7 +108,7 @@ public class FluoQuery {
      * @return The Filter metadata if it could be found; otherwise absent.
      */
     public Optional<FilterMetadata> getFilterMetadata(final String nodeId) {
-        checkNotNull(nodeId);
+        requireNonNull(nodeId);
         return Optional.fromNullable( filterMetadata.get(nodeId) );
     }
 
@@ -121,7 +126,7 @@ public class FluoQuery {
      * @return The Join metadata if it could be found; otherwise absent.
      */
     public Optional<JoinMetadata> getJoinMetadata(final String nodeId) {
-        checkNotNull(nodeId);
+        requireNonNull(nodeId);
         return Optional.fromNullable( joinMetadata.get(nodeId) );
     }
 
@@ -132,13 +137,32 @@ public class FluoQuery {
         return joinMetadata.values();
     }
 
+    /**
+     * Get an Aggregation node's metadata.
+     *
+     * @param nodeId - The node ID of the Aggregation metadata you want. (not null)
+     * @return The Aggregation metadata if it could be found; otherwise absent.
+     */
+    public Optional<AggregationMetadata> getAggregationMetadata(final String nodeId) {
+        requireNonNull(nodeId);
+        return Optional.fromNullable( aggregationMetadata.get(nodeId) );
+    }
+
+    /**
+     * @return All of the Aggregation metadata that is stored for the query.
+     */
+    public Collection<AggregationMetadata> getAggregationMetadata() {
+        return aggregationMetadata.values();
+    }
+
     @Override
     public int hashCode() {
         return Objects.hashCode(
                 queryMetadata,
                 statementPatternMetadata,
                 filterMetadata,
-                joinMetadata);
+                joinMetadata,
+                aggregationMetadata);
     }
 
     @Override
@@ -154,6 +178,7 @@ public class FluoQuery {
                     .append(statementPatternMetadata, fluoQuery.statementPatternMetadata)
                     .append(filterMetadata, fluoQuery.filterMetadata)
                     .append(joinMetadata, fluoQuery.joinMetadata)
+                    .append(aggregationMetadata, fluoQuery.aggregationMetadata)
                     .isEquals();
         }
 
@@ -184,6 +209,11 @@ public class FluoQuery {
             builder.append("\n");
         }
 
+        for(final AggregationMetadata metadata : aggregationMetadata.values()) {
+            builder.append(metadata.toString());
+            builder.append("\n");
+        }
+
         return builder.toString();
     }
 
@@ -204,6 +234,7 @@ public class FluoQuery {
         private final Map<String, StatementPatternMetadata.Builder> spBuilders = new HashMap<>();
         private final Map<String, FilterMetadata.Builder> filterBuilders = new HashMap<>();
         private final Map<String, JoinMetadata.Builder> joinBuilders = new HashMap<>();
+        private final Map<String, AggregationMetadata.Builder> aggregationBuilders = new HashMap<>();
 
         /**
          * Sets the {@link QueryMetadata.Builder} that is used by this builder.
@@ -230,7 +261,7 @@ public class FluoQuery {
          * @return This builder so that method invocation may be chained.
          */
         public Builder addStatementPatternBuilder(final StatementPatternMetadata.Builder spBuilder) {
-            checkNotNull(spBuilder);
+            requireNonNull(spBuilder);
             spBuilders.put(spBuilder.getNodeId(), spBuilder);
             return this;
         }
@@ -242,7 +273,7 @@ public class FluoQuery {
          * @return The builder that was stored at the node id if one was found.
          */
         public Optional<StatementPatternMetadata.Builder> getStatementPatternBuilder(final String nodeId) {
-            checkNotNull(nodeId);
+            requireNonNull(nodeId);
             return Optional.fromNullable( spBuilders.get(nodeId) );
         }
 
@@ -252,8 +283,8 @@ public class FluoQuery {
          * @param filterBuilder - A builder representing a specific Filter within the query. (not null)
          * @return This builder so that method invocation may be chained.
          */
-        public Builder addFilterMetadata(@Nullable final FilterMetadata.Builder filterBuilder) {
-            checkNotNull(filterBuilder);
+        public Builder addFilterMetadata(final FilterMetadata.Builder filterBuilder) {
+            requireNonNull(filterBuilder);
             this.filterBuilders.put(filterBuilder.getNodeId(), filterBuilder);
             return this;
         }
@@ -265,7 +296,7 @@ public class FluoQuery {
          * @return The builder that was stored at the node id if one was found.
          */
         public Optional<FilterMetadata.Builder> getFilterBuilder(final String nodeId) {
-            checkNotNull(nodeId);
+            requireNonNull(nodeId);
             return Optional.fromNullable( filterBuilders.get(nodeId) );
         }
 
@@ -275,20 +306,43 @@ public class FluoQuery {
          * @param joinBuilder - A builder representing a specific Join within the query. (not null)
          * @return This builder so that method invocation may be chained.
          */
-        public Builder addJoinMetadata(@Nullable final JoinMetadata.Builder joinBuilder) {
-            checkNotNull(joinBuilder);
+        public Builder addJoinMetadata(final JoinMetadata.Builder joinBuilder) {
+            requireNonNull(joinBuilder);
             this.joinBuilders.put(joinBuilder.getNodeId(), joinBuilder);
             return this;
         }
 
         /**
+         * Get an Aggregate builder from this builder.
+         *
+         * @param nodeId - The Node ID the Aggregate builder was stored at. (not null)
+         * @return The builder that was stored at the node id if one was found.
+         */
+        public Optional<AggregationMetadata.Builder> getAggregateBuilder(final String nodeId) {
+            requireNonNull(nodeId);
+            return Optional.fromNullable( aggregationBuilders.get(nodeId) );
+        }
+
+        /**
+         * Adds a new {@link AggregationMetadata.Builder} to this builder.
+         *
+         * @param aggregationBuilder - A builder representing a specific Aggregation within the query. (not null)
+         * @return This builder so that method invocation may be chained.
+         */
+        public Builder addAggregateMetadata(@Nullable final AggregationMetadata.Builder aggregationBuilder) {
+            requireNonNull(aggregationBuilder);
+            this.aggregationBuilders.put(aggregationBuilder.getNodeId(), aggregationBuilder);
+            return this;
+        }
+
+        /**
          * Get a Join builder from this builder.
          *
          * @param nodeId - The Node ID the Join builder was stored at. (not null)
          * @return The builder that was stored at the node id if one was found.
          */
         public Optional<JoinMetadata.Builder> getJoinBuilder(final String nodeId) {
-            checkNotNull(nodeId);
+            requireNonNull(nodeId);
             return Optional.fromNullable( joinBuilders.get(nodeId) );
         }
 
@@ -313,7 +367,12 @@ public class FluoQuery {
                 joinMetadata.put(entry.getKey(), entry.getValue().build());
             }
 
-            return new FluoQuery(queryMetadata, spMetadata.build(), filterMetadata.build(), joinMetadata.build());
+            final ImmutableMap.Builder<String, AggregationMetadata> aggregateMetadata = ImmutableMap.builder();
+            for(final Entry<String, AggregationMetadata.Builder> entry : aggregationBuilders.entrySet()) {
+                aggregateMetadata.put(entry.getKey(), entry.getValue().build());
+            }
+
+            return new FluoQuery(queryMetadata, spMetadata.build(), filterMetadata.build(), joinMetadata.build(), aggregateMetadata.build());
         }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
index f12c6ab..77d6a49 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
@@ -23,11 +23,13 @@ import static java.util.Objects.requireNonNull;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.fluo.api.data.Column;
+import org.apache.rya.indexing.pcj.fluo.app.AggregationResultUpdater.AggregationState;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
 import edu.umd.cs.findbugs.annotations.NonNull;
 
-import org.apache.fluo.api.data.Column;
-
 /**
  * Holds {@link Column} objects that represent where each piece of metadata is stored.
  * <p>
@@ -40,7 +42,7 @@ import org.apache.fluo.api.data.Column;
  *     <tr> <td>Node ID</td> <td>queryMetadata:variableOrder</td> <td>The Variable Order binding sets are emitted with.</td> </tr>
  *     <tr> <td>Node ID</td> <td>queryMetadata:sparql</td> <td>The original SPARQL query that is being computed by this query.</td> </tr>
  *     <tr> <td>Node ID</td> <td>queryMetadata:childNodeId</td> <td>The Node ID of the child who feeds this node.</td> </tr>
- *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>queryMetadata:bindingSet</td> <td>A Binding Set that matches the query.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>queryMetadata:bindingSet</td> <td>A {@link VisibilityBindingSet} object.</td> </tr>
  *   </table>
  * </p>
  * <p>
@@ -53,7 +55,7 @@ import org.apache.fluo.api.data.Column;
  *     <tr> <td>Node ID</td> <td>filterMetadata:filterIndexWithinSparql</td> <td>Indicates which filter within the original SPARQL query this represents.</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:parentNodeId</td> <td>The Node ID this filter emits Binding Sets to.</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:childNodeId</td> <td>The Node ID of the node that feeds this node Binding Sets.</td> </tr>
- *     <tr> <td>Node ID + DELIM + Binding set String</td> <td>filterMetadata:bindingSet</td> <td>A Binding Set that matches the Filter.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>filterMetadata:bindingSet</td> <td>A {@link VisibilityBindingSet} object.</td> </tr>
  *   </table>
  * </p>
  * <p>
@@ -66,7 +68,7 @@ import org.apache.fluo.api.data.Column;
  *     <tr> <td>Node ID</td> <td>joinMetadata:parentNodeId</td> <td>The Node ID this join emits Binding Sets to.</td> </tr>
  *     <tr> <td>Node ID</td> <td>joinMetadata:leftChildNodeId</td> <td>A Node ID of the node that feeds this node Binding Sets.</td> </tr>
  *     <tr> <td>Node ID</td> <td>joinMetadata:rightChildNodeId</td> <td>A Node ID of the node that feeds this node Binding Sets.</td> </tr>
- *     <tr> <td>Node ID + DELIM + Binding set String</td> <td>joinMetadata:bindingSet</td> <td>A Binding Set that matches the Join.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>joinMetadata:bindingSet</td> <td>A {@link VisibilityBindingSet} object.</td> </tr>
  *   </table>
  * </p>
  * <p>
@@ -77,9 +79,22 @@ import org.apache.fluo.api.data.Column;
  *     <tr> <td>Node ID</td> <td>statementPatternMetadata:variableOrder</td> <td>The Variable Order binding sets are emitted with.</td> </tr>
  *     <tr> <td>Node ID</td> <td>statementPatternMetadata:pattern</td> <td>The pattern that defines which Statements will be matched.</td> </tr>
  *     <tr> <td>Node ID</td> <td>statementPatternMetadata:parentNodeId</td> <td>The Node ID this statement pattern emits Binding Sets to.</td> </tr>
- *     <tr> <td>Node ID + DELIM + Binding set String</td> <td>statementPatternMetadata:bindingSet</td> <td>A Binding Set that matches the Statement Pattern.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>statementPatternMetadata:bindingSet</td> <td>A {@link VisibilityBindingSet} object.</td> </tr>
  *   </table>
+ * </p>
  * <p>
+ *   <b>Aggregation Metadata</b>
+ *   <table border="1" style="width:100%">
+ *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:nodeId</td> <td>The Node ID of the Aggregation.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:variableOrder</td> <td>The Variable Order binding sets are emitted with.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:parentNodeId</td> <td>The Node ID this Aggregation emits its result Binding Set to.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:childNodeId</td> <td>The Node ID of the node that feeds this node Binding Sets.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:groupByBindingNames</td> <td>An ordered list of the binding names the aggregation's results will be grouped by.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>aggregationMetadata:aggregations</td> <td>A serialized form of the aggregations that need to be performed by this aggregation node.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Group By Values Binding Set String</td> <td>aggregationMetadata:bindingSet</td><td>An {@link AggregationState} object.</td> </tr>
+ *   </table>
+ * </p>
  */
 public class FluoQueryColumns {
 
@@ -88,6 +103,7 @@ public class FluoQueryColumns {
     public static final String FILTER_METADATA_CF = "filterMetadata";
     public static final String JOIN_METADATA_CF = "joinMetadata";
     public static final String STATEMENT_PATTERN_METADATA_CF = "statementPatternMetadata";
+    public static final String AGGREGATION_METADATA_CF = "aggregationMetadata";
 
     /**
      * New triples that have been added to Rya are written as a row in this
@@ -96,7 +112,7 @@ public class FluoQueryColumns {
      * <p>
      *   <table border="1" style="width:100%">
      *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
-     *     <tr> <td>Core Rya SPO formatted triple</td> <td>triples:SPO</td> <td>visibility</td> </tr>
+     *     <tr> <td>Core Rya SPO formatted triple</td> <td>triples:SPO</td> <td>The visibility label for the triple.</td> </tr>
      *   </table>
      * </p>
      */
@@ -108,7 +124,7 @@ public class FluoQueryColumns {
      * <p>
      *   <table border="1" style="width:100%">
      *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
-     *     <tr> <td>Query ID</td> <td>query:ryaPcjId</td> <td>Identifies which PCJ the reuslts of this query will be exported to.</td> </tr>
+     *     <tr> <td>Query ID</td> <td>query:ryaPcjId</td> <td>Identifies which PCJ the results of this query will be exported to.</td> </tr>
      *   </table>
      * </p>
      */
@@ -160,6 +176,15 @@ public class FluoQueryColumns {
     public static final Column STATEMENT_PATTERN_PARENT_NODE_ID = new Column(STATEMENT_PATTERN_METADATA_CF, "parentNodeId");
     public static final Column STATEMENT_PATTERN_BINDING_SET = new Column(STATEMENT_PATTERN_METADATA_CF, "bindingSet");
 
+    // Aggregation Metadata columns.
+    public static final Column AGGREGATION_NODE_ID = new Column(AGGREGATION_METADATA_CF, "nodeId");
+    public static final Column AGGREGATION_VARIABLE_ORDER = new Column(AGGREGATION_METADATA_CF, "variableOrder");
+    public static final Column AGGREGATION_PARENT_NODE_ID = new Column(AGGREGATION_METADATA_CF, "parentNodeId");
+    public static final Column AGGREGATION_CHILD_NODE_ID = new Column(AGGREGATION_METADATA_CF, "childNodeId");
+    public static final Column AGGREGATION_GROUP_BY_BINDING_NAMES = new Column(AGGREGATION_METADATA_CF, "groupByBindingNames");
+    public static final Column AGGREGATION_AGGREGATIONS = new Column(AGGREGATION_METADATA_CF, "aggregations");
+    public static final Column AGGREGATION_BINDING_SET = new Column(AGGREGATION_METADATA_CF, "bindingSet");
+
     /**
      * Enumerates the {@link Column}s that hold all of the fields for each type
      * of node that can compose a query.
@@ -204,16 +229,27 @@ public class FluoQueryColumns {
                 Arrays.asList(STATEMENT_PATTERN_NODE_ID,
                         STATEMENT_PATTERN_VARIABLE_ORDER,
                         STATEMENT_PATTERN_PATTERN,
-                        STATEMENT_PATTERN_PARENT_NODE_ID));
+                        STATEMENT_PATTERN_PARENT_NODE_ID)),
+
+        /**
+         * The columns an {@link AggregationMetadata} object's fields are stored within.
+         */
+        AGGREGATION_COLUMNS(
+                Arrays.asList(AGGREGATION_NODE_ID,
+                        AGGREGATION_VARIABLE_ORDER,
+                        AGGREGATION_PARENT_NODE_ID,
+                        AGGREGATION_CHILD_NODE_ID,
+                        AGGREGATION_GROUP_BY_BINDING_NAMES,
+                        AGGREGATION_AGGREGATIONS));
 
-        private List<Column> columns;
+        private final List<Column> columns;
 
         /**
          * Constructs an instance of {@link QueryNodeMetadataColumns}.
          *
          * @param columns - The {@link Column}s associated with this node's metadata. (not null)
          */
-        private QueryNodeMetadataColumns(List<Column> columns) {
+        private QueryNodeMetadataColumns(final List<Column> columns) {
             this.columns = requireNonNull(columns);
         }
 
@@ -224,4 +260,4 @@ public class FluoQueryColumns {
             return columns;
         }
     }
-}
+}
\ No newline at end of file


[7/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/AggregationResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/AggregationResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/AggregationResultUpdater.java
new file mode 100644
index 0000000..2e41cb1
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/AggregationResultUpdater.java
@@ -0,0 +1,572 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.utils.VisibilitySimplifier;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationType;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.openrdf.model.Literal;
+import org.openrdf.model.Value;
+import org.openrdf.model.datatypes.XMLDatatypeUtil;
+import org.openrdf.model.impl.DecimalLiteralImpl;
+import org.openrdf.model.impl.IntegerLiteralImpl;
+import org.openrdf.query.algebra.MathExpr.MathOp;
+import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException;
+import org.openrdf.query.algebra.evaluation.util.MathUtil;
+import org.openrdf.query.algebra.evaluation.util.ValueComparator;
+import org.openrdf.query.impl.MapBindingSet;
+
+import com.google.common.collect.ImmutableMap;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Updates the results of an Aggregate node when its child has added a new Binding Set to its results.
+ */
+@DefaultAnnotation(NonNull.class)
+public class AggregationResultUpdater {
+    private static final Logger log = Logger.getLogger(AggregationResultUpdater.class);
+
+    private static final AggregationStateSerDe AGG_STATE_SERDE = new ObjectSerializationAggregationStateSerDe();
+
+    private static final ImmutableMap<AggregationType, AggregationFunction> FUNCTIONS;
+    static {
+        final ImmutableMap.Builder<AggregationType, AggregationFunction> builder = ImmutableMap.builder();
+        builder.put(AggregationType.COUNT, new CountFunction());
+        builder.put(AggregationType.SUM, new SumFunction());
+        builder.put(AggregationType.AVERAGE, new AverageFunction());
+        builder.put(AggregationType.MIN, new MinFunction());
+        builder.put(AggregationType.MAX, new MaxFunction());
+        FUNCTIONS = builder.build();
+    }
+
+    /**
+     * Updates the results of an Aggregation node where its child has emitted a new Binding Set.
+     *
+     * @param tx - The transaction all Fluo queries will use. (not null)
+     * @param childBindingSet - The Binding Set that was emitted by the Aggregation Node's child. (not null)
+     * @param aggregationMetadata - The metadata of the Aggregation node whose results will be updated. (not null)
+     * @throws Exception The update could not be successfully performed.
+     */
+    public void updateAggregateResults(
+            final TransactionBase tx,
+            final VisibilityBindingSet childBindingSet,
+            final AggregationMetadata aggregationMetadata) throws Exception {
+        requireNonNull(tx);
+        requireNonNull(childBindingSet);
+        requireNonNull(aggregationMetadata);
+
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Child Binding Set:\n" + childBindingSet + "\n");
+
+        // The Row ID for the Aggregation State that needs to be updated is defined by the Group By variables.
+        final String aggregationNodeId = aggregationMetadata.getNodeId();
+        final VariableOrder groupByVars = aggregationMetadata.getGroupByVariableOrder();
+        final Bytes rowId = RowKeyUtil.makeRowKey(aggregationNodeId, groupByVars, childBindingSet);
+
+        // Load the old state from the bytes if one was found; otherwise initialize the state.
+        final Optional<Bytes> stateBytes = Optional.ofNullable( tx.get(rowId, FluoQueryColumns.AGGREGATION_BINDING_SET) );
+
+        final AggregationState state;
+        if(stateBytes.isPresent()) {
+            // Deserialize the old state
+            final byte[] bytes = stateBytes.get().toArray();
+            state = AGG_STATE_SERDE.deserialize(bytes);
+        } else {
+            // Initialize a new state.
+            state = new AggregationState();
+
+            // If we have group by bindings, their values need to be added to the state's binding set.
+            final MapBindingSet bindingSet = state.getBindingSet();
+            for(final String variable : aggregationMetadata.getGroupByVariableOrder()) {
+                bindingSet.addBinding( childBindingSet.getBinding(variable) );
+            }
+        }
+
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Before Update: " + state.getBindingSet().toString() + "\n");
+
+        // Update the visibilities of the result binding set based on the child's visibilities.
+        final String oldVisibility = state.getVisibility();
+        final String updateVisibilities = VisibilitySimplifier.unionAndSimplify(oldVisibility, childBindingSet.getVisibility());
+        state.setVisibility(updateVisibilities);
+
+        // Update the Aggregation State with each Aggregation function included within this group.
+        for(final AggregationElement aggregation : aggregationMetadata.getAggregations()) {
+            final AggregationType type = aggregation.getAggregationType();
+            final AggregationFunction function = FUNCTIONS.get(type);
+            if(function == null) {
+                throw new RuntimeException("Unrecognized aggregation function: " + type);
+            }
+
+            function.update(aggregation, state, childBindingSet);
+        }
+
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "After Update:" + state.getBindingSet().toString() + "\n" );
+
+        // Store the updated state. This will write on top of any old state that was present for the Group By values.
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_BINDING_SET, Bytes.of(AGG_STATE_SERDE.serialize(state)));
+    }
+
+    /**
+     * A function that updates an {@link AggregationState}.
+     */
+    public static interface AggregationFunction {
+
+        /**
+         * Updates an {@link AggregationState} based on the values of a child Binding Set.
+         *
+         * @param aggregation - Defines which function needs to be performed as well as any details required
+         *   to do the aggregation work. (not null)
+         * @param state - The state that will be updated. (not null)
+         * @param childBindingSet - The Binding Set whose values will be used to update the state.
+         */
+        public void update(AggregationElement aggregation, AggregationState state, VisibilityBindingSet childBindingSet);
+    }
+
+    /**
+     * Increments the {@link AggregationState}'s count if the child Binding Set contains the binding name
+     * that is being counted by the {@link AggregationElement}.
+     */
+    public static final class CountFunction implements AggregationFunction {
+        @Override
+        public void update(final AggregationElement aggregation, final AggregationState state, final VisibilityBindingSet childBindingSet) {
+            checkArgument(aggregation.getAggregationType() == AggregationType.COUNT, "The CountFunction only accepts COUNT AggregationElements.");
+
+            // Only add one to the count if the child contains the binding that we are counting.
+            final String aggregatedName = aggregation.getAggregatedBindingName();
+            if(childBindingSet.hasBinding(aggregatedName)) {
+                final MapBindingSet result = state.getBindingSet();
+                final String resultName = aggregation.getResultBindingName();
+                final boolean newBinding = !result.hasBinding(resultName);
+
+                if(newBinding) {
+                    // Initialize the binding.
+                    result.addBinding(resultName, new IntegerLiteralImpl(BigInteger.ONE));
+                } else {
+                    // Update the existing binding.
+                    final Literal count = (Literal) result.getValue(resultName);
+                    final BigInteger updatedCount = count.integerValue().add( BigInteger.ONE );
+                    result.addBinding(resultName, new IntegerLiteralImpl(updatedCount));
+                }
+            }
+        }
+    }
+
+    /**
+     * Add to the {@link AggregationState}'s sum if the child Binding Set contains the binding name
+     * that is being summed by the {@link AggregationElement}.
+     */
+    public static final class SumFunction implements AggregationFunction {
+        /**
+         * Adds the child binding set's value to the running SUM result binding.
+         * Non-literal and non-numeric values leave the sum unchanged.
+         */
+        @Override
+        public void update(final AggregationElement aggregation, final AggregationState state, final VisibilityBindingSet childBindingSet) {
+            // Guard: this function only understands SUM aggregation elements.
+            checkArgument(aggregation.getAggregationType() == AggregationType.SUM, "The SumFunction only accepts SUM AggregationElements.");
+
+            // Only add values to the sum if the child contains the binding that we are summing.
+            final String aggregatedName = aggregation.getAggregatedBindingName();
+            if(childBindingSet.hasBinding(aggregatedName)) {
+                final MapBindingSet result = state.getBindingSet();
+                final String resultName = aggregation.getResultBindingName();
+                final boolean newBinding = !result.hasBinding(resultName);
+
+                // Get the starting number for the sum.
+                Literal sum;
+                if(newBinding) {
+                    sum = new IntegerLiteralImpl(BigInteger.ZERO);
+                } else {
+                    sum = (Literal) state.getBindingSet().getValue(resultName);
+                }
+
+                // Add the child binding set's value if it is a numeric literal.
+                // Otherwise the existing sum is re-bound below without modification.
+                final Value childValue = childBindingSet.getValue(aggregatedName);
+                if(childValue instanceof Literal) {
+                    final Literal childLiteral = (Literal) childValue;
+                    if (childLiteral.getDatatype() != null && XMLDatatypeUtil.isNumericDatatype(childLiteral.getDatatype())) {
+                        try {
+                            sum = MathUtil.compute(sum, childLiteral, MathOp.PLUS);
+                        } catch (final ValueExprEvaluationException e) {
+                            // Best-effort: skip a binding set that cannot be added rather than failing the update.
+                            log.error("A problem was encountered while updating a Sum Aggregation. This binding set will be ignored: " + childBindingSet);
+                            return;
+                        }
+                    }
+                }
+
+                // Update the state to include the new sum.
+                result.addBinding(resultName, sum);
+            }
+        }
+    }
+
+    /**
+     * Update the {@link AggregationState}'s average if the child Binding Set contains the binding name
+     * that is being averaged by the {@link AggregationElement}.
+     */
+    public static final class AverageFunction implements AggregationFunction {
+        /**
+         * Folds the child binding set's value into the running AVERAGE by updating the
+         * stored sum/count pair ({@link AverageState}) and re-deriving the average from it.
+         */
+        @Override
+        public void update(final AggregationElement aggregation, final AggregationState state, final VisibilityBindingSet childBindingSet) {
+            // Guard: this function only understands AVERAGE aggregation elements.
+            checkArgument(aggregation.getAggregationType() == AggregationType.AVERAGE, "The AverageFunction only accepts AVERAGE AggregationElements.");
+
+            // Only update the average if the child contains the binding that we are averaging.
+            final String aggregatedName = aggregation.getAggregatedBindingName();
+            if(childBindingSet.hasBinding(aggregatedName)) {
+                final MapBindingSet result = state.getBindingSet();
+                final String resultName = aggregation.getResultBindingName();
+                final boolean newBinding = !result.hasBinding(resultName);
+
+                // Get the state of the average.
+                // NOTE(review): when the result binding already exists, this assumes a matching
+                // AverageState entry was stored by an earlier update; a missing entry would NPE
+                // at getSum() below — confirm whether that invariant always holds.
+                final Map<String, AverageState> averageStates = state.getAverageStates();
+                AverageState averageState = newBinding ? new AverageState() : averageStates.get(resultName);
+
+                // Update the state of the average.
+                // Non-literal and non-numeric child values are silently ignored.
+                final Value childValue = childBindingSet.getValue(aggregatedName);
+                if(childValue instanceof Literal) {
+                    final Literal childLiteral = (Literal) childValue;
+                    if (childLiteral.getDatatype() != null && XMLDatatypeUtil.isNumericDatatype(childLiteral.getDatatype())) {
+                        try {
+                            // Update the sum.
+                            final Literal oldSum = new DecimalLiteralImpl(averageState.getSum());
+                            final BigDecimal sum = MathUtil.compute(oldSum, childLiteral, MathOp.PLUS).decimalValue();
+
+                            // Update the count.
+                            final BigInteger count = averageState.getCount().add( BigInteger.ONE );
+
+                            // Update the BindingSet to include the new average.
+                            final Literal sumLiteral = new DecimalLiteralImpl(sum);
+                            final Literal countLiteral = new IntegerLiteralImpl(count);
+                            final Literal average = MathUtil.compute(sumLiteral, countLiteral, MathOp.DIVIDE);
+                            result.addBinding(resultName, average);
+
+                            // Update the average state that is stored.
+                            averageState = new AverageState(sum, count);
+                            averageStates.put(resultName, averageState);
+                        } catch (final ValueExprEvaluationException e) {
+                            // Best-effort: skip a binding set that cannot be averaged rather than failing the update.
+                            log.error("A problem was encountered while updating an Average Aggregation. This binding set will be ignored: " + childBindingSet);
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Update the {@link AggregationState}'s max if the child Binding Set contains the binding name that is being
+     * maxed by the {@link AggregationElement}.
+     */
+    public static final class MaxFunction implements AggregationFunction {
+
+        // Defines the ordering used to compare candidate maximum values.
+        private final ValueComparator compare = new ValueComparator();
+
+        /**
+         * Updates the MAX result binding if the child binding set contains a larger value
+         * for the aggregated binding than the one currently stored.
+         */
+        @Override
+        public void update(final AggregationElement aggregation, final AggregationState state, final VisibilityBindingSet childBindingSet) {
+            // Guard: this function only understands MAX aggregation elements.
+            checkArgument(aggregation.getAggregationType() == AggregationType.MAX, "The MaxFunction only accepts MAX AggregationElements.");
+
+            // Only update the max if the child contains the binding that we are finding the max value for.
+            final String aggregatedName = aggregation.getAggregatedBindingName();
+            if(childBindingSet.hasBinding(aggregatedName)) {
+                final MapBindingSet result = state.getBindingSet();
+                final String resultName = aggregation.getResultBindingName();
+                final boolean newBinding = !result.hasBinding(resultName);
+
+                Value max;
+                if(newBinding) {
+                    // The first value seen for this group becomes the initial max.
+                    max = childBindingSet.getValue(aggregatedName);
+                } else {
+                    // Keep whichever of the stored max and the child's value compares larger.
+                    final Value oldMax = result.getValue(resultName);
+                    final Value childMax = childBindingSet.getValue(aggregatedName);
+                    max = compare.compare(childMax, oldMax) > 0 ? childMax : oldMax;
+                }
+
+                result.addBinding(resultName, max);
+            }
+        }
+    }
+
+    /**
+     * Update the {@link AggregationState}'s min if the child Binding Set contains the binding name that is being
+     * minimized by the {@link AggregationElement}.
+     */
+    public static final class MinFunction implements AggregationFunction {
+
+        // Defines the ordering used to compare candidate minimum values.
+        private final ValueComparator compare = new ValueComparator();
+
+        /**
+         * Updates the MIN result binding if the child binding set contains a smaller value
+         * for the aggregated binding than the one currently stored.
+         */
+        @Override
+        public void update(final AggregationElement aggregation, final AggregationState state, final VisibilityBindingSet childBindingSet) {
+            // Guard: this function only understands MIN aggregation elements.
+            checkArgument(aggregation.getAggregationType() == AggregationType.MIN, "The MinFunction only accepts MIN AggregationElements.");
+
+            // Only update the min if the child contains the binding that we are finding the min value for.
+            final String aggregatedName = aggregation.getAggregatedBindingName();
+            if(childBindingSet.hasBinding(aggregatedName)) {
+                final MapBindingSet result = state.getBindingSet();
+                final String resultName = aggregation.getResultBindingName();
+                final boolean newBinding = !result.hasBinding(resultName);
+
+                Value min;
+                if(newBinding) {
+                    // The first value seen for this group becomes the initial min.
+                    min = childBindingSet.getValue(aggregatedName);
+                } else {
+                    // Keep whichever of the stored min and the child's value compares smaller.
+                    final Value oldMin = result.getValue(resultName);
+                    final Value childMin = childBindingSet.getValue(aggregatedName);
+                    min = compare.compare(childMin, oldMin) < 0 ? childMin : oldMin;
+                }
+
+                result.addBinding(resultName, min);
+            }
+        }
+    }
+
+    /**
+     * Reads/Writes instances of {@link AggregationState} to/from bytes.
+     */
+    public static interface AggregationStateSerDe {
+
+        // Implementations are expected to round-trip: deserialize(serialize(state)) yields
+        // a state equal to the original (AggregationState implements equals()).
+
+        /**
+         * @param state - The state that will be serialized. (not null)
+         * @return The state serialized to a byte[].
+         */
+        public byte[] serialize(AggregationState state);
+
+        /**
+         * @param bytes - The bytes that will be deserialized. (not null)
+         * @return The {@link AggregationState} that was read from the bytes.
+         */
+        public AggregationState deserialize(byte[] bytes);
+    }
+
+    /**
+     * An implementation of {@link AggregationStateSerDe} that uses Java Serialization.
+     */
+    public static final class ObjectSerializationAggregationStateSerDe implements AggregationStateSerDe {
+
+        /**
+         * Serializes the state using standard Java Object Serialization.
+         * Any I/O failure is rethrown as an unchecked {@link RuntimeException}.
+         */
+        @Override
+        public byte[] serialize(final AggregationState state) {
+            requireNonNull(state);
+
+            // ByteArrayOutputStream needs no explicit close; only the ObjectOutputStream is managed.
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            try(final ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+                oos.writeObject(state);
+            } catch (final IOException e) {
+                throw new RuntimeException("A problem was encountered while serializing an AggregationState object.", e);
+            }
+
+            return baos.toByteArray();
+        }
+
+        /**
+         * Deserializes an {@link AggregationState} using standard Java Object Serialization.
+         * SECURITY NOTE(review): Java native deserialization runs arbitrary readObject() logic;
+         * only feed this bytes from a trusted source (e.g. values this application wrote).
+         */
+        @Override
+        public AggregationState deserialize(final byte[] bytes) {
+            requireNonNull(bytes);
+
+            final AggregationState state;
+
+            final ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+            try(ObjectInputStream ois = new ObjectInputStream(bais)) {
+                // Verify the decoded object is the expected type before casting.
+                final Object o = ois.readObject();
+                if(o instanceof AggregationState) {
+                    state = (AggregationState)o;
+                } else {
+                    throw new RuntimeException("A problem was encountered while deserializing an AggregationState object. Wrong class.");
+                }
+            } catch (final IOException | ClassNotFoundException e) {
+                throw new RuntimeException("A problem was encountered while deserializing an AggregationState object.", e);
+            }
+
+            return state;
+        }
+    }
+
+    /**
+     * Keeps track information required to update and build the resulting Binding Set for a set of Group By values.
+     */
+    public static final class AggregationState implements Serializable {
+        private static final long serialVersionUID = 1L;
+
+        // The visibility equation that encompasses all data the aggregation state is derived from.
+        // Mutable: updated via setVisibility(String).
+        private String visibility;
+
+        // A binding set that holds the current state of the aggregations.
+        private final MapBindingSet bindingSet;
+
+        // A map from result binding name to the state that derived that binding's value.
+        private final Map<String, AverageState> avgStates;
+
+        /**
+         * Constructs an instance of {@link AggregationState}.
+         */
+        public AggregationState() {
+            this.visibility = "";
+            this.bindingSet = new MapBindingSet();
+            this.avgStates = new HashMap<>();
+        }
+
+        /**
+         * Constructs an instance of {@link AggregationState}.
+         *
+         * @param visibility - The visibility equation associated with the resulting binding set. (not null)
+         * @param bindingSet - The Binding Set whose values are being updated. It holds the result for a set of
+         *   Group By values. (not null)
+         * @param avgStates - If the aggregation is doing an Average, this is a map from result binding name to
+         *   average state for that binding.
+         */
+        public AggregationState(
+                final String visibility,
+                final MapBindingSet bindingSet,
+                final Map<String, AverageState> avgStates) {
+            this.visibility = requireNonNull(visibility);
+            this.bindingSet = requireNonNull(bindingSet);
+            this.avgStates = requireNonNull(avgStates);
+        }
+
+        /**
+         * @return The visibility equation associated with the resulting binding set.
+         */
+        public String getVisibility() {
+            return visibility;
+        }
+
+        /**
+         * @param visibility - The visibility equation associated with the resulting binding set.
+         */
+        public void setVisibility(final String visibility) {
+            this.visibility = requireNonNull(visibility);
+        }
+
+        /**
+         * Note: returns the live internal binding set; AggregationFunctions mutate it directly.
+         *
+         * @return The Binding Set whose values are being updated. It holds the result for a set of Group By values.
+         */
+        public MapBindingSet getBindingSet() {
+            return bindingSet;
+        }
+
+        /**
+         * Note: returns the live internal map; AggregationFunctions mutate it directly.
+         *
+         * @return If the aggregation is doing an Average, this is a map from result binding name to
+         *   average state for that binding.
+         */
+        public Map<String, AverageState> getAverageStates() {
+            return avgStates;
+        }
+
+        @Override
+        public int hashCode() {
+            // NOTE(review): 'visibility', 'bindingSet', and 'avgStates' are all mutable, so the
+            // hash can change after construction; avoid using this as a key in hash collections.
+            return Objects.hash(visibility, bindingSet, avgStates);
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if(o instanceof AggregationState) {
+                final AggregationState state = (AggregationState) o;
+                return Objects.equals(visibility, state.visibility) &&
+                        Objects.equals(bindingSet, state.bindingSet) &&
+                        Objects.equals(avgStates, state.avgStates);
+            }
+            return false;
+        }
+    }
+
+    /**
+     * The Sum and Count of the values that are being averaged. The average itself is derived from these values.
+     */
+    public static class AverageState implements Serializable {
+        private static final long serialVersionUID = 1L;
+
+        // Immutable value object: both fields are final and there are no setters.
+        private final BigDecimal sum;
+        private final BigInteger count;
+
+        /**
+         * Constructs an instance of {@link AverageState} where the count and sum start at 0.
+         */
+        public AverageState() {
+            sum = BigDecimal.ZERO;
+            count = BigInteger.ZERO;
+        }
+
+        /**
+         * Constructs an instance of {@link AverageState}.
+         *
+         * @param sum - The sum of the values that are averaged. (not null)
+         * @param count - The number of values that are averaged. (not null)
+         */
+        public AverageState(final BigDecimal sum, final BigInteger count) {
+            this.sum = requireNonNull(sum);
+            this.count = requireNonNull(count);
+        }
+
+        /**
+         * @return The sum of the values that are averaged.
+         */
+        public BigDecimal getSum() {
+            return sum;
+        }
+
+        /**
+         * @return The number of values that are averaged.
+         */
+        public BigInteger getCount() {
+            return count;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(sum, count);
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            // NOTE(review): BigDecimal.equals() is scale-sensitive (2.0 != 2.00), so states with
+            // numerically equal but differently-scaled sums compare unequal.
+            if(o instanceof AverageState) {
+                final AverageState state = (AverageState) o;
+                return Objects.equals(sum, state.sum) &&
+                        Objects.equals(count, state.count);
+            }
+            return false;
+        }
+
+        @Override
+        public String toString() {
+            return "Sum: " + sum + " Count: " + count;
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/BindingSetRow.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/BindingSetRow.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/BindingSetRow.java
index 8c8505d..2e45ea6 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/BindingSetRow.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/BindingSetRow.java
@@ -21,12 +21,12 @@ package org.apache.rya.indexing.pcj.fluo.app;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.NODEID_BS_DELIM;
 
+import org.apache.fluo.api.data.Bytes;
+
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
 import edu.umd.cs.findbugs.annotations.NonNull;
 import net.jcip.annotations.Immutable;
 
-import org.apache.fluo.api.data.Bytes;
-
 /**
  * The values of an Accumulo Row ID for a row that stores a Binding set for
  * a specific Node ID of a query.
@@ -73,13 +73,8 @@ public class BindingSetRow {
 
         // Read the Node ID from the row's bytes.
         final String[] rowArray = row.toString().split(NODEID_BS_DELIM);
-        if(rowArray.length != 2) {
-            throw new IllegalArgumentException("A row must contain a single NODEID_BS_DELIM.");
-        }
-
         final String nodeId = rowArray[0];
-        String bindingSetString = rowArray[1];
-
+        final String bindingSetString = rowArray.length == 2 ? rowArray[1] : "";
         return new BindingSetRow(nodeId, bindingSetString);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
index 3b17a33..42ec686 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
@@ -19,24 +19,22 @@
 package org.apache.rya.indexing.pcj.fluo.app;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.NODEID_BS_DELIM;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.log4j.Logger;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
+import org.apache.rya.indexing.pcj.fluo.app.util.BindingSetUtil;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 import org.openrdf.model.Resource;
 import org.openrdf.model.Statement;
 import org.openrdf.model.URI;
 import org.openrdf.model.Value;
 import org.openrdf.model.ValueFactory;
 import org.openrdf.model.impl.ValueFactoryImpl;
-import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.QueryEvaluationException;
 import org.openrdf.query.algebra.Filter;
@@ -45,14 +43,12 @@ import org.openrdf.query.algebra.evaluation.TripleSource;
 import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException;
 import org.openrdf.query.algebra.evaluation.impl.EvaluationStrategyImpl;
 import org.openrdf.query.algebra.evaluation.util.QueryEvaluationUtil;
-import org.openrdf.query.impl.MapBindingSet;
 
 import com.google.common.base.Optional;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 import info.aduna.iteration.CloseableIteration;
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.Column;
 
 /**
  * Updates the results of a Filter node when its child has added a new Binding
@@ -61,8 +57,9 @@ import org.apache.fluo.api.data.Column;
 @DefaultAnnotation(NonNull.class)
 public class FilterResultUpdater {
 
-    private static final BindingSetStringConverter ID_CONVERTER = new BindingSetStringConverter();
-    private static final VisibilityBindingSetStringConverter VALUE_CONVERTER = new VisibilityBindingSetStringConverter();
+    private static final Logger log = Logger.getLogger(FilterResultUpdater.class);
+
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     /**
      * A utility class used to search SPARQL queries for Filters.
@@ -96,7 +93,7 @@ public class FilterResultUpdater {
      * new Binding Set to its results.
      *
      * @param tx - The transaction all Fluo queries will use. (not null)
-     * @param childBindingSet - A binding set that the query's child node has emmitted. (not null)
+     * @param childBindingSet - A binding set that the query's child node has emitted. (not null)
      * @param filterMetadata - The metadata of the Filter whose results will be updated. (not null)
      * @throws Exception Something caused the update to fail.
      */
@@ -108,6 +105,11 @@ public class FilterResultUpdater {
         checkNotNull(childBindingSet);
         checkNotNull(filterMetadata);
 
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Filter Node ID: " + filterMetadata.getNodeId() + "\n" +
+                "Binding Set:\n" + childBindingSet + "\n");
+
         // Parse the original query and find the Filter that represents filterId.
         final String sparql = filterMetadata.getOriginalSparql();
         final int indexWithinQuery = filterMetadata.getFilterIndexWithinSparql();
@@ -118,23 +120,22 @@ public class FilterResultUpdater {
         if (isTrue(condition, childBindingSet)) {
             // Create the Filter's binding set from the child's.
             final VariableOrder filterVarOrder = filterMetadata.getVariableOrder();
+            final BindingSet filterBindingSet = BindingSetUtil.keepBindings(filterVarOrder, childBindingSet);
 
-            final MapBindingSet filterBindingSet = new MapBindingSet();
-            for(final String bindingName : filterVarOrder) {
-                if(childBindingSet.hasBinding(bindingName)) {
-                    final Binding binding = childBindingSet.getBinding(bindingName);
-                    filterBindingSet.addBinding(binding);
-                }
-            }
+            // Create the Row Key for the emitted binding set. It does not contain visibilities.
+            final Bytes resultRow = RowKeyUtil.makeRowKey(filterMetadata.getNodeId(), filterVarOrder, filterBindingSet);
+
+            // If this is a new binding set, then emit it.
+            if(tx.get(resultRow, FluoQueryColumns.FILTER_BINDING_SET) == null) {
+                final VisibilityBindingSet visBindingSet = new VisibilityBindingSet(filterBindingSet, childBindingSet.getVisibility());
+                final Bytes nodeValueBytes = BS_SERDE.serialize(visBindingSet);
 
-            final String filterBindingSetIdString = ID_CONVERTER.convert(filterBindingSet, filterVarOrder);
-            String filterBindingSetValueString = "";
-            filterBindingSetValueString = VALUE_CONVERTER.convert(childBindingSet, filterVarOrder);
+                log.trace(
+                        "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                        "New Binding Set: " + visBindingSet + "\n");
 
-            final String row = filterMetadata.getNodeId() + NODEID_BS_DELIM + filterBindingSetIdString;
-            final Column col = FluoQueryColumns.FILTER_BINDING_SET;
-            final String value = filterBindingSetValueString;
-            tx.set(row, col, value);
+                tx.set(resultRow, FluoQueryColumns.FILTER_BINDING_SET, nodeValueBytes);
+            }
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncUpdateDAO.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncUpdateDAO.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncUpdateDAO.java
index b78562c..602fd9d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncUpdateDAO.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncUpdateDAO.java
@@ -25,6 +25,7 @@ import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.UR
 
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.SnapshotBase;
 import org.apache.fluo.api.client.Transaction;
 import org.apache.fluo.api.client.scanner.CellScanner;
 import org.apache.fluo.api.data.Bytes;
@@ -53,7 +54,7 @@ public class IncUpdateDAO {
         return rs;
     }
 
-    private static String getTripleString(final RyaStatement rs) {
+    public static String getTripleString(final RyaStatement rs) {
         final String subj = rs.getSubject().getData() + TYPE_DELIM + URI_TYPE;
         final String pred = rs.getPredicate().getData() + TYPE_DELIM + URI_TYPE;
         final String objData = rs.getObject().getData();
@@ -102,81 +103,41 @@ public class IncUpdateDAO {
      */
     public static void printTriples(final FluoClient fluoClient) throws Exception {
         try (Snapshot snapshot = fluoClient.newSnapshot()) {
-        	CellScanner cscanner = snapshot.scanner().fetch(new Column("triples", "SPO")).build();
-        	for (RowColumnValue rcv : cscanner) {
+        	final CellScanner cscanner = snapshot.scanner().fetch(new Column("triples", "SPO")).build();
+        	for (final RowColumnValue rcv : cscanner) {
         		System.out.println("Triple: "+rcv.getsRow());
 			}
         }
     }
 
-//    /**
-//     * Print all bindings for the given queries. For demo's and diagnostics.
-//     * @param fluoClient
-//     * @param queryNames
-//     * @throws Exception
-//     */
-//    public static void printQueryResults(final FluoClient fluoClient,
-//            final Map<String, String> queryNames) throws Exception {
-//        try (Snapshot snapshot = fluoClient.newSnapshot();
-//                TypedTransaction tx1 = stl.wrap(fluoClient.newTransaction())) {
-//
-//            final ScannerConfiguration scanConfig = new ScannerConfiguration();
-//            scanConfig.fetchColumn(Bytes.of("query"), Bytes.of("bindingSet"));
-//
-//            final RowIterator rowIter = snapshot.get(scanConfig);
-//            String sparqlRow = "";
-//            System.out.println("*********************************************************");
-//
-//            while (rowIter.hasNext()) {
-//                final Entry<Bytes, ColumnIterator> row = rowIter.next();
-//                final String[] joinInfo = row.getKey().toString()
-//                        .split(NODEID_BS_DELIM);
-//                final String sparql = joinInfo[0];
-//                final String bs = joinInfo[1];
-//                if (!sparqlRow.equals(sparql)) {
-//                    sparqlRow = sparql;
-//                    System.out.println();
-//                    System.out.println();
-//                    System.out.println(queryNames.get(sparqlRow)
-//                            + " has bindings: ");
-//                    System.out.println();
-//                }
-//
-//                final String variables = tx1.get().row(sparqlRow).col(NODE_VARS).toString();
-//                final String[] vars = variables.split(";");
-//                final String[] bsVals = bs.split(DELIM);
-//                System.out.print("Bindingset:  ");
-//                for (int i = 0; i < vars.length; i++) {
-//                    System.out.print(vars[i] + " = " + bsVals[i] + "   ");
-//                }
-//                System.out.println();
-//
-//            }
-//
-//            System.out.println("*********************************************************");
-//        }
-//    }
+    /**
+     * Print all rows in the Fluo table for diagnostics.
+     * <p>
+     * Consider using {@code FluoITHelper.printFluoTable(FluoClient client)} instead.
+     */
+    @Deprecated
+    public static void printAll(final SnapshotBase sx) {
+        // Column layout shared by the header row and every data row.
+        final String FORMAT = "%-30s | %-10s | %-10s | %-40s\n";
+        System.out.println("Printing all tables.  Showing unprintable bytes and braces as {ff} and {{} and {}} where ff is the value in hexadecimal.");
+        System.out.format(FORMAT, "--Row--", "--Column Family--", "--Column Qual--", "--Value--");
+        // Scan every cell visible to the provided snapshot and print one line per cell,
+        // escaping unprintable bytes via to_String().
+        final CellScanner cscanner = sx.scanner().build();
+        for (final RowColumnValue rcv : cscanner) {
+            System.out.format(FORMAT, to_String(rcv.getRow()),
+                    to_String(rcv.getColumn().getFamily()),
+                    to_String(rcv.getColumn().getQualifier()),
+                    to_String(rcv.getValue()));
+        }
+    }
 
     /**
      * Print all rows in the Fluo table for diagnostics.
-     * @param fluoClient
-     * @throws Exception
+     * <p>
+     * Consider using {@code FluoITHelper.printFluoTable(FluoClient client)} instead.
      */
+    @Deprecated
     public static void printAll(final FluoClient fluoClient) throws Exception {
-        final String FORMAT = "%-30s | %-10s | %-10s | %-40s\n";
-        System.out
-        .println("Printing all tables.  Showing unprintable bytes and braces as {ff} and {{} and {}} where ff is the value in hexadecimal.");
-        System.out.format(FORMAT, "--Row--", "--Column Family--",
-                "--Column Qual--", "--Value--");
-        // Use try with resource to ensure snapshot is closed.
-        try (Snapshot snapshot = fluoClient.newSnapshot()) {
-        	CellScanner cscanner = snapshot.scanner().build();
-        	for (RowColumnValue rcv : cscanner) {
-        		System.out.format(FORMAT, to_String(rcv.getRow()),
-                        to_String(rcv.getColumn().getFamily()),
-                        to_String(rcv.getColumn().getQualifier()),
-                        to_String(rcv.getValue()));
-			}
+        try(Snapshot sx = fluoClient.newSnapshot()) {
+            printAll(sx);
         }
     }
 
@@ -208,4 +169,4 @@ public class IncUpdateDAO {
         }
         return sb.toString();
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
index 84581ef..be4df71 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
@@ -31,6 +31,7 @@ public class IncrementalUpdateConstants {
     public static final String SP_PREFIX = "STATEMENT_PATTERN";
     public static final String JOIN_PREFIX = "JOIN";
     public static final String FILTER_PREFIX = "FILTER";
+    public static final String AGGREGATION_PREFIX = "AGGREGATION";
     public static final String QUERY_PREFIX = "QUERY";
 
     public static final String URI_TYPE = "http://www.w3.org/2001/XMLSchema#anyURI";

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
index 39dcc16..2cb5a54 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
@@ -27,14 +27,20 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.Span;
+import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.utils.VisibilitySimplifier;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
@@ -42,17 +48,12 @@ import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.impl.MapBindingSet;
 
-import com.google.common.base.Joiner;
 import com.google.common.base.Optional;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.client.scanner.ColumnScanner;
-import org.apache.fluo.api.client.scanner.RowScanner;
-import org.apache.fluo.api.data.Column;
-import org.apache.fluo.api.data.ColumnValue;
-import org.apache.fluo.api.data.Span;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Updates the results of a Join node when one of its children has added a
@@ -61,49 +62,57 @@ import org.apache.fluo.api.data.Span;
 @DefaultAnnotation(NonNull.class)
 public class JoinResultUpdater {
 
-    private static final BindingSetStringConverter idConverter = new BindingSetStringConverter();
-    private static final VisibilityBindingSetStringConverter valueConverter = new VisibilityBindingSetStringConverter();
+    private static final Logger log = Logger.getLogger(JoinResultUpdater.class);
+
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+    private static final VisibilityBindingSetStringConverter VIS_BS_CONVERTER = new VisibilityBindingSetStringConverter();
 
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
-   
+
     /**
      * Updates the results of a Join node when one of its children has added a
      * new Binding Set to its results.
      *
      * @param tx - The transaction all Fluo queries will use. (not null)
-     * @param childId - The Node ID of the child whose results received a new Binding Set. (not null)
+     * @param childNodeId - The Node ID of the child whose results received a new Binding Set. (not null)
      * @param childBindingSet - The Binding Set that was just emitted by child node. (not null)
      * @param joinMetadata - The metadata for the Join that has been notified. (not null)
-     * @throws BindingSetConversionException
+     * @throws Exception The update could not be successfully performed.
      */
     public void updateJoinResults(
             final TransactionBase tx,
-            final String childId,
+            final String childNodeId,
             final VisibilityBindingSet childBindingSet,
-            final JoinMetadata joinMetadata) throws BindingSetConversionException {
+            final JoinMetadata joinMetadata) throws Exception {
         checkNotNull(tx);
-        checkNotNull(childId);
+        checkNotNull(childNodeId);
         checkNotNull(childBindingSet);
         checkNotNull(joinMetadata);
 
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Join Node ID: " + joinMetadata.getNodeId() + "\n" +
+                "Child Node ID: " + childNodeId + "\n" +
+                "Child Binding Set:\n" + childBindingSet + "\n");
+
         // Figure out which join algorithm we are going to use.
         final IterativeJoin joinAlgorithm;
         switch(joinMetadata.getJoinType()) {
-            case NATURAL_JOIN:
-                joinAlgorithm = new NaturalJoin();
-                break;
-            case LEFT_OUTER_JOIN:
-                joinAlgorithm = new LeftOuterJoin();
-                break;
-            default:
-                throw new RuntimeException("Unsupported JoinType: " + joinMetadata.getJoinType());
+        case NATURAL_JOIN:
+            joinAlgorithm = new NaturalJoin();
+            break;
+        case LEFT_OUTER_JOIN:
+            joinAlgorithm = new LeftOuterJoin();
+            break;
+        default:
+            throw new RuntimeException("Unsupported JoinType: " + joinMetadata.getJoinType());
         }
 
         // Figure out which side of the join the new binding set appeared on.
         final Side emittingSide;
         final String siblingId;
 
-        if(childId.equals(joinMetadata.getLeftChildNodeId())) {
+        if(childNodeId.equals(joinMetadata.getLeftChildNodeId())) {
             emittingSide = Side.LEFT;
             siblingId = joinMetadata.getRightChildNodeId();
         } else {
@@ -112,7 +121,7 @@ public class JoinResultUpdater {
         }
 
         // Iterates over the sibling node's BindingSets that join with the new binding set.
-        final FluoTableIterator siblingBindingSets = makeSiblingScanIterator(childId, childBindingSet, siblingId, tx);
+        final FluoTableIterator siblingBindingSets = makeSiblingScanIterator(childNodeId, childBindingSet, siblingId, tx);
 
         // Iterates over the resulting BindingSets from the join.
         final Iterator<VisibilityBindingSet> newJoinResults;
@@ -125,14 +134,22 @@ public class JoinResultUpdater {
         // Insert the new join binding sets to the Fluo table.
         final VariableOrder joinVarOrder = joinMetadata.getVariableOrder();
         while(newJoinResults.hasNext()) {
-            final BindingSet newJoinResult = newJoinResults.next();
-            final String joinBindingSetStringId = idConverter.convert(newJoinResult, joinVarOrder);
-            final String joinBindingSetStringValue = valueConverter.convert(newJoinResult, joinVarOrder);
-
-            final String row = joinMetadata.getNodeId() + NODEID_BS_DELIM + joinBindingSetStringId;
-            final Column col = FluoQueryColumns.JOIN_BINDING_SET;
-            final String value = joinBindingSetStringValue;
-            tx.set(row, col, value);
+            final VisibilityBindingSet newJoinResult = newJoinResults.next();
+
+            // Create the Row Key for the emitted binding set. It does not contain visibilities.
+            final Bytes resultRow = RowKeyUtil.makeRowKey(joinMetadata.getNodeId(), joinVarOrder, newJoinResult);
+
+            // Only insert the join Binding Set if it is new.
+            if(tx.get(resultRow, FluoQueryColumns.JOIN_BINDING_SET) == null) {
+                // Create the Node Value. It does contain visibilities.
+                final Bytes nodeValueBytes = BS_SERDE.serialize(newJoinResult);
+
+                log.trace(
+                        "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                        "New Join Result:\n" + newJoinResult + "\n");
+
+                tx.set(resultRow, FluoQueryColumns.JOIN_BINDING_SET, nodeValueBytes);
+            }
         }
     }
 
@@ -150,8 +167,8 @@ public class JoinResultUpdater {
         final List<String> commonVars = getCommonVars(childVarOrder, siblingVarOrder);
 
         // Get the Binding strings
-        final String childBindingSetString = valueConverter.convert(childBindingSet, childVarOrder);
-        String[] childBindingArray = childBindingSetString.split("\u0001");
+        final String childBindingSetString = VIS_BS_CONVERTER.convert(childBindingSet, childVarOrder);
+        final String[] childBindingArray = childBindingSetString.split("\u0001");
         final String[] childBindingStrings = FluoStringConverter.toBindingStrings(childBindingArray[0]);
 
         // Create the prefix that will be used to scan for binding sets of the sibling node.
@@ -174,7 +191,7 @@ public class JoinResultUpdater {
         // earlier iterations of this algorithm.
 
         final RowScanner rs = tx.scanner().over(Span.prefix(siblingScanPrefix)).fetch(getScanColumnFamily(siblingId)).byRow().build();
-        return new FluoTableIterator(rs, siblingVarOrder);
+        return new FluoTableIterator(rs);
     }
 
 
@@ -270,7 +287,7 @@ public class JoinResultUpdater {
             default:
                 throw new IllegalArgumentException("The child node's sibling is not of type StatementPattern, Join, Left Join, or Filter.");
         }
-        
+
         return column;
     }
 
@@ -426,14 +443,8 @@ public class JoinResultUpdater {
                 leftVisi = joinResult.getVisibility();
                 rightVisi = newResult.getVisibility();
             }
+            final String visibility = VisibilitySimplifier.unionAndSimplify(leftVisi, rightVisi);
 
-            String visibility = "";
-            final Joiner join = Joiner.on(")&(");
-            if(leftVisi.isEmpty() || rightVisi.isEmpty()) {
-                visibility = (leftVisi + rightVisi).trim();
-            } else {
-                visibility = "(" + join.join(leftVisi, rightVisi) + ")";
-            }
             return new VisibilityBindingSet(bs, visibility);
         }
 
@@ -449,24 +460,22 @@ public class JoinResultUpdater {
      */
     private static final class FluoTableIterator implements Iterator<VisibilityBindingSet> {
 
+        private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+
         private static final Set<Column> BINDING_SET_COLUMNS = Sets.newHashSet(
                 FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET,
                 FluoQueryColumns.JOIN_BINDING_SET,
                 FluoQueryColumns.FILTER_BINDING_SET);
 
         private final Iterator<ColumnScanner> rows;
-        private final VariableOrder varOrder;
 
         /**
          * Constructs an instance of {@link FluoTableIterator}.
          *
          * @param rows - Iterates over RowId values in a Fluo Table. (not null)
-         * @param varOrder - The Variable Order of binding sets that will be
-         *   read from the Fluo Table. (not null)
          */
-        public FluoTableIterator(final RowScanner rows, final VariableOrder varOrder) {
+        public FluoTableIterator(final RowScanner rows) {
             this.rows = checkNotNull(rows).iterator();
-            this.varOrder = checkNotNull(varOrder);
         }
 
         @Override
@@ -478,12 +487,16 @@ public class JoinResultUpdater {
         public VisibilityBindingSet next() {
             final ColumnScanner columns = rows.next();
 
-            for (ColumnValue cv : columns) {
-            	 if(BINDING_SET_COLUMNS.contains(cv.getColumn())) {
-                     final String bindingSetString = cv.getsValue();
-                     return (VisibilityBindingSet) valueConverter.convert(bindingSetString, varOrder);
-                 }
-			}
+            for (final ColumnValue cv : columns) {
+                if(BINDING_SET_COLUMNS.contains(cv.getColumn())) {
+                    final Bytes value = cv.getValue();
+                    try {
+                        return BS_SERDE.deserialize(value);
+                    } catch (final Exception e) {
+                        throw new RuntimeException("Row did not containing a Binding Set.", e);
+                    }
+                }
+            }
 
             throw new RuntimeException("Row did not containing a Binding Set.");
         }

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
index 0a5ecc1..5365e30 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
@@ -19,6 +19,7 @@
 package org.apache.rya.indexing.pcj.fluo.app;
 
 import static java.util.Objects.requireNonNull;
+import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.AGGREGATION_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.FILTER_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.JOIN_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.QUERY_PREFIX;
@@ -26,14 +27,13 @@ import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.SP
 
 import java.util.List;
 
+import org.apache.fluo.api.data.Column;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns.QueryNodeMetadataColumns;
 import org.openrdf.query.BindingSet;
 
 import com.google.common.base.Optional;
 
-import org.apache.fluo.api.data.Column;
-
 /**
  * Represents the different types of nodes that a Query may have.
  */
@@ -41,7 +41,8 @@ public enum NodeType {
     FILTER (QueryNodeMetadataColumns.FILTER_COLUMNS, FluoQueryColumns.FILTER_BINDING_SET),
     JOIN(QueryNodeMetadataColumns.JOIN_COLUMNS, FluoQueryColumns.JOIN_BINDING_SET),
     STATEMENT_PATTERN(QueryNodeMetadataColumns.STATEMENTPATTERN_COLUMNS, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET),
-    QUERY(QueryNodeMetadataColumns.QUERY_COLUMNS, FluoQueryColumns.QUERY_BINDING_SET);
+    QUERY(QueryNodeMetadataColumns.QUERY_COLUMNS, FluoQueryColumns.QUERY_BINDING_SET),
+    AGGREGATION(QueryNodeMetadataColumns.AGGREGATION_COLUMNS, FluoQueryColumns.AGGREGATION_BINDING_SET);
 
     //Metadata Columns associated with given NodeType
     private QueryNodeMetadataColumns metadataColumns;
@@ -55,7 +56,7 @@ public enum NodeType {
      * @param metadataColumns - Metadata {@link Column}s associated with this {@link NodeType}. (not null)
     * @param bindingSetColumn - The {@link Column} used to store this {@link NodeType}'s {@link BindingSet}s. (not null)
      */
-    private NodeType(QueryNodeMetadataColumns metadataColumns, Column bindingSetColumn) {
+    private NodeType(final QueryNodeMetadataColumns metadataColumns, final Column bindingSetColumn) {
     	this.metadataColumns = requireNonNull(metadataColumns);
     	this.bindingSetColumn = requireNonNull(bindingSetColumn);
     }
@@ -95,6 +96,8 @@ public enum NodeType {
             type = JOIN;
         } else if(nodeId.startsWith(QUERY_PREFIX)) {
             type = QUERY;
+        } else if(nodeId.startsWith(AGGREGATION_PREFIX)) {
+            type = AGGREGATION;
         }
 
         return Optional.fromNullable(type);

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
index 9cd2bd7..ba82726 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
@@ -19,22 +19,22 @@
 package org.apache.rya.indexing.pcj.fluo.app;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.NODEID_BS_DELIM;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
 
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
+import org.apache.rya.indexing.pcj.fluo.app.util.BindingSetUtil;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
-import org.openrdf.query.Binding;
-import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.query.BindingSet;
 
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.data.Column;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Updates the results of a Query node when one of its children has added a
@@ -42,9 +42,10 @@ import org.apache.fluo.api.data.Column;
  */
 @DefaultAnnotation(NonNull.class)
 public class QueryResultUpdater {
-    
-    private final BindingSetStringConverter converter = new BindingSetStringConverter();
-    private final VisibilityBindingSetStringConverter valueConverter = new VisibilityBindingSetStringConverter();
+    private static final Logger log = Logger.getLogger(QueryResultUpdater.class);
+
+    private static final FluoQueryMetadataDAO METADATA_DA0 = new FluoQueryMetadataDAO();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     /**
      * Updates the results of a Query node when one of its children has added a
@@ -53,32 +54,46 @@ public class QueryResultUpdater {
      * @param tx - The transaction all Fluo queries will use. (not null)
      * @param childBindingSet - A binding set that the query's child node has emmitted. (not null)
      * @param queryMetadata - The metadata of the Query whose results will be updated. (not null)
+     * @throws Exception A problem caused the update to fail.
      */
     public void updateQueryResults(
             final TransactionBase tx,
             final VisibilityBindingSet childBindingSet,
-            final QueryMetadata queryMetadata) {
+            final QueryMetadata queryMetadata) throws Exception {
         checkNotNull(tx);
         checkNotNull(childBindingSet);
         checkNotNull(queryMetadata);
 
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "Join Node ID: " + queryMetadata.getNodeId() + "\n" +
+                "Child Node ID: " + queryMetadata.getChildNodeId() + "\n" +
+                "Child Binding Set:\n" + childBindingSet + "\n");
+
         // Create the query's Binding Set from the child node's binding set.
         final VariableOrder queryVarOrder = queryMetadata.getVariableOrder();
+        final BindingSet queryBindingSet = BindingSetUtil.keepBindings(queryVarOrder, childBindingSet);
 
-        final MapBindingSet queryBindingSet = new MapBindingSet();
-        for(final String bindingName : queryVarOrder) {
-            if(childBindingSet.hasBinding(bindingName)) {
-                final Binding binding = childBindingSet.getBinding(bindingName);
-                queryBindingSet.addBinding(binding);
-            }
+        // Create the Row Key for the result. If the child node groups results, then the key must only contain the Group By variables.
+        final Bytes resultRow;
+
+        final String childNodeId = queryMetadata.getChildNodeId();
+        final boolean isGrouped = childNodeId.startsWith( IncrementalUpdateConstants.AGGREGATION_PREFIX );
+        if(isGrouped) {
+            final AggregationMetadata aggMetadata = METADATA_DA0.readAggregationMetadata(tx, childNodeId);
+            final VariableOrder groupByVars = aggMetadata.getGroupByVariableOrder();
+            resultRow = RowKeyUtil.makeRowKey(queryMetadata.getNodeId(), groupByVars, queryBindingSet);
+        } else {
+            resultRow = RowKeyUtil.makeRowKey(queryMetadata.getNodeId(), queryVarOrder, queryBindingSet);
         }
-        final String queryBindingSetString = converter.convert(queryBindingSet, queryVarOrder);
-        final String queryBindingSetValueString = valueConverter.convert(new VisibilityBindingSet(queryBindingSet, childBindingSet.getVisibility()), queryVarOrder);
 
-        // Commit it to the Fluo table for the SPARQL query. This isn't guaranteed to be a new entry.
-        final String row = queryMetadata.getNodeId() + NODEID_BS_DELIM + queryBindingSetString;
-        final Column col = FluoQueryColumns.QUERY_BINDING_SET;
-        final String value = queryBindingSetValueString;
-        tx.set(row, col, value);
+        // Create the Binding Set that goes in the Node Value. It does contain visibilities.
+        final Bytes nodeValueBytes = BS_SERDE.serialize(childBindingSet);
+
+        log.trace(
+                "Transaction ID: " + tx.getStartTimestamp() + "\n" +
+                "New Binding Set: " + childBindingSet + "\n");
+
+        tx.set(resultRow, FluoQueryColumns.QUERY_BINDING_SET, nodeValueBytes);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
new file mode 100644
index 0000000..34439e4
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Serializes and deserializes a {@link VisibilityBindingSet} to and from {@link Bytes} objects.
+ */
+@DefaultAnnotation(NonNull.class)
+public class VisibilityBindingSetSerDe {
+
+    /**
+     * Serializes a {@link VisibilityBindingSet} into a {@link Bytes} object.
+     *
+     * @param bindingSet - The binding set that will be serialized. (not null)
+     * @return The serialized object.
+     * @throws Exception A problem was encountered while serializing the object.
+     */
+    public Bytes serialize(final VisibilityBindingSet bindingSet) throws Exception {
+        requireNonNull(bindingSet);
+
+        final ByteArrayOutputStream boas = new ByteArrayOutputStream();
+        try(final ObjectOutputStream oos = new ObjectOutputStream(boas)) {
+            oos.writeObject(bindingSet);
+        }
+
+        return Bytes.of(boas.toByteArray());
+    }
+
+    /**
+     * Deserializes a {@link VisibilityBindingSet} from a {@link Bytes} object.
+     *
+     * @param bytes - The bytes that will be deserialized. (not null)
+     * @return The deserialized object.
+     * @throws Exception A problem was encountered while deserializing the object.
+     */
+    public VisibilityBindingSet deserialize(final Bytes bytes) throws Exception {
+        requireNonNull(bytes);
+
+        try(final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toArray()))) {
+            final Object o = ois.readObject();
+            if(o instanceof VisibilityBindingSet) {
+                return (VisibilityBindingSet) o;
+            } else {
+                throw new Exception("Deserialized Object is not a VisibilityBindingSet. Was: " + o.getClass());
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/IncrementalResultExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/IncrementalResultExporter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/IncrementalResultExporter.java
index a0edbda..02dced7 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/IncrementalResultExporter.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/IncrementalResultExporter.java
@@ -18,27 +18,25 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.export;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.fluo.api.client.TransactionBase;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Exports a single Binding Set that is a new result for a SPARQL query to some
  * other location.
  */
 @DefaultAnnotation(NonNull.class)
-public interface IncrementalResultExporter {
+public interface IncrementalResultExporter extends AutoCloseable {
 
     /**
-     * Export a Binding Set that is a result of a SPARQL query.
+     * Export a Binding Set that is a result of a SPARQL query that does not include a Group By clause.
      *
      * @param tx - The Fluo transaction this export is a part of. (not null)
      * @param queryId - The Fluo ID of the SPARQL query the binding set is a result of. (not null)
-     * @param bindingSetString - The binding set as it was represented within the
-     *   Fluo application. (not null)
+     * @param result - The Binding Set as it was represented within the Fluo application. (not null)
      * @throws ResultExportException The result could not be exported.
      */
     public void export(TransactionBase tx, String queryId, VisibilityBindingSet result) throws ResultExportException;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaResultExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaResultExporter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaResultExporter.java
index c40c5da..72ec947 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaResultExporter.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaResultExporter.java
@@ -20,9 +20,13 @@ package org.apache.rya.indexing.pcj.fluo.app.export.kafka;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.fluo.api.client.TransactionBase;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.log4j.Logger;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporter;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
@@ -32,17 +36,18 @@ import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
  * Incrementally exports SPARQL query results to Kafka topics.
  */
 public class KafkaResultExporter implements IncrementalResultExporter {
-    private final KafkaProducer<String, VisibilityBindingSet> producer;
     private static final Logger log = Logger.getLogger(KafkaResultExporter.class);
 
+    private final KafkaProducer<String, VisibilityBindingSet> producer;
+
     /**
      * Constructs an instance given a Kafka producer.
-     * 
+     *
      * @param producer
      *            for sending result set alerts to a broker. (not null)
      *            Can be created and configured by {@link KafkaResultExporterFactory}
      */
-    public KafkaResultExporter(KafkaProducer<String, VisibilityBindingSet> producer) {
+    public KafkaResultExporter(final KafkaProducer<String, VisibilityBindingSet> producer) {
         super();
         checkNotNull(producer, "Producer is required.");
         this.producer = producer;
@@ -58,18 +63,25 @@ public class KafkaResultExporter implements IncrementalResultExporter {
         checkNotNull(result);
         try {
             final String pcjId = fluoTx.gets(queryId, FluoQueryColumns.RYA_PCJ_ID);
-            String msg = "out to kafta topic: queryId=" + queryId + " pcjId=" + pcjId + " result=" + result;
+            final String msg = "out to kafka topic: queryId=" + queryId + " pcjId=" + pcjId + " result=" + result;
             log.trace(msg);
 
-            // Send result on topic
-            ProducerRecord<String, VisibilityBindingSet> rec = new ProducerRecord<String, VisibilityBindingSet>(/* topicname= */ queryId, /* value= */ result);
-            // Can add a key if you need to:
-            // ProducerRecord(String topic, K key, V value)
-            producer.send(rec);
+            // Send the result to the topic whose name matches the PCJ ID.
+            final ProducerRecord<String, VisibilityBindingSet> rec = new ProducerRecord<>(pcjId, result);
+            final Future<RecordMetadata> future = producer.send(rec);
+
+            // Don't let the export return until the result has been written to the topic. Otherwise we may lose results.
+            future.get();
+
             log.debug("producer.send(rec) completed");
 
         } catch (final Throwable e) {
             throw new ResultExportException("A result could not be exported to Kafka.", e);
         }
     }
-}
+
+    @Override
+    public void close() throws Exception {
+        producer.close(5, TimeUnit.SECONDS);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaResultExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaResultExporter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaResultExporter.java
index a4b589f..b8b3c45 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaResultExporter.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaResultExporter.java
@@ -19,16 +19,16 @@
 package org.apache.rya.indexing.pcj.fluo.app.export.rya;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
 import java.util.Collections;
 
+import org.apache.fluo.api.client.TransactionBase;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalResultExporter;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.data.Bytes;
 
 /**
  * Incrementally exports SPARQL query results to Accumulo PCJ tables as they are defined by Rya.
@@ -51,10 +51,11 @@ public class RyaResultExporter implements IncrementalResultExporter {
             final TransactionBase fluoTx,
             final String queryId,
             final VisibilityBindingSet result) throws ResultExportException {
-        checkNotNull(fluoTx);
-        checkNotNull(queryId);
-        checkNotNull(result);
+        requireNonNull(fluoTx);
+        requireNonNull(queryId);
+        requireNonNull(result);
 
+        // Look up the ID the PCJ represents within the PCJ Storage.
         final String pcjId = fluoTx.gets(queryId, FluoQueryColumns.RYA_PCJ_ID);
 
         try {
@@ -63,4 +64,9 @@ public class RyaResultExporter implements IncrementalResultExporter {
             throw new ResultExportException("A result could not be exported to Rya.", e);
         }
     }
+
+    @Override
+    public void close() throws Exception {
+        pcjStorage.close();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/AggregationObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/AggregationObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/AggregationObserver.java
new file mode 100644
index 0000000..1cb1594
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/AggregationObserver.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.observers;
+
+import static java.util.Objects.requireNonNull;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.AggregationResultUpdater.AggregationState;
+import org.apache.rya.indexing.pcj.fluo.app.AggregationResultUpdater.AggregationStateSerDe;
+import org.apache.rya.indexing.pcj.fluo.app.AggregationResultUpdater.ObjectSerializationAggregationStateSerDe;
+import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.openrdf.query.BindingSet;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Notified when the results of an Aggregation have been updated to include a new
+ * {@link BindingSet} value. This observer updates its parent if the new Binding Set
+ * affects the parent's results.
+ */
+@DefaultAnnotation(NonNull.class)
+public class AggregationObserver extends BindingSetUpdater {
+
+    private static final AggregationStateSerDe STATE_SERDE = new ObjectSerializationAggregationStateSerDe();
+
+    private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
+
+    @Override
+    public ObservedColumn getObservedColumn() {
+        return new ObservedColumn(FluoQueryColumns.AGGREGATION_BINDING_SET, NotificationType.STRONG);
+    }
+
+    @Override
+    public Observation parseObservation(final TransactionBase tx, final Bytes row) {
+        requireNonNull(tx);
+        requireNonNull(row);
+
+        // Fetch the Aggregation node's metadata.
+        final String nodeId = BindingSetRow.make(row).getNodeId();
+        final AggregationMetadata metadata = queryDao.readAggregationMetadata(tx, nodeId);
+
+        // Read the Visibility Binding Set from the value.
+        final Bytes stateBytes = tx.get(row, FluoQueryColumns.AGGREGATION_BINDING_SET);
+        final AggregationState state = STATE_SERDE.deserialize( stateBytes.toArray() );
+        final VisibilityBindingSet aggBindingSet = new VisibilityBindingSet(state.getBindingSet(), state.getVisibility());
+
+        // Figure out which node needs to handle the new metadata.
+        final String parentNodeId = metadata.getParentNodeId();
+
+        return new Observation(nodeId, aggBindingSet, parentNodeId);
+    }
+}


[4/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
index 3bb54c4..3a42a23 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
@@ -24,58 +24,34 @@ import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.rya.indexing.pcj.fluo.ITBase;
-import org.junit.Test;
-
-import com.google.common.base.Optional;
-
-import org.apache.fluo.api.client.FluoAdmin;
-import org.apache.fluo.api.client.FluoAdmin.AlreadyInitializedException;
-import org.apache.fluo.api.client.FluoAdmin.TableExistsException;
+import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
-import org.apache.fluo.api.config.FluoConfiguration;
 import org.apache.fluo.api.config.ObserverSpecification;
-import org.apache.fluo.api.mini.MiniFluo;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
+import org.junit.Test;
+
+import com.google.common.base.Optional;
 
 /**
  * Tests the methods of {@link CountStatements}.
  */
-public class CountStatementsIT extends ITBase {
+public class CountStatementsIT extends RyaExportITBase {
 
     /**
      * Overriden so that no Observers will be started. This ensures whatever
      * statements are inserted as part of the test will not be consumed.
-     *
-     * @return A Mini Fluo cluster.
      */
     @Override
-    protected MiniFluo startMiniFluo() throws AlreadyInitializedException, TableExistsException {
+    protected void preFluoInitHook() throws Exception {
         // Setup the observers that will be used by the Fluo PCJ Application.
         final List<ObserverSpecification> observers = new ArrayList<>();
 
-        // Configure how the mini fluo will run.
-        final FluoConfiguration config = new FluoConfiguration();
-        config.setMiniStartAccumulo(false);
-        config.setAccumuloInstance(instanceName);
-        config.setAccumuloUser(ACCUMULO_USER);
-        config.setAccumuloPassword(ACCUMULO_PASSWORD);
-        config.setInstanceZookeepers(zookeepers + "/fluo");
-        config.setAccumuloZookeepers(zookeepers);
-
-        config.setApplicationName(FLUO_APP_NAME);
-        config.setAccumuloTable("fluo" + FLUO_APP_NAME);
-
-        config.addObservers(observers);
-
-        FluoFactory.newAdmin(config).initialize(
-                new FluoAdmin.InitializationOptions().setClearTable(true).setClearZookeeper(true) );
-        final MiniFluo miniFluo = FluoFactory.newMiniFluo(config);
-        return miniFluo;
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
     }
 
-
     @Test
     public void countStatements() {
         // Insert some Triples into the Fluo app.
@@ -86,12 +62,14 @@ public class CountStatementsIT extends ITBase {
         triples.add( RyaStatement.builder().setSubject(new RyaURI("http://David")).setPredicate(new RyaURI("http://talksTo")).setObject(new RyaURI("http://Bob")).build() );
         triples.add( RyaStatement.builder().setSubject(new RyaURI("http://Eve")).setPredicate(new RyaURI("http://talksTo")).setObject(new RyaURI("http://Bob")).build() );
 
-        new InsertTriples().insert(fluoClient, triples, Optional.<String>absent());
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            new InsertTriples().insert(fluoClient, triples, Optional.<String>absent());
 
-        // Load some statements into the Fluo app.
-        final BigInteger count = new CountStatements().countStatements(fluoClient);
+            // Load some statements into the Fluo app.
+            final BigInteger count = new CountStatements().countStatements(fluoClient);
 
-        // Ensure the count matches the expected values.
-        assertEquals(BigInteger.valueOf(5), count);
+            // Ensure the count matches the expected values.
+            assertEquals(BigInteger.valueOf(5), count);
+        }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
index 82b61bd..0aceaa3 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
@@ -26,8 +26,11 @@ import java.util.Set;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.persist.RyaDAOException;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.GetPcjMetadata.NotInAccumuloException;
 import org.apache.rya.indexing.pcj.fluo.api.GetPcjMetadata.NotInFluoException;
 import org.apache.rya.indexing.pcj.storage.PcjException;
@@ -47,7 +50,7 @@ import com.google.common.collect.Sets;
 /**
  * Integration tests the methods of {@link GetPcjMetadata}.
  */
-public class GetPcjMetadataIT extends ITBase {
+public class GetPcjMetadataIT extends RyaExportITBase {
 
     @Test
     public void getMetadataByQueryId() throws RepositoryException, MalformedQueryException, SailException, QueryEvaluationException, PcjException, NotInFluoException, NotInAccumuloException, RyaDAOException {
@@ -59,56 +62,60 @@ public class GetPcjMetadataIT extends ITBase {
                 "}";
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Fetch the PCJ's Metadata through the GetPcjMetadata interactor.
-        final String queryId = new ListQueryIds().listQueryIds(fluoClient).get(0);
-        final PcjMetadata metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient, queryId);
+            // Fetch the PCJ's Metadata through the GetPcjMetadata interactor.
+            final String queryId = new ListQueryIds().listQueryIds(fluoClient).get(0);
+            final PcjMetadata metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient, queryId);
 
-        // Ensure the command returns the correct metadata.
-        final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-        final PcjMetadata expected = new PcjMetadata(sparql, 0L, varOrders);
-        assertEquals(expected, metadata);
+            // Ensure the command returns the correct metadata.
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expected = new PcjMetadata(sparql, 0L, varOrders);
+            assertEquals(expected, metadata);
+        }
     }
 
     @Test
     public void getAllMetadata() throws MalformedQueryException, SailException, QueryEvaluationException, PcjException, NotInFluoException, NotInAccumuloException, AccumuloException, AccumuloSecurityException, RyaDAOException {
-
-        final CreatePcj createPcj = new CreatePcj();
-
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
 
-        // Add a couple of queries to Accumulo.
-        final String q1Sparql =
-                "SELECT ?x " +
-                  "WHERE { " +
-                  "?x <http://talksTo> <http://Eve>. " +
-                  "?x <http://worksAt> <http://Chipotle>." +
-                "}";
-        final String q1PcjId = pcjStorage.createPcj(q1Sparql);
-        createPcj.withRyaIntegration(q1PcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        final String q2Sparql =
-                "SELECT ?x ?y " +
-                  "WHERE { " +
-                  "?x <http://talksTo> ?y. " +
-                  "?y <http://worksAt> <http://Chipotle>." +
-                "}";
-        final String q2PcjId = pcjStorage.createPcj(q2Sparql);
-        createPcj.withRyaIntegration(q2PcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Ensure the command returns the correct metadata.
-        final Set<PcjMetadata> expected = new HashSet<>();
-        final Set<VariableOrder> q1VarOrders = new ShiftVarOrderFactory().makeVarOrders(q1Sparql);
-        final Set<VariableOrder> q2VarOrders = new ShiftVarOrderFactory().makeVarOrders(q2Sparql);
-        expected.add(new PcjMetadata(q1Sparql, 0L, q1VarOrders));
-        expected.add(new PcjMetadata(q2Sparql, 0L, q2VarOrders));
-
-        final Map<String, PcjMetadata> metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient);
-        assertEquals(expected, Sets.newHashSet( metadata.values() ));
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Add a couple of queries to Accumulo.
+            final String q1Sparql =
+                    "SELECT ?x " +
+                            "WHERE { " +
+                            "?x <http://talksTo> <http://Eve>. " +
+                            "?x <http://worksAt> <http://Chipotle>." +
+                            "}";
+            final String q1PcjId = pcjStorage.createPcj(q1Sparql);
+            final CreatePcj createPcj = new CreatePcj();
+            createPcj.withRyaIntegration(q1PcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+            final String q2Sparql =
+                    "SELECT ?x ?y " +
+                            "WHERE { " +
+                            "?x <http://talksTo> ?y. " +
+                            "?y <http://worksAt> <http://Chipotle>." +
+                            "}";
+            final String q2PcjId = pcjStorage.createPcj(q2Sparql);
+            createPcj.withRyaIntegration(q2PcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+            // Ensure the command returns the correct metadata.
+            final Set<PcjMetadata> expected = new HashSet<>();
+            final Set<VariableOrder> q1VarOrders = new ShiftVarOrderFactory().makeVarOrders(q1Sparql);
+            final Set<VariableOrder> q2VarOrders = new ShiftVarOrderFactory().makeVarOrders(q2Sparql);
+            expected.add(new PcjMetadata(q1Sparql, 0L, q1VarOrders));
+            expected.add(new PcjMetadata(q2Sparql, 0L, q2VarOrders));
+
+            final Map<String, PcjMetadata> metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient);
+            assertEquals(expected, Sets.newHashSet( metadata.values() ));
+        }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
index 85c31a0..10f2319 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
@@ -26,8 +26,12 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.GetQueryReport.QueryReport;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
@@ -42,7 +46,7 @@ import com.google.common.collect.Sets;
 /**
  * Integration tests the methods of {@link GetQueryReportl}.
  */
-public class GetQueryReportIT extends ITBase {
+public class GetQueryReportIT extends RyaExportITBase {
 
     @Test
     public void getReport() throws Exception {
@@ -56,69 +60,72 @@ public class GetQueryReportIT extends ITBase {
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
         final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://worksAt", "http://Taco Shop"),
-                makeRyaStatement("http://Alice", "http://worksAt", "http://Burger Join"),
-                makeRyaStatement("http://Alice", "http://worksAt", "http://Pastery Shop"),
-                makeRyaStatement("http://Alice", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://Alice", "http://livesIn", "http://Lost County"),
-                makeRyaStatement("http://Alice", "http://livesIn", "http://Big City"),
-                makeRyaStatement("http://Bob", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://Bob", "http://livesIn", "http://Big City"),
-                makeRyaStatement("http://Charlie", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://Charlie", "http://livesIn", "http://Big City"),
-                makeRyaStatement("http://David", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://David", "http://livesIn", "http://Lost County"),
-                makeRyaStatement("http://Eve", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://Eve", "http://livesIn", "http://Big City"),
-                makeRyaStatement("http://Frank", "http://worksAt", "http://Burrito Place"),
-                makeRyaStatement("http://Frank", "http://livesIn", "http://Lost County"));
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://Taco Shop")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://Burger Join")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://Pastery Shop")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://livesIn"), new RyaURI("http://Lost County")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://livesIn"), new RyaURI("http://Big City")),
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://livesIn"), new RyaURI("http://Big City")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://livesIn"), new RyaURI("http://Big City")),
+                new RyaStatement(new RyaURI("http://David"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://David"), new RyaURI("http://livesIn"), new RyaURI("http://Lost County")),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://livesIn"), new RyaURI("http://Big City")),
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://worksAt"), new RyaURI("http://Burrito Place")),
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://livesIn"), new RyaURI("http://Lost County")));
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+            // Stream the data into Fluo.
+            new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
 
-        // Wait for the results to finish processing.
-        fluo.waitForObservers();
+            // Wait for the results to finish processing.
+            super.getMiniFluo().waitForObservers();
 
-        // Fetch the report.
-        final Map<String, PcjMetadata> metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient);
-        final Set<String> queryIds = metadata.keySet();
-        assertEquals(1, queryIds.size());
-        final String queryId = queryIds.iterator().next();
+            // Fetch the report.
+            final Map<String, PcjMetadata> metadata = new GetPcjMetadata().getMetadata(pcjStorage, fluoClient);
+            final Set<String> queryIds = metadata.keySet();
+            assertEquals(1, queryIds.size());
+            final String queryId = queryIds.iterator().next();
 
-        final QueryReport report = new GetQueryReport().getReport(fluoClient, queryId);
+            final QueryReport report = new GetQueryReport().getReport(fluoClient, queryId);
 
-        // Build the expected counts map.
-        final Map<String, BigInteger> expectedCounts = new HashMap<>();
+            // Build the expected counts map.
+            final Map<String, BigInteger> expectedCounts = new HashMap<>();
 
-        final FluoQuery fluoQuery = report.getFluoQuery();
+            final FluoQuery fluoQuery = report.getFluoQuery();
 
-        final String queryNodeId = fluoQuery.getQueryMetadata().getNodeId();
-        expectedCounts.put(queryNodeId, BigInteger.valueOf(8));
+            final String queryNodeId = fluoQuery.getQueryMetadata().getNodeId();
+            expectedCounts.put(queryNodeId, BigInteger.valueOf(8));
 
-        final String filterNodeId = fluoQuery.getFilterMetadata().iterator().next().getNodeId();
-        expectedCounts.put(filterNodeId, BigInteger.valueOf(8));
+            final String filterNodeId = fluoQuery.getFilterMetadata().iterator().next().getNodeId();
+            expectedCounts.put(filterNodeId, BigInteger.valueOf(8));
 
-        final String joinNodeId = fluoQuery.getJoinMetadata().iterator().next().getNodeId();
-        expectedCounts.put(joinNodeId, BigInteger.valueOf(13));
+            final String joinNodeId = fluoQuery.getJoinMetadata().iterator().next().getNodeId();
+            expectedCounts.put(joinNodeId, BigInteger.valueOf(13));
 
-        final Iterator<StatementPatternMetadata> patterns = fluoQuery.getStatementPatternMetadata().iterator();
-        final StatementPatternMetadata sp1 = patterns.next();
-        final StatementPatternMetadata sp2 = patterns.next();
-        if(sp1.getStatementPattern().contains("http://worksAt")) {
-            expectedCounts.put(sp1.getNodeId(), BigInteger.valueOf(9));
-            expectedCounts.put(sp2.getNodeId(), BigInteger.valueOf(7));
-        } else {
-            expectedCounts.put(sp2.getNodeId(), BigInteger.valueOf(9));
-            expectedCounts.put(sp1.getNodeId(), BigInteger.valueOf(7));
-        }
+            final Iterator<StatementPatternMetadata> patterns = fluoQuery.getStatementPatternMetadata().iterator();
+            final StatementPatternMetadata sp1 = patterns.next();
+            final StatementPatternMetadata sp2 = patterns.next();
+            if(sp1.getStatementPattern().contains("http://worksAt")) {
+                expectedCounts.put(sp1.getNodeId(), BigInteger.valueOf(9));
+                expectedCounts.put(sp2.getNodeId(), BigInteger.valueOf(7));
+            } else {
+                expectedCounts.put(sp2.getNodeId(), BigInteger.valueOf(9));
+                expectedCounts.put(sp1.getNodeId(), BigInteger.valueOf(7));
+            }
 
-        assertEquals(expectedCounts, report.getCounts());
+            assertEquals(expectedCounts, report.getCounts());
+        }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
index 19bc272..ec301ba 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
@@ -26,17 +26,18 @@ import java.util.List;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
+import org.apache.fluo.api.client.Transaction;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.junit.Test;
 
 import com.beust.jcommander.internal.Lists;
 
-import org.apache.fluo.api.client.Transaction;
-
 /**
  * Integration tests the methods of {@link ListQueryIds}.
  */
-public class ListQueryIdsIT extends ITBase {
+public class ListQueryIdsIT extends RyaExportITBase {
 
     /**
      * This test ensures that when there are PCJ tables in Accumulo as well as
@@ -45,18 +46,20 @@ public class ListQueryIdsIT extends ITBase {
      */
     @Test
     public void getQueryIds() throws AccumuloException, AccumuloSecurityException, TableExistsException {
-        // Store a few SPARQL/Query ID pairs in the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            tx.set("SPARQL_3", QUERY_ID, "ID_3");
-            tx.set("SPARQL_1", QUERY_ID, "ID_1");
-            tx.set("SPARQL_4", QUERY_ID, "ID_4");
-            tx.set("SPARQL_2", QUERY_ID, "ID_2");
-            tx.commit();
-        }
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Store a few SPARQL/Query ID pairs in the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                tx.set("SPARQL_3", QUERY_ID, "ID_3");
+                tx.set("SPARQL_1", QUERY_ID, "ID_1");
+                tx.set("SPARQL_4", QUERY_ID, "ID_4");
+                tx.set("SPARQL_2", QUERY_ID, "ID_2");
+                tx.commit();
+            }
 
-        // Ensure the correct list of Query IDs is retured.
-        final List<String> expected = Lists.newArrayList("ID_1", "ID_2", "ID_3", "ID_4");
-        final List<String> queryIds = new ListQueryIds().listQueryIds(fluoClient);
-        assertEquals(expected, queryIds);
+            // Ensure the correct list of Query IDs is returned.
+            final List<String> expected = Lists.newArrayList("ID_1", "ID_2", "ID_3", "ID_4");
+            final List<String> queryIds = new ListQueryIds().listQueryIds(fluoClient);
+            assertEquals(expected, queryIds);
+        }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
index d5ed447..082f46d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
@@ -20,7 +20,13 @@ package org.apache.rya.indexing.pcj.fluo.app.query;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.Transaction;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationType;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
 import org.apache.rya.indexing.pcj.fluo.app.query.SparqlFluoQueryBuilder.NodeIds;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
@@ -30,13 +36,10 @@ import org.openrdf.query.parser.ParsedQuery;
 import org.openrdf.query.parser.sparql.SPARQLParser;
 import org.openrdf.repository.RepositoryException;
 
-import org.apache.fluo.api.client.Snapshot;
-import org.apache.fluo.api.client.Transaction;
-
 /**
  * Integration tests the methods of {@link FluoQueryMetadataDAO}.
  */
-public class FluoQueryMetadataDAOIT extends ITBase {
+public class FluoQueryMetadataDAOIT extends RyaExportITBase {
 
     @Test
     public void statementPatternMetadataTest() throws RepositoryException {
@@ -49,20 +52,22 @@ public class FluoQueryMetadataDAOIT extends ITBase {
         builder.setParentNodeId("parentNodeId");
         final StatementPatternMetadata originalMetadata = builder.build();
 
-        // Write it to the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            dao.write(tx, originalMetadata);
-            tx.commit();
-        }
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
 
-        // Read it from the Fluo table.
-        StatementPatternMetadata storedMetadata = null;
-        try(Snapshot sx = fluoClient.newSnapshot()) {
-            storedMetadata = dao.readStatementPatternMetadata(sx, "nodeId");
-        }
+            // Read it from the Fluo table.
+            StatementPatternMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readStatementPatternMetadata(sx, "nodeId");
+            }
 
-        // Ensure the deserialized object is the same as the serialized one.
-        assertEquals(originalMetadata, storedMetadata);
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
+        }
     }
 
     @Test
@@ -78,20 +83,22 @@ public class FluoQueryMetadataDAOIT extends ITBase {
         builder.setFilterIndexWithinSparql(2);
         final FilterMetadata originalMetadata = builder.build();
 
-        // Write it to the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            dao.write(tx, originalMetadata);
-            tx.commit();
-        }
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
 
-        // Read it from the Fluo table.
-        FilterMetadata storedMetadata = null;
-        try(Snapshot sx = fluoClient.newSnapshot()) {
-            storedMetadata = dao.readFilterMetadata(sx, "nodeId");
-        }
+            // Read it from the Fluo table.
+            FilterMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readFilterMetadata(sx, "nodeId");
+            }
 
-        // Ensure the deserialized object is the same as the serialized one.
-        assertEquals(originalMetadata, storedMetadata);
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
+        }
     }
 
     @Test
@@ -107,20 +114,22 @@ public class FluoQueryMetadataDAOIT extends ITBase {
         builder.setRightChildNodeId("rightChildNodeId");
         final JoinMetadata originalMetadata = builder.build();
 
-        // Write it to the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            dao.write(tx, originalMetadata);
-            tx.commit();
-        }
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
 
-        // Read it from the Fluo table.
-        JoinMetadata storedMetadata = null;
-        try(Snapshot sx = fluoClient.newSnapshot()) {
-            storedMetadata = dao.readJoinMetadata(sx, "nodeId");
-        }
+            // Read it from the Fluo table.
+            JoinMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readJoinMetadata(sx, "nodeId");
+            }
 
-        // Ensure the deserialized object is the same as the serialized one.
-        assertEquals(originalMetadata, storedMetadata);
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
+        }
     }
 
     @Test
@@ -134,20 +143,85 @@ public class FluoQueryMetadataDAOIT extends ITBase {
         builder.setChildNodeId("childNodeId");
         final QueryMetadata originalMetadata = builder.build();
 
-        // Write it to the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            dao.write(tx, originalMetadata);
-            tx.commit();
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
+
+            // Read it from the Fluo table.
+            QueryMetadata storedMetdata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetdata = dao.readQueryMetadata(sx, "nodeId");
+            }
+
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetdata);
         }
+    }
+
+    @Test
+    public void aggregationMetadataTest_withGroupByVarOrders() {
+        final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
+
+        // Create the object that will be serialized.
+        final AggregationMetadata originalMetadata = AggregationMetadata.builder("nodeId")
+                .setVariableOrder(new VariableOrder("totalCount"))
+                .setParentNodeId("parentNodeId")
+                .setChildNodeId("childNodeId")
+                .setGroupByVariableOrder(new VariableOrder("a", "b", "c"))
+                .addAggregation(new AggregationElement(AggregationType.COUNT, "count", "totalCount"))
+                .addAggregation(new AggregationElement(AggregationType.AVERAGE, "privae", "avgPrice"))
+                .build();
+
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
 
-        // Read it from the Fluo table.
-        QueryMetadata storedMetdata = null;
-        try(Snapshot sx = fluoClient.newSnapshot()) {
-            storedMetdata = dao.readQueryMetadata(sx, "nodeId");
+            // Read it from the Fluo table.
+            AggregationMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readAggregationMetadata(sx, "nodeId");
+            }
+
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
         }
+    }
+
+    @Test
+    public void aggregationMetadataTest_noGroupByVarOrders() {
+        final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
+
+        // Create the object that will be serialized.
+        final AggregationMetadata originalMetadata = AggregationMetadata.builder("nodeId")
+                .setVariableOrder(new VariableOrder("totalCount"))
+                .setParentNodeId("parentNodeId")
+                .setChildNodeId("childNodeId")
+                .addAggregation(new AggregationElement(AggregationType.COUNT, "count", "totalCount"))
+                .addAggregation(new AggregationElement(AggregationType.AVERAGE, "privae", "avgPrice"))
+                .build();
+
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
+
+            // Read it from the Fluo table.
+            AggregationMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readAggregationMetadata(sx, "nodeId");
+            }
 
-        // Ensure the deserialized object is the same as the serialized one.
-        assertEquals(originalMetadata, storedMetdata);
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
+        }
     }
 
     @Test
@@ -168,19 +242,21 @@ public class FluoQueryMetadataDAOIT extends ITBase {
         final ParsedQuery query = new SPARQLParser().parseQuery(sparql, null);
         final FluoQuery originalQuery = new SparqlFluoQueryBuilder().make(query, new NodeIds());
 
-        // Write it to the Fluo table.
-        try(Transaction tx = fluoClient.newTransaction()) {
-            dao.write(tx, originalQuery);
-            tx.commit();
-        }
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalQuery);
+                tx.commit();
+            }
 
-        // Read it from the Fluo table.
-        FluoQuery storedQuery = null;
-        try(Snapshot sx = fluoClient.newSnapshot()) {
-            storedQuery = dao.readFluoQuery(sx, originalQuery.getQueryMetadata().getNodeId());
-        }
+            // Read it from the Fluo table.
+            FluoQuery storedQuery = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedQuery = dao.readFluoQuery(sx, originalQuery.getQueryMetadata().getNodeId());
+            }
 
-        // Ensure the deserialized object is the same as the serialized one.
-        assertEquals(originalQuery, storedQuery);
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalQuery, storedQuery);
+        }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
index b4c8d69..21d7db0 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
@@ -18,100 +18,152 @@
  */
 package org.apache.rya.indexing.pcj.fluo.integration;
 
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
-import java.util.HashSet;
+import java.util.Collection;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.fluo.api.client.Snapshot;
 import org.apache.fluo.api.client.scanner.ColumnScanner;
 import org.apache.fluo.api.client.scanner.RowScanner;
 import org.apache.fluo.api.data.Bytes;
 import org.apache.fluo.api.data.Span;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
-import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.DeletePcj;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Test;
 import org.openrdf.model.Statement;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
 import com.google.common.collect.Sets;
 
-public class CreateDeleteIT extends ITBase {
+/**
+ * Tests that ensure the PCJ delete support works.
+ */
+public class CreateDeleteIT extends RyaExportITBase {
 
-    /**
-     * Ensure historic matches are included in the result.
-     */
     @Test
-    public void historicResults() throws Exception {
+    public void deletePCJ() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
-        final String sparql = "SELECT ?x " + "WHERE { " + "?x <http://talksTo> <http://Eve>. "
-                + "?x <http://worksAt> <http://Chipotle>." + "}";
+        final String sparql =
+                "SELECT ?x " + "WHERE { " +
+                    "?x <http://talksTo> <http://Eve>. " +
+                    "?x <http://worksAt> <http://Chipotle>." +
+                "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
-        final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Bob", "http://talksTo", "http://Eve"),
-                makeStatement("http://Charlie", "http://talksTo", "http://Eve"),
-
-                makeStatement("http://Eve", "http://helps", "http://Kevin"),
-
-                makeStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://David", "http://worksAt", "http://Chipotle"));
-
-        // The expected results of the SPARQL query once the PCJ has been
-        // computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Bob"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Charlie"))));
-
-        // Load the historic data into Rya.
-        for (final Statement triple : historicTriples) {
-            ryaConn.add(triple);
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Set<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin")),
+
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Ensure the data was loaded.
+            final List<Bytes> rows = getFluoTableEntries(fluoClient);
+            assertEquals(17, rows.size());
+
+            // Delete the PCJ from the Fluo application.
+            new DeletePcj(1).deletePcj(fluoClient, pcjId);
+
+            // Ensure all data related to the query has been removed.
+            final List<Bytes> empty_rows = getFluoTableEntries(fluoClient);
+            assertEquals(0, empty_rows.size());
         }
+    }
+
+    @Test
+    public void deleteAggregation() throws Exception {
+        // A query that finds the maximum price for an item within the inventory.
+        final String sparql =
+                "SELECT (max(?price) as ?maxPrice) { " +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(2.50)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(0.99)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(4.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Ensure the data was loaded.
+            final List<Bytes> rows = getFluoTableEntries(fluoClient);
+            assertEquals(10, rows.size());
+
+            // Delete the PCJ from the Fluo application.
+            new DeletePcj(1).deletePcj(fluoClient, pcjId);
+
+            // Ensure all data related to the query has been removed.
+            final List<Bytes> empty_rows = getFluoTableEntries(fluoClient);
+            assertEquals(0, empty_rows.size());
+        }
+    }
 
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
+    private String loadData(final String sparql, final Collection<Statement> statements) throws Exception {
+        requireNonNull(sparql);
+        requireNonNull(statements);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        // Register the PCJ with Rya.
+        final Instance accInstance = super.getAccumuloConnector().getInstance();
+        final Connector accumuloConn = super.getAccumuloConnector();
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
+                ACCUMULO_USER,
+                ACCUMULO_PASSWORD.toCharArray(),
+                accInstance.getInstanceName(),
+                accInstance.getZooKeepers()), accumuloConn);
 
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+        final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
 
-        List<Bytes> rows = getFluoTableEntries(fluoClient);
-        assertEquals(17, rows.size());
+        // Write the data to Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
+        ryaConn.begin();
+        ryaConn.add(statements);
+        ryaConn.commit();
+        ryaConn.close();
 
-        // Delete the PCJ from the Fluo application.
-        new DeletePcj(1).deletePcj(fluoClient, pcjId);
+        // Wait for the Fluo application to finish computing the end result.
+        super.getMiniFluo().waitForObservers();
 
-        // Ensure all data related to the query has been removed.
-        List<Bytes> empty_rows = getFluoTableEntries(fluoClient);
-        assertEquals(0, empty_rows.size());
+        // The PCJ Id is the topic name the results will be written to.
+        return pcjId;
     }
 
-    private List<Bytes> getFluoTableEntries(FluoClient fluoClient) {
+    private List<Bytes> getFluoTableEntries(final FluoClient fluoClient) {
         try (Snapshot snapshot = fluoClient.newSnapshot()) {
-            List<Bytes> rows = new ArrayList<>();
-            RowScanner rscanner = snapshot.scanner().over(Span.prefix("")).byRow().build();
+            final List<Bytes> rows = new ArrayList<>();
+            final RowScanner rscanner = snapshot.scanner().over(Span.prefix("")).byRow().build();
 
-            for(ColumnScanner cscanner: rscanner) {
+            for(final ColumnScanner cscanner: rscanner) {
             	rows.add(cscanner.getRow());
             }
-            
+
             return rows;
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
index dcab997..ab97bbd 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
@@ -19,32 +19,37 @@
 package org.apache.rya.indexing.pcj.fluo.integration;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Test;
 import org.openrdf.model.Statement;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
 /**
  * Performs integration tests over the Fluo application geared towards various types of input.
- * <p>
- * These tests are being ignore so that they will not run as unit tests while building the application.
  */
-public class InputIT extends ITBase {
+public class InputIT extends RyaExportITBase {
 
     /**
      * Ensure historic matches are included in the result.
@@ -53,49 +58,64 @@ public class InputIT extends ITBase {
     public void historicResults() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
         final String sparql =
-              "SELECT ?x " +
-                "WHERE { " +
+              "SELECT ?x WHERE { " +
                 "?x <http://talksTo> <http://Eve>. " +
                 "?x <http://worksAt> <http://Chipotle>." +
               "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Bob", "http://talksTo", "http://Eve"),
-                makeStatement("http://Charlie", "http://talksTo", "http://Eve"),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
 
-                makeStatement("http://Eve", "http://helps", "http://Kevin"),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin")),
 
-                makeStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://David", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // The expected results of the SPARQL query once the PCJ has been computed.
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Bob"))));
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Charlie"))));
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Bob"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Charlie"));
+        expected.add(bs);
 
         // Load the historic data into Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         for(final Statement triple : historicTriples) {
             ryaConn.add(triple);
         }
+        ryaConn.close();
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+            // Verify the end results of the query match the expected results.
+            super.getMiniFluo().waitForObservers();
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
 
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            assertEquals(expected, results);
+        }
     }
 
     /**
@@ -105,51 +125,67 @@ public class InputIT extends ITBase {
     public void streamedResults() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
         final String sparql =
-              "SELECT ?x " +
-                "WHERE { " +
+              "SELECT ?x WHERE { " +
                 "?x <http://talksTo> <http://Eve>. " +
                 "?x <http://worksAt> <http://Chipotle>." +
               "}";
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
         final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Bob", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Charlie", "http://talksTo", "http://Eve"),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
 
-                makeRyaStatement("http://Eve", "http://helps", "http://Kevin"),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://helps"), new RyaURI("http://Kevin")),
 
-                makeRyaStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-                makeRyaStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-                makeRyaStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-                makeRyaStatement("http://David", "http://worksAt", "http://Chipotle"));
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
+                new RyaStatement(new RyaURI("http://David"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")));
 
         // The expected results of the SPARQL query once the PCJ has been computed.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Bob"))));
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Charlie"))));
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Bob"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Charlie"));
+        expected.add(bs);
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Ensure the query has no results yet.
-        fluo.waitForObservers();
-        Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertTrue( results.isEmpty() );
+            // Ensure the query has no results yet.
+            super.getMiniFluo().waitForObservers();
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                assertFalse( resultsIt.hasNext() );
+            }
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            // Stream the data into Fluo.
+            new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+
+            // Verify the end results of the query match the expected results.
+            super.getMiniFluo().waitForObservers();
+
+            final HashSet<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+
+            assertEquals(expected, results);
+        }
     }
 
     /**
@@ -162,53 +198,75 @@ public class InputIT extends ITBase {
     public void historicThenStreamedResults() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
         final String sparql =
-              "SELECT ?x " +
-                "WHERE { " +
+              "SELECT ?x WHERE { " +
                 "?x <http://talksTo> <http://Eve>. " +
                 "?x <http://worksAt> <http://Chipotle>." +
               "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Alice", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
         final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Frank", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Frank", "http://worksAt", "http://Chipotle"));
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")));
 
         // Load the historic data into Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         for(final Statement triple : historicTriples) {
             ryaConn.add(triple);
         }
+        ryaConn.close();
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Ensure Alice is a match.
-        fluo.waitForObservers();
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Alice"))));
+            // Ensure Alice is a match.
+            super.getMiniFluo().waitForObservers();
 
-        Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            final Set<BindingSet> expected = new HashSet<>();
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+            MapBindingSet bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Alice"));
+            expected.add(bs);
 
-        // Verify the end results of the query also include Frank.
-        fluo.waitForObservers();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Frank"))));
+            Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add(resultsIt.next());
+                }
+            }
 
-        results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            assertEquals(expected, results);
+
+            // Stream the data into Fluo.
+            new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+
+            // Verify the end results of the query also include Frank.
+            super.getMiniFluo().waitForObservers();
+
+            bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Frank"));
+            expected.add(bs);
+
+            results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add(resultsIt.next());
+                }
+            }
+
+            assertEquals(expected, results);
+        }
     }
 
     /**
@@ -222,50 +280,69 @@ public class InputIT extends ITBase {
     public void historicAndStreamConflict() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
         final String sparql =
-              "SELECT ?x " +
-                "WHERE { " +
+              "SELECT ?x WHERE { " +
                 "?x <http://talksTo> <http://Eve>. " +
                 "?x <http://worksAt> <http://Chipotle>." +
               "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Alice", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
         final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Alice", "http://worksAt", "http://Chipotle"));
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")));
 
         // The expected final result.
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Alice"))));
+
+        final MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Alice"));
+        expected.add(bs);
 
         // Load the historic data into Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         for(final Statement triple : historicTriples) {
             ryaConn.add(triple);
         }
+        ryaConn.close();
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Ensure Alice is a match.
-        fluo.waitForObservers();
-        Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
-
-        // Stream the same Alice triple into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
-
-        // Verify the end results of the query is stiill only Alice.
-        fluo.waitForObservers();
-        results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+            // Ensure Alice is a match.
+            super.getMiniFluo().waitForObservers();
+
+            Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+            assertEquals(expected, results);
+
+            // Stream the same Alice triple into Fluo.
+            new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+
+            // Verify the end result of the query is still only Alice.
+            super.getMiniFluo().waitForObservers();
+
+            results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+            assertEquals(expected, results);
+        }
     }
 }
\ No newline at end of file


[9/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.


Project: http://git-wip-us.apache.org/repos/asf/incubator-rya/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-rya/commit/c941aea8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-rya/tree/c941aea8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-rya/diff/c941aea8

Branch: refs/heads/master
Commit: c941aea8b65acb99b451757c48279914d9488c85
Parents: be9ea9a
Author: Kevin Chilton <ke...@parsons.com>
Authored: Fri Apr 7 15:57:57 2017 -0400
Committer: Caleb Meier <ca...@parsons.com>
Committed: Mon Apr 24 07:58:25 2017 -0700

----------------------------------------------------------------------
 .../AccumuloRyaInstanceDetailsRepository.java   |  20 +-
 .../accumulo/utils/VisibilitySimplifier.java    |  30 +-
 .../utils/VisibilitySimplifierTest.java         |  24 +
 .../rya/accumulo/utils/RyaTableNames.java       |  11 +-
 .../client/accumulo/AccumuloBatchUpdatePCJ.java |  87 ++-
 .../api/client/accumulo/AccumuloCreatePCJ.java  |  86 ++-
 .../api/client/accumulo/AccumuloDeletePCJ.java  |  38 +-
 .../pcj/matching/AccumuloIndexSetProvider.java  |  62 +-
 .../accumulo/AccumuloBatchUpdatePCJIT.java      |  10 +-
 .../client/accumulo/AccumuloCreatePCJIT.java    |  72 +-
 .../client/accumulo/AccumuloDeletePCJIT.java    |  46 +-
 .../benchmark/query/QueryBenchmarkRunIT.java    |  29 +-
 .../pcj/storage/PrecomputedJoinStorage.java     |  22 +-
 .../storage/accumulo/AccumuloPcjSerializer.java |  20 -
 .../storage/accumulo/AccumuloPcjStorage.java    |   8 +-
 .../storage/accumulo/BindingSetConverter.java   |   9 +-
 .../accumulo/BindingSetStringConverter.java     |  53 +-
 .../pcj/storage/accumulo/PcjTables.java         |  39 +-
 .../accumulo/ScannerBindingSetIterator.java     |  18 +-
 .../pcj/storage/accumulo/VariableOrder.java     |  15 +-
 .../pcj/update/PrecomputedJoinUpdater.java      |  10 +-
 .../accumulo/AccumuloPcjSerializerTest.java     | 185 +++++
 .../accumulo/AccumuloPcjSerialzerTest.java      | 175 -----
 .../accumulo/BindingSetStringConverterTest.java |  42 +-
 .../accumulo/PcjTablesIntegrationTest.java      |  26 +-
 .../accumulo/accumulo/AccumuloPcjStorageIT.java | 284 ++++----
 .../rya/indexing/pcj/fluo/api/CreatePcj.java    | 277 ++++----
 .../rya/indexing/pcj/fluo/api/DeletePcj.java    |  28 +-
 extras/rya.pcj.fluo/pcj.fluo.app/pom.xml        |  10 +-
 .../pcj/fluo/app/AggregationResultUpdater.java  | 572 +++++++++++++++
 .../indexing/pcj/fluo/app/BindingSetRow.java    |  11 +-
 .../pcj/fluo/app/FilterResultUpdater.java       |  57 +-
 .../rya/indexing/pcj/fluo/app/IncUpdateDAO.java |  95 +--
 .../fluo/app/IncrementalUpdateConstants.java    |   1 +
 .../pcj/fluo/app/JoinResultUpdater.java         | 131 ++--
 .../rya/indexing/pcj/fluo/app/NodeType.java     |  11 +-
 .../pcj/fluo/app/QueryResultUpdater.java        |  69 +-
 .../pcj/fluo/app/VisibilityBindingSetSerDe.java |  77 +++
 .../app/export/IncrementalResultExporter.java   |  12 +-
 .../app/export/kafka/KafkaResultExporter.java   |  32 +-
 .../fluo/app/export/rya/RyaResultExporter.java  |  16 +-
 .../fluo/app/observers/AggregationObserver.java |  74 ++
 .../fluo/app/observers/BindingSetUpdater.java   |  53 +-
 .../pcj/fluo/app/observers/FilterObserver.java  |  25 +-
 .../pcj/fluo/app/observers/JoinObserver.java    |  24 +-
 .../fluo/app/observers/QueryResultObserver.java |  54 +-
 .../app/observers/StatementPatternObserver.java |  25 +-
 .../pcj/fluo/app/observers/TripleObserver.java  | 158 +++--
 .../pcj/fluo/app/query/AggregationMetadata.java | 371 ++++++++++
 .../indexing/pcj/fluo/app/query/FluoQuery.java  | 111 ++-
 .../pcj/fluo/app/query/FluoQueryColumns.java    |  60 +-
 .../fluo/app/query/FluoQueryMetadataDAO.java    | 186 ++++-
 .../fluo/app/query/SparqlFluoQueryBuilder.java  | 116 +++-
 .../pcj/fluo/app/util/BindingSetUtil.java       |  54 ++
 .../indexing/pcj/fluo/app/util/RowKeyUtil.java  |  69 ++
 .../fluo/app/VisibilityBindingSetSerDeTest.java |  51 ++
 .../fluo/client/command/NewQueryCommand.java    |   2 +-
 .../pcj/fluo/demo/FluoAndHistoricPcjsDemo.java  |  38 +-
 .../rya.pcj.fluo/pcj.fluo.integration/pom.xml   |  13 +-
 .../apache/rya/indexing/pcj/fluo/ITBase.java    | 443 ------------
 .../indexing/pcj/fluo/KafkaExportITBase.java    | 315 +++++++++
 .../rya/indexing/pcj/fluo/RyaExportITBase.java  | 182 +++++
 .../pcj/fluo/api/CountStatementsIT.java         |  54 +-
 .../indexing/pcj/fluo/api/GetPcjMetadataIT.java |  91 +--
 .../indexing/pcj/fluo/api/GetQueryReportIT.java | 107 +--
 .../indexing/pcj/fluo/api/ListQueryIdsIT.java   |  35 +-
 .../fluo/app/query/FluoQueryMetadataDAOIT.java  | 204 ++++--
 .../pcj/fluo/integration/CreateDeleteIT.java    | 166 +++--
 .../indexing/pcj/fluo/integration/InputIT.java  | 275 +++++---
 .../pcj/fluo/integration/KafkaExportIT.java     | 693 ++++++++++++-------
 .../indexing/pcj/fluo/integration/QueryIT.java  | 580 ++++++++--------
 .../pcj/fluo/integration/RyaExportIT.java       | 101 +--
 .../RyaInputIncrementalUpdateIT.java            | 245 ++++---
 .../pcj/fluo/integration/StreamingTestIT.java   | 140 ++--
 .../HistoricStreamingVisibilityIT.java          |  80 ++-
 .../pcj/fluo/visibility/PcjVisibilityIT.java    | 199 +++---
 .../rya.pcj.fluo/rya.pcj.functions.geo/pom.xml  |  44 +-
 .../rya/indexing/pcj/fluo/RyaExportITBase.java  | 182 +++++
 .../pcj/functions/geo/GeoFunctionsIT.java       | 471 ++++++-------
 pom.xml                                         |  16 +
 80 files changed, 5719 insertions(+), 3208 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/instance/AccumuloRyaInstanceDetailsRepository.java
----------------------------------------------------------------------
diff --git a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/instance/AccumuloRyaInstanceDetailsRepository.java b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/instance/AccumuloRyaInstanceDetailsRepository.java
index be8e12c..dcd64de 100644
--- a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/instance/AccumuloRyaInstanceDetailsRepository.java
+++ b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/instance/AccumuloRyaInstanceDetailsRepository.java
@@ -22,9 +22,6 @@ import static java.util.Objects.requireNonNull;
 
 import java.util.Map.Entry;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -48,6 +45,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.rya.api.instance.RyaDetails;
 import org.apache.rya.api.instance.RyaDetailsRepository;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * An implementation of {@link RyaDetailsRepository} that stores a Rya
  * instance's {@link RyaDetails} in an Accumulo table.
@@ -89,12 +89,17 @@ public class AccumuloRyaInstanceDetailsRepository implements RyaDetailsRepositor
 
     @Override
     public boolean isInitialized() throws RyaDetailsRepositoryException {
+        Scanner scanner = null;
         try {
-            final Scanner scanner = connector.createScanner(detailsTableName, new Authorizations());
+            scanner = connector.createScanner(detailsTableName, new Authorizations());
             scanner.fetchColumn(COL_FAMILY, COL_QUALIFIER);
             return scanner.iterator().hasNext();
         } catch (final TableNotFoundException e) {
             return false;
+        } finally {
+            if(scanner != null) {
+                scanner.close();
+            }
         }
     }
 
@@ -157,9 +162,10 @@ public class AccumuloRyaInstanceDetailsRepository implements RyaDetailsRepositor
         }
 
         // Read it from the table.
+        Scanner scanner = null;
         try {
             // Fetch the value from the table.
-            final Scanner scanner = connector.createScanner(detailsTableName, new Authorizations());
+            scanner = connector.createScanner(detailsTableName, new Authorizations());
             scanner.fetchColumn(COL_FAMILY, COL_QUALIFIER);
             final Entry<Key, Value> entry = scanner.iterator().next();
 
@@ -169,6 +175,10 @@ public class AccumuloRyaInstanceDetailsRepository implements RyaDetailsRepositor
 
         } catch (final TableNotFoundException e) {
             throw new RyaDetailsRepositoryException("Could not get the details from the table.", e);
+        } finally {
+            if(scanner != null) {
+                scanner.close();
+            }
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/utils/VisibilitySimplifier.java
----------------------------------------------------------------------
diff --git a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/utils/VisibilitySimplifier.java b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/utils/VisibilitySimplifier.java
index 98c6abd..8fa3b0e 100644
--- a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/utils/VisibilitySimplifier.java
+++ b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/utils/VisibilitySimplifier.java
@@ -20,13 +20,13 @@ package org.apache.rya.accumulo.utils;
 
 import static java.util.Objects.requireNonNull;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.security.ColumnVisibility;
 
 import com.google.common.base.Charsets;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * Simplifies Accumulo visibility expressions.
  */
@@ -34,12 +34,34 @@ import com.google.common.base.Charsets;
 public class VisibilitySimplifier {
 
     /**
+     * Unions two visibility equations and then simplifies the result.
+     *
+     * @param vis1 - The first visibility equation that will be unioned. (not null)
+     * @param vis2 - The other visibility equation that will be unioned. (not null)
+     * @return A simplified form of the unioned visibility equations.
+     */
+    public static String unionAndSimplify(final String vis1, final String vis2) {
+        requireNonNull(vis1);
+        requireNonNull(vis2);
+
+        if(vis1.isEmpty()) {
+            return vis2;
+        }
+
+        if(vis2.isEmpty()) {
+            return vis1;
+        }
+
+        return simplify("(" + vis1 + ")&(" + vis2 + ")");
+    }
+
+    /**
      * Simplifies an Accumulo visibility expression.
      *
      * @param visibility - The expression to simplify. (not null)
      * @return A simplified form of {@code visibility}.
      */
-    public String simplify(final String visibility) {
+    public static String simplify(final String visibility) {
         requireNonNull(visibility);
 
         String last = visibility;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/dao/accumulo.rya/src/test/java/org/apache/rya/accumulo/utils/VisibilitySimplifierTest.java
----------------------------------------------------------------------
diff --git a/dao/accumulo.rya/src/test/java/org/apache/rya/accumulo/utils/VisibilitySimplifierTest.java b/dao/accumulo.rya/src/test/java/org/apache/rya/accumulo/utils/VisibilitySimplifierTest.java
index a9a03ce..0adb325 100644
--- a/dao/accumulo.rya/src/test/java/org/apache/rya/accumulo/utils/VisibilitySimplifierTest.java
+++ b/dao/accumulo.rya/src/test/java/org/apache/rya/accumulo/utils/VisibilitySimplifierTest.java
@@ -50,4 +50,28 @@ public class VisibilitySimplifierTest {
         final String simplified = new VisibilitySimplifier().simplify("(a|b)|(a|b)|a|b");
         assertEquals("a|b", simplified);
     }
+
+    @Test
+    public void unionAndSimplify() {
+        final String simplified = new VisibilitySimplifier().unionAndSimplify("u&b", "u");
+        assertEquals("b&u", simplified);
+    }
+
+    @Test
+    public void unionAndSimplify_firstIsEmpty() {
+        final String simplified = new VisibilitySimplifier().unionAndSimplify("", "u");
+        assertEquals("u", simplified);
+    }
+
+    @Test
+    public void unionAndSimplify_secondIsEmpty() {
+        final String simplified = new VisibilitySimplifier().unionAndSimplify("u", "");
+        assertEquals("u", simplified);
+    }
+
+    @Test
+    public void unionAndSimplify_bothAreEmpty() {
+        final String simplified = new VisibilitySimplifier().unionAndSimplify("", "");
+        assertEquals("", simplified);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/main/java/org/apache/rya/accumulo/utils/RyaTableNames.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/main/java/org/apache/rya/accumulo/utils/RyaTableNames.java b/extras/indexing/src/main/java/org/apache/rya/accumulo/utils/RyaTableNames.java
index faeebbb..cd17cbc 100644
--- a/extras/indexing/src/main/java/org/apache/rya/accumulo/utils/RyaTableNames.java
+++ b/extras/indexing/src/main/java/org/apache/rya/accumulo/utils/RyaTableNames.java
@@ -34,6 +34,7 @@ import org.apache.rya.api.layout.TablePrefixLayoutStrategy;
 import org.apache.rya.indexing.accumulo.entity.EntityCentricIndex;
 import org.apache.rya.indexing.accumulo.freetext.AccumuloFreeTextIndexer;
 import org.apache.rya.indexing.accumulo.temporal.AccumuloTemporalIndexer;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.PcjTableNameFactory;
@@ -93,11 +94,13 @@ public class RyaTableNames {
  */
 
         if(details.getPCJIndexDetails().isEnabled()) {
-            final List<String> pcjIds = new AccumuloPcjStorage(conn, ryaInstanceName).listPcjs();
+            try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(conn, ryaInstanceName)) {
+                final List<String> pcjIds = pcjStorage.listPcjs();
 
-            final PcjTableNameFactory tableNameFactory = new PcjTableNameFactory();
-            for(final String pcjId : pcjIds) {
-                tables.add( tableNameFactory.makeTableName(ryaInstanceName, pcjId) );
+                final PcjTableNameFactory tableNameFactory = new PcjTableNameFactory();
+                for(final String pcjId : pcjIds) {
+                    tables.add( tableNameFactory.makeTableName(ryaInstanceName, pcjId) );
+                }
             }
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJ.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJ.java b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJ.java
index 790fe80..76aad02 100644
--- a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJ.java
+++ b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJ.java
@@ -28,23 +28,6 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.log4j.Logger;
-import org.apache.rya.indexing.pcj.storage.PcjMetadata;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.MalformedQueryException;
-import org.openrdf.query.QueryEvaluationException;
-import org.openrdf.query.parser.ParsedQuery;
-import org.openrdf.query.parser.sparql.SPARQLParser;
-import org.openrdf.sail.Sail;
-import org.openrdf.sail.SailConnection;
-import org.openrdf.sail.SailException;
-
-import com.google.common.base.Optional;
-
-import info.aduna.iteration.CloseableIteration;
 import org.apache.rya.accumulo.AccumuloRdfConfiguration;
 import org.apache.rya.accumulo.instance.AccumuloRyaInstanceDetailsRepository;
 import org.apache.rya.api.client.BatchUpdatePCJ;
@@ -62,8 +45,25 @@ import org.apache.rya.api.instance.RyaDetailsUpdater.RyaDetailsMutator;
 import org.apache.rya.api.instance.RyaDetailsUpdater.RyaDetailsMutator.CouldNotApplyMutationException;
 import org.apache.rya.api.persist.RyaDAOException;
 import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.pcj.storage.PcjMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
 import org.apache.rya.sail.config.RyaSailFactory;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.QueryEvaluationException;
+import org.openrdf.query.parser.ParsedQuery;
+import org.openrdf.query.parser.sparql.SPARQLParser;
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailConnection;
+import org.openrdf.sail.SailException;
+
+import com.google.common.base.Optional;
+
+import info.aduna.iteration.CloseableIteration;
 
 /**
  * Uses an in memory Rya Client to batch update a PCJ index.
@@ -126,12 +126,11 @@ public class AccumuloBatchUpdatePCJ extends AccumuloCommand implements BatchUpda
         SailConnection sailConn = null;
         CloseableIteration<? extends BindingSet, QueryEvaluationException> results = null;
 
-        try {
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(super.getConnector(), ryaInstanceName)) {
             // Create an instance of Sail backed by the Rya instance.
             sail = connectToRya(ryaInstanceName);
 
             // Purge the old results from the PCJ.
-            final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(super.getConnector(), ryaInstanceName);
             try {
                 pcjStorage.purge(pcjId);
             } catch (final PCJStorageException e) {
@@ -139,37 +138,35 @@ public class AccumuloBatchUpdatePCJ extends AccumuloCommand implements BatchUpda
                         "results could not be purged from it.", e);
             }
 
-            try {
-                // Parse the PCJ's SPARQL query.
-                final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-                final String sparql = metadata.getSparql();
-                final SPARQLParser parser = new SPARQLParser();
-                final ParsedQuery parsedQuery = parser.parseQuery(sparql, null);
-
-                // Execute the query.
-                sailConn = sail.getConnection();
-                results = sailConn.evaluate(parsedQuery.getTupleExpr(), null, null, false);
-
-                // Load the results into the PCJ table.
-                final List<VisibilityBindingSet> batch = new ArrayList<>(1000);
-
-                while(results.hasNext()) {
-                    final VisibilityBindingSet result = new VisibilityBindingSet(results.next(), "");
-                    batch.add(result);
-
-                    if(batch.size() == 1000) {
-                        pcjStorage.addResults(pcjId, batch);
-                        batch.clear();
-                    }
-                }
+            // Parse the PCJ's SPARQL query.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+            final String sparql = metadata.getSparql();
+            final SPARQLParser parser = new SPARQLParser();
+            final ParsedQuery parsedQuery = parser.parseQuery(sparql, null);
 
-                if(!batch.isEmpty()) {
+            // Execute the query.
+            sailConn = sail.getConnection();
+            results = sailConn.evaluate(parsedQuery.getTupleExpr(), null, null, false);
+
+            // Load the results into the PCJ table.
+            final List<VisibilityBindingSet> batch = new ArrayList<>(1000);
+
+            while(results.hasNext()) {
+                final VisibilityBindingSet result = new VisibilityBindingSet(results.next(), "");
+                batch.add(result);
+
+                if(batch.size() == 1000) {
                     pcjStorage.addResults(pcjId, batch);
                     batch.clear();
                 }
-            } catch(final MalformedQueryException | PCJStorageException | SailException | QueryEvaluationException e) {
-                throw new RyaClientException("Fail to batch load new results into the PCJ with ID '" + pcjId + "'.", e);
             }
+
+            if(!batch.isEmpty()) {
+                pcjStorage.addResults(pcjId, batch);
+                batch.clear();
+            }
+        } catch(final MalformedQueryException | PCJStorageException | SailException | QueryEvaluationException e) {
+            throw new RyaClientException("Fail to batch load new results into the PCJ with ID '" + pcjId + "'.", e);
         } finally {
             if(results != null) {
                 try {

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJ.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJ.java b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJ.java
index ac8da66..3fe1042 100644
--- a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJ.java
+++ b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJ.java
@@ -90,47 +90,46 @@ public class AccumuloCreatePCJ extends AccumuloCommand implements CreatePCJ {
 
         // Create the PCJ table that will receive the index results.
         final String pcjId;
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(getConnector(), instanceName);
-        try {
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(getConnector(), instanceName)) {
             pcjId = pcjStorage.createPcj(sparql);
-        } catch (final PCJStorageException e) {
-            throw new RyaClientException("Problem while initializing the PCJ table.", e);
-        }
 
-        // If a Fluo application is being used, task it with updating the PCJ.
-        final Optional<FluoDetails> fluoDetailsHolder = pcjIndexDetails.getFluoDetails();
-        if(fluoDetailsHolder.isPresent()) {
-            final String fluoAppName = fluoDetailsHolder.get().getUpdateAppName();
-            try {
-                updateFluoApp(instanceName, fluoAppName, pcjStorage, pcjId);
-            } catch (RepositoryException | MalformedQueryException | SailException | QueryEvaluationException | PcjException | RyaDAOException e) {
-                throw new RyaClientException("Problem while initializing the Fluo application with the new PCJ.", e);
+            // If a Fluo application is being used, task it with updating the PCJ.
+            final Optional<FluoDetails> fluoDetailsHolder = pcjIndexDetails.getFluoDetails();
+            if(fluoDetailsHolder.isPresent()) {
+                final String fluoAppName = fluoDetailsHolder.get().getUpdateAppName();
+                try {
+                    updateFluoApp(instanceName, fluoAppName, pcjStorage, pcjId);
+                } catch (RepositoryException | MalformedQueryException | SailException | QueryEvaluationException | PcjException | RyaDAOException e) {
+                    throw new RyaClientException("Problem while initializing the Fluo application with the new PCJ.", e);
+                }
+
+                // Update the Rya Details to indicate the PCJ is being updated incrementally.
+                final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(getConnector(), instanceName);
+                try {
+                    new RyaDetailsUpdater(detailsRepo).update(new RyaDetailsMutator() {
+                        @Override
+                        public RyaDetails mutate(final RyaDetails originalDetails) throws CouldNotApplyMutationException {
+                            // Update the original PCJ Details to indicate they are incrementally updated.
+                            final PCJDetails originalPCJDetails = originalDetails.getPCJIndexDetails().getPCJDetails().get(pcjId);
+                            final PCJDetails.Builder mutatedPCJDetails = PCJDetails.builder( originalPCJDetails )
+                                    .setUpdateStrategy( PCJUpdateStrategy.INCREMENTAL );
+
+                            // Replace the old PCJ Details with the updated ones.
+                            final RyaDetails.Builder builder = RyaDetails.builder(originalDetails);
+                            builder.getPCJIndexDetails().addPCJDetails( mutatedPCJDetails );
+                            return builder.build();
+                        }
+                    });
+                } catch (RyaDetailsRepositoryException | CouldNotApplyMutationException e) {
+                    throw new RyaClientException("Problem while updating the Rya instance's Details to indicate the PCJ is being incrementally updated.", e);
+                }
             }
 
-            // Update the Rya Details to indicate the PCJ is being updated incrementally.
-            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(getConnector(), instanceName);
-            try {
-                new RyaDetailsUpdater(detailsRepo).update(new RyaDetailsMutator() {
-                    @Override
-                    public RyaDetails mutate(final RyaDetails originalDetails) throws CouldNotApplyMutationException {
-                        // Update the original PCJ Details to indicate they are incrementally updated.
-                        final PCJDetails originalPCJDetails = originalDetails.getPCJIndexDetails().getPCJDetails().get(pcjId);
-                        final PCJDetails.Builder mutatedPCJDetails = PCJDetails.builder( originalPCJDetails )
-                            .setUpdateStrategy( PCJUpdateStrategy.INCREMENTAL );
-
-                        // Replace the old PCJ Details with the updated ones.
-                        final RyaDetails.Builder builder = RyaDetails.builder(originalDetails);
-                        builder.getPCJIndexDetails().addPCJDetails( mutatedPCJDetails );
-                        return builder.build();
-                    }
-                });
-            } catch (RyaDetailsRepositoryException | CouldNotApplyMutationException e) {
-                throw new RyaClientException("Problem while updating the Rya instance's Details to indicate the PCJ is being incrementally updated.", e);
-            }
+            // Return the ID that was assigned to the PCJ.
+            return pcjId;
+        } catch (final PCJStorageException e) {
+            throw new RyaClientException("Problem while initializing the PCJ table.", e);
         }
-
-        // Return the ID that was assigned to the PCJ.
-        return pcjId;
     }
 
     private void updateFluoApp(final String ryaInstance, final String fluoAppName, final PrecomputedJoinStorage pcjStorage, final String pcjId) throws RepositoryException, MalformedQueryException, SailException, QueryEvaluationException, PcjException, RyaDAOException {
@@ -139,16 +138,15 @@ public class AccumuloCreatePCJ extends AccumuloCommand implements CreatePCJ {
 
         // Connect to the Fluo application that is updating this instance's PCJs.
         final AccumuloConnectionDetails cd = super.getAccumuloConnectionDetails();
-        final FluoClient fluoClient = new FluoClientFactory().connect(
+        try(final FluoClient fluoClient = new FluoClientFactory().connect(
                 cd.getUsername(),
                 new String(cd.getPassword()),
                 cd.getInstanceName(),
                 cd.getZookeepers(),
-                fluoAppName);
-
-        // Initialize the PCJ within the Fluo application.
-        final org.apache.rya.indexing.pcj.fluo.api.CreatePcj fluoCreatePcj = new org.apache.rya.indexing.pcj.fluo.api.CreatePcj();
-        fluoCreatePcj.withRyaIntegration(pcjId, pcjStorage, fluoClient, getConnector(), ryaInstance);
+                fluoAppName);) {
+            // Initialize the PCJ within the Fluo application.
+            final org.apache.rya.indexing.pcj.fluo.api.CreatePcj fluoCreatePcj = new org.apache.rya.indexing.pcj.fluo.api.CreatePcj();
+            fluoCreatePcj.withRyaIntegration(pcjId, pcjStorage, fluoClient, getConnector(), ryaInstance);
+        }
     }
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJ.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJ.java b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJ.java
index b6728ec..96e6d58 100644
--- a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJ.java
+++ b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJ.java
@@ -20,19 +20,7 @@ package org.apache.rya.api.client.accumulo;
 
 import static java.util.Objects.requireNonNull;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.client.Connector;
-import org.apache.rya.indexing.pcj.fluo.api.DeletePcj;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.rya.api.client.DeletePCJ;
 import org.apache.rya.api.client.GetInstanceDetails;
@@ -43,6 +31,17 @@ import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails;
 import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.FluoDetails;
 import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
 import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails.PCJUpdateStrategy;
+import org.apache.rya.indexing.pcj.fluo.api.DeletePcj;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * An Accumulo implementation of the {@link DeletePCJ} command.
@@ -104,8 +103,7 @@ public class AccumuloDeletePCJ extends AccumuloCommand implements DeletePCJ {
         }
 
         // Drop the table that holds the PCJ results from Accumulo.
-        final PrecomputedJoinStorage pcjs = new AccumuloPcjStorage(getConnector(), instanceName);
-        try {
+        try(final PrecomputedJoinStorage pcjs = new AccumuloPcjStorage(getConnector(), instanceName)) {
             pcjs.dropPcj(pcjId);
         } catch (final PCJStorageException e) {
             throw new RyaClientException("Could not drop the PCJ's table from Accumulo.", e);
@@ -118,14 +116,14 @@ public class AccumuloDeletePCJ extends AccumuloCommand implements DeletePCJ {
 
         // Connect to the Fluo application that is updating this instance's PCJs.
         final AccumuloConnectionDetails cd = super.getAccumuloConnectionDetails();
-        final FluoClient fluoClient = new FluoClientFactory().connect(
+        try(final FluoClient fluoClient = new FluoClientFactory().connect(
                 cd.getUsername(),
                 new String(cd.getPassword()),
                 cd.getInstanceName(),
                 cd.getZookeepers(),
-                fluoAppName);
-
-        // Delete the PCJ from the Fluo App.
-        new DeletePcj(1000).deletePcj(fluoClient, pcjId);
+                fluoAppName)) {
+            // Delete the PCJ from the Fluo App.
+            new DeletePcj(1000).deletePcj(fluoClient, pcjId);
+        }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/main/java/org/apache/rya/indexing/pcj/matching/AccumuloIndexSetProvider.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/main/java/org/apache/rya/indexing/pcj/matching/AccumuloIndexSetProvider.java b/extras/indexing/src/main/java/org/apache/rya/indexing/pcj/matching/AccumuloIndexSetProvider.java
index 828ee4b..1940e64 100644
--- a/extras/indexing/src/main/java/org/apache/rya/indexing/pcj/matching/AccumuloIndexSetProvider.java
+++ b/extras/indexing/src/main/java/org/apache/rya/indexing/pcj/matching/AccumuloIndexSetProvider.java
@@ -17,24 +17,6 @@
  * under the License.
  */
 package org.apache.rya.indexing.pcj.matching;
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
 
 import static java.util.Objects.requireNonNull;
 
@@ -184,29 +166,31 @@ public class AccumuloIndexSetProvider implements ExternalSetProvider<ExternalTup
         }
         // this maps associates pcj table name with pcj sparql query
         final Map<String, String> indexTables = Maps.newLinkedHashMap();
-        final PrecomputedJoinStorage storage = new AccumuloPcjStorage(conn, tablePrefix);
-        final PcjTableNameFactory pcjFactory = new PcjTableNameFactory();
 
-        final boolean tablesProvided = tables != null && !tables.isEmpty();
+        try(final PrecomputedJoinStorage storage = new AccumuloPcjStorage(conn, tablePrefix)) {
+            final PcjTableNameFactory pcjFactory = new PcjTableNameFactory();
 
-        if (tablesProvided) {
-            // if tables provided, associate table name with sparql
-            for (final String table : tables) {
-                indexTables.put(table, storage.getPcjMetadata(pcjFactory.getPcjId(table)).getSparql());
-            }
-        } else if (hasRyaDetails(tablePrefix, conn)) {
-            // If this is a newer install of Rya, and it has PCJ Details, then
-            // use those.
-            final List<String> ids = storage.listPcjs();
-            for (final String id : ids) {
-                indexTables.put(pcjFactory.makeTableName(tablePrefix, id), storage.getPcjMetadata(id).getSparql());
-            }
-        } else {
-            // Otherwise figure it out by scanning tables.
-            final PcjTables pcjTables = new PcjTables();
-            for (final String table : conn.tableOperations().list()) {
-                if (table.startsWith(tablePrefix + "INDEX")) {
-                    indexTables.put(table, pcjTables.getPcjMetadata(conn, table).getSparql());
+            final boolean tablesProvided = tables != null && !tables.isEmpty();
+
+            if (tablesProvided) {
+                // if tables provided, associate table name with sparql
+                for (final String table : tables) {
+                    indexTables.put(table, storage.getPcjMetadata(pcjFactory.getPcjId(table)).getSparql());
+                }
+            } else if (hasRyaDetails(tablePrefix, conn)) {
+                // If this is a newer install of Rya, and it has PCJ Details, then
+                // use those.
+                final List<String> ids = storage.listPcjs();
+                for (final String id : ids) {
+                    indexTables.put(pcjFactory.makeTableName(tablePrefix, id), storage.getPcjMetadata(id).getSparql());
+                }
+            } else {
+                // Otherwise figure it out by scanning tables.
+                final PcjTables pcjTables = new PcjTables();
+                for (final String table : conn.tableOperations().list()) {
+                    if (table.startsWith(tablePrefix + "INDEX")) {
+                        indexTables.put(table, pcjTables.getPcjMetadata(conn, table).getSparql());
+                    }
                 }
             }
         }

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJIT.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJIT.java b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJIT.java
index 30eb4ca..5a2e69d 100644
--- a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJIT.java
+++ b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloBatchUpdatePCJIT.java
@@ -31,6 +31,7 @@ import org.apache.rya.indexing.accumulo.ConfigUtils;
 import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType;
 import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.sail.config.RyaSailFactory;
 import org.junit.Test;
@@ -63,7 +64,7 @@ public class AccumuloBatchUpdatePCJIT extends AccumuloITBase {
                 .build());
 
         Sail sail = null;
-        try {
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(super.getConnector(), RYA_INSTANCE_NAME)) {
             // Get a Sail connection backed by the installed Rya instance.
             final AccumuloRdfConfiguration ryaConf = new AccumuloRdfConfiguration();
             ryaConf.setTablePrefix(RYA_INSTANCE_NAME);
@@ -102,7 +103,6 @@ public class AccumuloBatchUpdatePCJIT extends AccumuloITBase {
             sailConn.close();
 
             // Create a PCJ for a SPARQL query.
-            final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(super.getConnector(), RYA_INSTANCE_NAME);
             final String sparql = "SELECT ?name WHERE { ?name <urn:likes> <urn:icecream> . ?name <urn:hasEyeColor> <urn:blue> . }";
             final String pcjId = pcjStorage.createPcj(sparql);
 
@@ -137,8 +137,10 @@ public class AccumuloBatchUpdatePCJIT extends AccumuloITBase {
             expectedResults.add(bs);
 
             final Set<BindingSet> results = new HashSet<>();
-            for(final BindingSet result : pcjStorage.listResults(pcjId)) {
-                results.add( result );
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
             }
 
             assertEquals(expectedResults, results);

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
index 6c9bf5e..f900837 100644
--- a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
+++ b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
@@ -24,6 +24,15 @@ import static org.junit.Assert.assertFalse;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.rya.api.client.CreatePCJ;
+import org.apache.rya.api.client.Install;
+import org.apache.rya.api.client.Install.DuplicateInstanceNameException;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.InstanceDoesNotExistException;
+import org.apache.rya.api.client.RyaClientException;
+import org.apache.rya.api.instance.RyaDetails;
+import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
+import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails.PCJUpdateStrategy;
 import org.apache.rya.indexing.pcj.fluo.api.ListQueryIds;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
@@ -36,16 +45,6 @@ import org.openrdf.query.impl.MapBindingSet;
 import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
-import org.apache.rya.api.client.CreatePCJ;
-import org.apache.rya.api.client.Install;
-import org.apache.rya.api.client.Install.DuplicateInstanceNameException;
-import org.apache.rya.api.client.Install.InstallConfiguration;
-import org.apache.rya.api.client.InstanceDoesNotExistException;
-import org.apache.rya.api.client.RyaClientException;
-import org.apache.rya.api.instance.RyaDetails;
-import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
-import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails.PCJUpdateStrategy;
-
 /**
  * Integration tests the methods of {@link AccumuloCreatePCJ}.
  */
@@ -80,42 +79,43 @@ public class AccumuloCreatePCJIT extends FluoITBase {
         assertEquals(PCJUpdateStrategy.INCREMENTAL, pcjDetails.getUpdateStrategy().get());
 
         // Verify the PCJ's metadata was initialized.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
-        assertEquals(sparql, pcjMetadata.getSparql());
-        assertEquals(0L, pcjMetadata.getCardinality());
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME)) {
+            final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
+            assertEquals(sparql, pcjMetadata.getSparql());
+            assertEquals(0L, pcjMetadata.getCardinality());
 
-        // Verify a Query ID was added for the query within the Fluo app.
-        final List<String> fluoQueryIds = new ListQueryIds().listQueryIds(fluoClient);
-        assertEquals(1, fluoQueryIds.size());
+            // Verify a Query ID was added for the query within the Fluo app.
+            final List<String> fluoQueryIds = new ListQueryIds().listQueryIds(fluoClient);
+            assertEquals(1, fluoQueryIds.size());
 
-        // Insert some statements into Rya.
-        final ValueFactory vf = ryaRepo.getValueFactory();
-        ryaConn.add(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
-        ryaConn.add(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
-        ryaConn.add(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
+            // Insert some statements into Rya.
+            final ValueFactory vf = ryaRepo.getValueFactory();
+            ryaConn.add(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
+            ryaConn.add(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
+            ryaConn.add(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve"));
 
-        ryaConn.add(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin"));
+            ryaConn.add(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin"));
 
-        ryaConn.add(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
-        ryaConn.add(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
-        ryaConn.add(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
-        ryaConn.add(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
+            ryaConn.add(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
+            ryaConn.add(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
+            ryaConn.add(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
+            ryaConn.add(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://TacoJoint"));
 
-        // Verify the correct results were exported.
-        fluo.waitForObservers();
+            // Verify the correct results were exported.
+            fluo.waitForObservers();
 
-        final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
+            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
 
-        final MapBindingSet bob = new MapBindingSet();
-        bob.addBinding("x", vf.createURI("http://Bob"));
+            final MapBindingSet bob = new MapBindingSet();
+            bob.addBinding("x", vf.createURI("http://Bob"));
 
-        final MapBindingSet charlie = new MapBindingSet();
-        charlie.addBinding("x", vf.createURI("http://Charlie"));
+            final MapBindingSet charlie = new MapBindingSet();
+            charlie.addBinding("x", vf.createURI("http://Charlie"));
 
-        final Set<BindingSet> expected = Sets.<BindingSet>newHashSet(bob, charlie);
+            final Set<BindingSet> expected = Sets.<BindingSet>newHashSet(bob, charlie);
 
-        assertEquals(expected, results);
+            assertEquals(expected, results);
+        }
     }
 
     @Test(expected = InstanceDoesNotExistException.class)

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJIT.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJIT.java b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJIT.java
index 573fccd..fd75167 100644
--- a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJIT.java
+++ b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloDeletePCJIT.java
@@ -24,6 +24,10 @@ import static org.junit.Assert.assertTrue;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.rya.api.client.CreatePCJ;
+import org.apache.rya.api.client.DeletePCJ;
+import org.apache.rya.api.client.InstanceDoesNotExistException;
+import org.apache.rya.api.client.RyaClientException;
 import org.apache.rya.indexing.pcj.fluo.api.ListQueryIds;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
@@ -36,11 +40,6 @@ import org.openrdf.repository.RepositoryException;
 
 import com.google.common.collect.Sets;
 
-import org.apache.rya.api.client.CreatePCJ;
-import org.apache.rya.api.client.DeletePCJ;
-import org.apache.rya.api.client.InstanceDoesNotExistException;
-import org.apache.rya.api.client.RyaClientException;
-
 /**
  * Integration tests the methods of {@link AccumuloCreatePCJ}.
  */
@@ -86,31 +85,32 @@ public class AccumuloDeletePCJIT extends FluoITBase {
         // Verify the correct results were exported.
         fluo.waitForObservers();
 
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME)) {
+            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
 
-        final MapBindingSet bob = new MapBindingSet();
-        bob.addBinding("x", vf.createURI("http://Bob"));
+            final MapBindingSet bob = new MapBindingSet();
+            bob.addBinding("x", vf.createURI("http://Bob"));
 
-        final MapBindingSet charlie = new MapBindingSet();
-        charlie.addBinding("x", vf.createURI("http://Charlie"));
+            final MapBindingSet charlie = new MapBindingSet();
+            charlie.addBinding("x", vf.createURI("http://Charlie"));
 
-        final Set<BindingSet> expected = Sets.<BindingSet>newHashSet(bob, charlie);
-        assertEquals(expected, results);
+            final Set<BindingSet> expected = Sets.<BindingSet>newHashSet(bob, charlie);
+            assertEquals(expected, results);
 
-        // Delete the PCJ.
-        final DeletePCJ deletePCJ = new AccumuloDeletePCJ(connectionDetails, accumuloConn);
-        deletePCJ.deletePCJ(RYA_INSTANCE_NAME, pcjId);
+            // Delete the PCJ.
+            final DeletePCJ deletePCJ = new AccumuloDeletePCJ(connectionDetails, accumuloConn);
+            deletePCJ.deletePCJ(RYA_INSTANCE_NAME, pcjId);
 
-        // Ensure the PCJ's metadata has been removed from the storage.
-        assertTrue( pcjStorage.listPcjs().isEmpty() );
+            // Ensure the PCJ's metadata has been removed from the storage.
+            assertTrue( pcjStorage.listPcjs().isEmpty() );
 
-        // Ensure the PCJ has been removed from the Fluo application.
-        fluo.waitForObservers();
+            // Ensure the PCJ has been removed from the Fluo application.
+            fluo.waitForObservers();
 
-        // Verify a Query ID was added for the query within the Fluo app.
-        fluoQueryIds = new ListQueryIds().listQueryIds(fluoClient);
-        assertEquals(0, fluoQueryIds.size());
+            // Verify a Query ID was added for the query within the Fluo app.
+            fluoQueryIds = new ListQueryIds().listQueryIds(fluoClient);
+            assertEquals(0, fluoQueryIds.size());
+        }
     }
 
     @Test(expected = InstanceDoesNotExistException.class)

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.benchmark/src/test/java/org/apache/rya/benchmark/query/QueryBenchmarkRunIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.benchmark/src/test/java/org/apache/rya/benchmark/query/QueryBenchmarkRunIT.java b/extras/rya.benchmark/src/test/java/org/apache/rya/benchmark/query/QueryBenchmarkRunIT.java
index c36cb2c..dd5fe68 100644
--- a/extras/rya.benchmark/src/test/java/org/apache/rya/benchmark/query/QueryBenchmarkRunIT.java
+++ b/extras/rya.benchmark/src/test/java/org/apache/rya/benchmark/query/QueryBenchmarkRunIT.java
@@ -25,10 +25,19 @@ import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
 import org.apache.rya.benchmark.query.QueryBenchmark.QueryBenchmarkRun;
 import org.apache.rya.benchmark.query.QueryBenchmark.QueryBenchmarkRun.NotEnoughResultsException;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.sail.config.RyaSailFactory;
 import org.apache.zookeeper.ClientCnxn;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -38,16 +47,6 @@ import org.openrdf.sail.Sail;
 import org.openrdf.sail.SailConnection;
 import org.openrdf.sail.SailException;
 
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.api.client.Install.InstallConfiguration;
-import org.apache.rya.api.client.RyaClient;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
-import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
-import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType;
-import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType;
-import org.apache.rya.sail.config.RyaSailFactory;
-
 /**
  * Integration tests {@link QueryBenchmarkRun}.
  */
@@ -145,12 +144,12 @@ public class QueryBenchmarkRunIT {
 
     private static void createTestPCJ(final RyaClient ryaClient) throws Exception {
         // Create an empty PCJ within the Rya instance's PCJ storage for the test query.
-        final PrecomputedJoinStorage pcjs = new AccumuloPcjStorage(cluster.getConnector(ACCUMULO_USER, ACCUMULO_PASSWORD), RYA_INSTANCE_NAME);
-        final String pcjId = pcjs.createPcj(SPARQL_QUERY);
+        try(final PrecomputedJoinStorage pcjs = new AccumuloPcjStorage(cluster.getConnector(ACCUMULO_USER, ACCUMULO_PASSWORD), RYA_INSTANCE_NAME)) {
+            final String pcjId = pcjs.createPcj(SPARQL_QUERY);
 
-
-        // Batch update the PCJ using the Rya Client.
-        ryaClient.getBatchUpdatePCJ().batchUpdate(RYA_INSTANCE_NAME, pcjId);
+            // Batch update the PCJ using the Rya Client.
+            ryaClient.getBatchUpdatePCJ().batchUpdate(RYA_INSTANCE_NAME, pcjId);
+        }
     }
 
     @AfterClass

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PrecomputedJoinStorage.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PrecomputedJoinStorage.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PrecomputedJoinStorage.java
index 16653ee..38ae1b2 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PrecomputedJoinStorage.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PrecomputedJoinStorage.java
@@ -22,17 +22,17 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 import org.openrdf.query.BindingSet;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * Functions that create and maintain the PCJ tables that are used by Rya.
  */
 @DefaultAnnotation(NonNull.class)
-public interface PrecomputedJoinStorage {
+public interface PrecomputedJoinStorage extends AutoCloseable {
 
     /**
      * Get a list of all Precomputed Join indices that are being maintained.
@@ -75,7 +75,7 @@ public interface PrecomputedJoinStorage {
      *   results for the PCJ.
      * @throws PCJStorageException The scan couldn't be performed.
      */
-    public Iterable<BindingSet> listResults(String pcjId) throws PCJStorageException;
+    public CloseableIterator<BindingSet> listResults(String pcjId) throws PCJStorageException;
 
     /**
      * Clears all values from a Precomputed Join index. The index will remain,
@@ -94,15 +94,25 @@ public interface PrecomputedJoinStorage {
      */
     public void dropPcj(final String pcjId) throws PCJStorageException;
 
-
     /**
      * Releases and resources that are being used by the storage.
      *
      * @throws PCJStorageException Indicates the resources could not be released.
      */
+    @Override
     public void close() throws PCJStorageException;
 
     /**
+     * An {@link Iterator} that also extends {@link AutoCloseable} because it has reference to resources
+     * that need to be released once you are done iterating.
+     *
+     * @param <T> The type of object that is iterated over.
+     */
+    public static interface CloseableIterator<T> extends Iterator<T>, AutoCloseable {
+
+    }
+
+    /**
      * An operation of {@link PrecomputedJoinStorage} failed.
      */
     public static class PCJStorageException extends PcjException {

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializer.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializer.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializer.java
index 4769758..999b26f 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializer.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializer.java
@@ -34,7 +34,6 @@ import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
 import edu.umd.cs.findbugs.annotations.NonNull;
 
 import org.openrdf.model.Value;
-import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.algebra.evaluation.QueryBindingSet;
 
@@ -57,7 +56,6 @@ public class AccumuloPcjSerializer implements BindingSetConverter<byte[]> {
     public byte[] convert(BindingSet bindingSet, VariableOrder varOrder) throws BindingSetConversionException {
         checkNotNull(bindingSet);
         checkNotNull(varOrder);
-        checkBindingsSubsetOfVarOrder(bindingSet, varOrder);
 
         // A list that holds all of the byte segments that will be concatenated at the end.
         // This minimizes byte[] construction.
@@ -112,24 +110,6 @@ public class AccumuloPcjSerializer implements BindingSetConverter<byte[]> {
         }
     }
 
-    /**
-     * Checks to see if the names of all the {@link Binding}s in the {@link BindingSet}
-     * are a subset of the variables names in {@link VariableOrder}.
-     *
-     * @param bindingSet - The binding set whose Bindings will be inspected. (not null)
-     * @param varOrder - The names of the bindings that may appear in the BindingSet. (not null)
-     * @throws IllegalArgumentException Indicates the names of the bindings are
-     *   not a subset of the variable order.
-     */
-    private static void checkBindingsSubsetOfVarOrder(BindingSet bindingSet, VariableOrder varOrder) throws IllegalArgumentException {
-        checkNotNull(bindingSet);
-        checkNotNull(varOrder);
-
-        Set<String> bindingNames = bindingSet.getBindingNames();
-        List<String> varNames = varOrder.getVariableOrders();
-        checkArgument(varNames.containsAll(bindingNames), "The BindingSet contains a Binding whose name is not part of the VariableOrder.");
-    }
-
     private static final byte[] concat(Iterable<byte[]> byteSegments) {
         checkNotNull(byteSegments);
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjStorage.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjStorage.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjStorage.java
index 6024d12..b8974e6 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjStorage.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjStorage.java
@@ -25,9 +25,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Set;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -47,6 +44,9 @@ import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.MalformedQueryException;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * An Accumulo backed implementation of {@link PrecomputedJoinStorage}.
  */
@@ -156,7 +156,7 @@ public class AccumuloPcjStorage implements PrecomputedJoinStorage {
     }
 
     @Override
-    public Iterable<BindingSet> listResults(final String pcjId) throws PCJStorageException {
+    public CloseableIterator<BindingSet> listResults(final String pcjId) throws PCJStorageException {
         requireNonNull(pcjId);
 
         try {

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetConverter.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetConverter.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetConverter.java
index d2cf366..c920824 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetConverter.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetConverter.java
@@ -18,12 +18,12 @@
  */
 package org.apache.rya.indexing.pcj.storage.accumulo;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * Converts {@link BindingSet}s into other representations. This library is
  * intended to convert between BindingSet and whatever format it is being
@@ -52,8 +52,7 @@ public interface BindingSetConverter<T> {
     *   resulting model. (not null)
     * @return The BindingSet formatted as the target model.
     * @throws BindingSetConversionException The BindingSet was unable to be
-    *   converted into the target model. This will happen if the BindingSet has
-    *   a binding whose name is not in the VariableOrder or if one of the values
+    *   converted into the target model. This will happen if one of the values
     *   could not be converted into the target model.
     */
    public T convert(BindingSet bindingSet, VariableOrder varOrder) throws BindingSetConversionException;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverter.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverter.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverter.java
index b2d04e1..4120fd9 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverter.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverter.java
@@ -19,29 +19,27 @@
 package org.apache.rya.indexing.pcj.storage.accumulo;
 
 import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
 
+import org.apache.rya.api.domain.RyaType;
+import org.apache.rya.api.resolver.RdfToRyaConversions;
 import org.openrdf.model.URI;
 import org.openrdf.model.Value;
 import org.openrdf.model.ValueFactory;
 import org.openrdf.model.impl.URIImpl;
 import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.model.vocabulary.XMLSchema;
-import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.query.impl.MapBindingSet;
 
 import com.google.common.base.Joiner;
 
-import org.apache.rya.api.domain.RyaType;
-import org.apache.rya.api.resolver.RdfToRyaConversions;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Converts {@link BindingSet}s to Strings and back again. The Strings do not
@@ -58,7 +56,8 @@ public class BindingSetStringConverter implements BindingSetConverter<String> {
 
     @Override
     public String convert(final BindingSet bindingSet, final VariableOrder varOrder) {
-        checkBindingsSubsetOfVarOrder(bindingSet, varOrder);
+        requireNonNull(bindingSet);
+        requireNonNull(varOrder);
 
         // Convert each Binding to a String.
         final List<String> bindingStrings = new ArrayList<>();
@@ -79,38 +78,26 @@ public class BindingSetStringConverter implements BindingSetConverter<String> {
         return Joiner.on(BINDING_DELIM).join(bindingStrings);
     }
 
-    /**
-     * Checks to see if the names of all the {@link Binding}s in the {@link BindingSet}
-     * are a subset of the variables names in {@link VariableOrder}.
-     *
-     * @param bindingSet - The binding set whose Bindings will be inspected. (not null)
-     * @param varOrder - The names of the bindings that may appear in the BindingSet. (not null)
-     * @throws IllegalArgumentException Indicates the names of the bindings are
-     *   not a subset of the variable order.
-     */
-    private static void checkBindingsSubsetOfVarOrder(final BindingSet bindingSet, final VariableOrder varOrder) throws IllegalArgumentException {
-        checkNotNull(bindingSet);
-        checkNotNull(varOrder);
-
-        final Set<String> bindingNames = bindingSet.getBindingNames();
-        final List<String> varNames = varOrder.getVariableOrders();
-        checkArgument(varNames.containsAll(bindingNames), "The BindingSet contains a Binding whose name is not part of the VariableOrder.");
-    }
-
     @Override
     public BindingSet convert(final String bindingSetString, final VariableOrder varOrder) {
-        checkNotNull(bindingSetString);
-        checkNotNull(varOrder);
+        requireNonNull(bindingSetString);
+        requireNonNull(varOrder);
+
+        // If both are empty, return an empty binding set.
+        if(bindingSetString.isEmpty() && varOrder.toString().isEmpty()) {
+            return new MapBindingSet();
+        }
 
+        // Otherwise parse it.
         final String[] bindingStrings = bindingSetString.split(BINDING_DELIM);
-        final String[] varOrrderArr = varOrder.toArray();
-        checkArgument(varOrrderArr.length == bindingStrings.length, "The number of Bindings must match the length of the VariableOrder.");
+        final String[] varOrderArr = varOrder.toArray();
+        checkArgument(varOrderArr.length == bindingStrings.length, "The number of Bindings must match the length of the VariableOrder.");
 
         final QueryBindingSet bindingSet = new QueryBindingSet();
         for(int i = 0; i < bindingStrings.length; i++) {
             final String bindingString = bindingStrings[i];
             if(!NULL_VALUE_STRING.equals(bindingString)) {
-                final String name = varOrrderArr[i];
+                final String name = varOrderArr[i];
                 final Value value = toValue(bindingStrings[i]);
                 bindingSet.addBinding(name, value);
             }
@@ -125,7 +112,7 @@ public class BindingSetStringConverter implements BindingSetConverter<String> {
      * @return The {@link Value} representation of the String.
      */
     protected static Value toValue(final String valueString) {
-        checkNotNull(valueString);
+        requireNonNull(valueString);
 
         // Split the String that was stored in Fluo into its Value and Type parts.
         final String[] valueAndType = valueString.split(TYPE_DELIM);

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
index ce3e5d1..5d13597 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
@@ -30,9 +30,6 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -59,6 +56,7 @@ import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
 import org.openrdf.query.BindingSet;
@@ -72,6 +70,9 @@ import org.openrdf.repository.RepositoryException;
 
 import com.google.common.base.Optional;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * Functions that create and maintain the PCJ tables that are used by Rya.
  */
@@ -157,6 +158,7 @@ public class PcjTables {
 
         final TableOperations tableOps = accumuloConn.tableOperations();
         if(!tableOps.exists(pcjTableName)) {
+            BatchWriter writer = null;
             try {
                 // Create the new table in Accumulo.
                 tableOps.create(pcjTableName);
@@ -165,14 +167,21 @@ public class PcjTables {
                 final PcjMetadata pcjMetadata = new PcjMetadata(sparql, 0L, varOrders);
                 final List<Mutation> mutations = makeWriteMetadataMutations(pcjMetadata);
 
-                final BatchWriter writer = accumuloConn.createBatchWriter(pcjTableName, new BatchWriterConfig());
+                writer = accumuloConn.createBatchWriter(pcjTableName, new BatchWriterConfig());
                 writer.addMutations(mutations);
-                writer.close();
             } catch (final TableExistsException e) {
                 log.warn("Something else just created the Rya PCJ export table named '" + pcjTableName
                         + "'. This is unexpected, but we will continue as normal.");
             } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
                 throw new PCJStorageException("Could not create a new PCJ named: " + pcjTableName, e);
+            } finally {
+                if(writer != null) {
+                    try {
+                        writer.close();
+                    } catch (final MutationsRejectedException e) {
+                        log.error("Mutations rejected while creating the PCJ table.", e);
+                    }
+                }
             }
         }
     }
@@ -231,9 +240,10 @@ public class PcjTables {
         checkNotNull(accumuloConn);
         checkNotNull(pcjTableName);
 
+        Scanner scanner = null;
         try {
             // Create an Accumulo scanner that iterates through the metadata entries.
-            final Scanner scanner = accumuloConn.createScanner(pcjTableName, new Authorizations());
+            scanner = accumuloConn.createScanner(pcjTableName, new Authorizations());
             final Iterator<Entry<Key, Value>> entries = scanner.iterator();
 
             // No metadata has been stored in the table yet.
@@ -266,6 +276,10 @@ public class PcjTables {
 
         } catch (final TableNotFoundException e) {
             throw new PCJStorageException("Could not add results to a PCJ because the PCJ table does not exist.", e);
+        } finally {
+            if(scanner != null) {
+                scanner.close();
+            }
         }
     }
 
@@ -310,7 +324,7 @@ public class PcjTables {
      *   results for the PCJ.
      * @throws PCJStorageException The binding sets could not be fetched.
      */
-    public Iterable<BindingSet> listResults(final Connector accumuloConn, final String pcjTableName, final Authorizations auths) throws PCJStorageException {
+    public CloseableIterator<BindingSet> listResults(final Connector accumuloConn, final String pcjTableName, final Authorizations auths) throws PCJStorageException {
         requireNonNull(pcjTableName);
 
         // Fetch the Variable Orders for the binding sets and choose one of them. It
@@ -324,12 +338,7 @@ public class PcjTables {
             scanner.fetchColumnFamily( new Text(varOrder.toString()) );
 
             // Return an Iterator that uses that scanner.
-            return new Iterable<BindingSet>() {
-                @Override
-                public Iterator<BindingSet> iterator() {
-                    return new ScannerBindingSetIterator(scanner, varOrder);
-                }
-            };
+            return new ScannerBindingSetIterator(scanner, varOrder);
 
         } catch (final TableNotFoundException e) {
             throw new PCJStorageException(String.format("PCJ Table does not exist for name '%s'.", pcjTableName), e);
@@ -398,10 +407,10 @@ public class PcjTables {
         for(final VariableOrder varOrder : varOrders) {
             try {
                 // Serialize the result to the variable order.
-                final byte[] serializedResult = converter.convert(result, varOrder);
+                final byte[] rowKey = converter.convert(result, varOrder);
 
                 // Row ID = binding set values, Column Family = variable order of the binding set.
-                final Mutation addResult = new Mutation(serializedResult);
+                final Mutation addResult = new Mutation(rowKey);
                 final String visibility = result.getVisibility();
                 addResult.put(varOrder.toString(), "", new ColumnVisibility(visibility), "");
                 mutations.add(addResult);

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/ScannerBindingSetIterator.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/ScannerBindingSetIterator.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/ScannerBindingSetIterator.java
index d0fd7bf..26fd8c9 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/ScannerBindingSetIterator.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/ScannerBindingSetIterator.java
@@ -20,27 +20,30 @@ package org.apache.rya.indexing.pcj.storage.accumulo;
 
 import static java.util.Objects.requireNonNull;
 
+import java.io.IOException;
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
 import org.openrdf.query.BindingSet;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 /**
  * Iterates over the results of a {@link Scanner} assuming the results are
  * binding sets that can be converted using a {@link AccumuloPcjSerializer}.
  */
 @DefaultAnnotation(NonNull.class)
-public class ScannerBindingSetIterator implements Iterator<BindingSet> {
+public class ScannerBindingSetIterator implements CloseableIterator<BindingSet> {
 
     private static final AccumuloPcjSerializer converter = new AccumuloPcjSerializer();
 
+    private final Scanner scanner;
     private final Iterator<Entry<Key, Value>> accEntries;
     private final VariableOrder varOrder;
 
@@ -51,7 +54,7 @@ public class ScannerBindingSetIterator implements Iterator<BindingSet> {
      * @param varOrder - The variable order of the binding sets the scanner returns. (not null)
      */
     public ScannerBindingSetIterator(final Scanner scanner, final VariableOrder varOrder) {
-        requireNonNull(scanner);
+        this.scanner = requireNonNull(scanner);
         this.accEntries = scanner.iterator();
         this.varOrder = requireNonNull(varOrder);
     }
@@ -71,4 +74,9 @@ public class ScannerBindingSetIterator implements Iterator<BindingSet> {
             throw new RuntimeException("Could not deserialize a BindingSet from Accumulo.", e);
         }
     }
+
+    @Override
+    public void close() throws IOException {
+        scanner.close();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VariableOrder.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VariableOrder.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VariableOrder.java
index 6ec801e..151db50 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VariableOrder.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VariableOrder.java
@@ -23,15 +23,15 @@ import static com.google.common.base.Preconditions.checkNotNull;
 import java.util.Collection;
 import java.util.Iterator;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-import net.jcip.annotations.Immutable;
-
 import org.openrdf.query.BindingSet;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import net.jcip.annotations.Immutable;
+
 /**
  * An ordered list of {@link BindingSet} variable names. These are used to
  * specify the order {@link Binding}s within the set are serialized to Accumulo.
@@ -46,6 +46,13 @@ public final class VariableOrder implements Iterable<String> {
     private final ImmutableList<String> variableOrder;
 
     /**
+     * Constructs an instance of {@link VariableOrder} when there are no variables.
+     */
+    public VariableOrder() {
+        variableOrder = ImmutableList.of();
+    }
+
+    /**
      * Constructs an instance of {@link VariableOrder}.
      *
      * @param varOrder - An ordered array of Binding Set variables. (not null)

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/update/PrecomputedJoinUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/update/PrecomputedJoinUpdater.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/update/PrecomputedJoinUpdater.java
index 2baa52e..f67110e 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/update/PrecomputedJoinUpdater.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/update/PrecomputedJoinUpdater.java
@@ -20,19 +20,18 @@ package org.apache.rya.indexing.pcj.update;
 
 import java.util.Collection;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
+import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.indexing.pcj.storage.PcjException;
 
-import org.apache.rya.api.domain.RyaStatement;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Updates the state of all PCJ indices whenever {@link RyaStatement}s are
  * added to or removed from the system.
  */
 @DefaultAnnotation(NonNull.class)
-public interface PrecomputedJoinUpdater {
+public interface PrecomputedJoinUpdater extends AutoCloseable {
 
     /**
      * The PCJ indices will be updated to include new statements within
@@ -80,6 +79,7 @@ public interface PrecomputedJoinUpdater {
      *
      * @throws PcjUpdateException The updater could not be closed.
      */
+    @Override
     public void close() throws PcjUpdateException;
 
     /**


[5/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
index f9acb11..dfc3333 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
@@ -18,23 +18,30 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Collection;
 import java.util.Map;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
+import org.apache.fluo.api.client.SnapshotBase;
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.log4j.Logger;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 
-import com.google.common.collect.Sets;
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
 
-import org.apache.fluo.api.client.SnapshotBase;
-import org.apache.fluo.api.client.TransactionBase;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.Column;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Reads and writes {@link FluoQuery} instances and their components to/from
@@ -50,8 +57,8 @@ public class FluoQueryMetadataDAO {
      * @param metadata - The Query node metadata that will be written to the table. (not null)
      */
     public void write(final TransactionBase tx, final QueryMetadata metadata) {
-        checkNotNull(tx);
-        checkNotNull(metadata);
+        requireNonNull(tx);
+        requireNonNull(metadata);
 
         final String rowId = metadata.getNodeId();
         tx.set(rowId, FluoQueryColumns.QUERY_NODE_ID, rowId);
@@ -65,19 +72,19 @@ public class FluoQueryMetadataDAO {
      *
      * @param sx - The snapshot that will be used to read the metadata . (not null)
     * @param nodeId - The nodeId of the Query node that will be read. (not null)
-     * @return The {@link QueryMetadata} that was read from table.
+     * @return The {@link QueryMetadata} that was read from the table.
      */
     public QueryMetadata readQueryMetadata(final SnapshotBase sx, final String nodeId) {
         return readQueryMetadataBuilder(sx, nodeId).build();
     }
 
     private QueryMetadata.Builder readQueryMetadataBuilder(final SnapshotBase sx, final String nodeId) {
-        checkNotNull(sx);
-        checkNotNull(nodeId);
+        requireNonNull(sx);
+        requireNonNull(nodeId);
 
         // Fetch the values from the Fluo table.
         final String rowId = nodeId;
-        final Map<Column, String> values = sx.gets(rowId, 
+        final Map<Column, String> values = sx.gets(rowId,
                 FluoQueryColumns.QUERY_VARIABLE_ORDER,
                 FluoQueryColumns.QUERY_SPARQL,
                 FluoQueryColumns.QUERY_CHILD_NODE_ID);
@@ -102,8 +109,8 @@ public class FluoQueryMetadataDAO {
      * @param metadata - The Filter node metadata that will be written to the table. (not null)
      */
     public void write(final TransactionBase tx, final FilterMetadata metadata) {
-        checkNotNull(tx);
-        checkNotNull(metadata);
+        requireNonNull(tx);
+        requireNonNull(metadata);
 
         final String rowId = metadata.getNodeId();
         tx.set(rowId, FluoQueryColumns.FILTER_NODE_ID, rowId);
@@ -119,19 +126,19 @@ public class FluoQueryMetadataDAO {
      *
      * @param sx - The snapshot that will be used to read the metadata. (not null)
     * @param nodeId - The nodeId of the Filter node that will be read. (not null)
-     * @return The {@link FilterMetadata} that was read from table.
+     * @return The {@link FilterMetadata} that was read from the table.
      */
     public FilterMetadata readFilterMetadata(final SnapshotBase sx, final String nodeId) {
         return readFilterMetadataBuilder(sx, nodeId).build();
     }
 
     private FilterMetadata.Builder readFilterMetadataBuilder(final SnapshotBase sx, final String nodeId) {
-        checkNotNull(sx);
-        checkNotNull(nodeId);
+        requireNonNull(sx);
+        requireNonNull(nodeId);
 
         // Fetch the values from the Fluo table.
         final String rowId = nodeId;
-        final Map<Column, String> values = sx.gets(rowId, 
+        final Map<Column, String> values = sx.gets(rowId,
                 FluoQueryColumns.FILTER_VARIABLE_ORDER,
                 FluoQueryColumns.FILTER_ORIGINAL_SPARQL,
                 FluoQueryColumns.FILTER_INDEX_WITHIN_SPARQL,
@@ -162,8 +169,8 @@ public class FluoQueryMetadataDAO {
      * @param metadata - The Join node metadata that will be written to the table. (not null)
      */
     public void write(final TransactionBase tx, final JoinMetadata metadata) {
-        checkNotNull(tx);
-        checkNotNull(metadata);
+        requireNonNull(tx);
+        requireNonNull(metadata);
 
         final String rowId = metadata.getNodeId();
         tx.set(rowId, FluoQueryColumns.JOIN_NODE_ID, rowId);
@@ -179,15 +186,15 @@ public class FluoQueryMetadataDAO {
      *
      * @param sx - The snapshot that will be used to read the metadata. (not null)
     * @param nodeId - The nodeId of the Join node that will be read. (not null)
-     * @return The {@link JoinMetadata} that was read from table.
+     * @return The {@link JoinMetadata} that was read from the table.
      */
     public JoinMetadata readJoinMetadata(final SnapshotBase sx, final String nodeId) {
         return readJoinMetadataBuilder(sx, nodeId).build();
     }
 
     private JoinMetadata.Builder readJoinMetadataBuilder(final SnapshotBase sx, final String nodeId) {
-        checkNotNull(sx);
-        checkNotNull(nodeId);
+        requireNonNull(sx);
+        requireNonNull(nodeId);
 
         // Fetch the values from the Fluo table.
         final String rowId = nodeId;
@@ -224,8 +231,8 @@ public class FluoQueryMetadataDAO {
      * @param metadata - The Statement Pattern node metadata that will be written to the table. (not null)
      */
     public void write(final TransactionBase tx, final StatementPatternMetadata metadata) {
-        checkNotNull(tx);
-        checkNotNull(metadata);
+        requireNonNull(tx);
+        requireNonNull(metadata);
 
         final String rowId = metadata.getNodeId();
         tx.set(rowId, FluoQueryColumns.STATEMENT_PATTERN_NODE_ID, rowId);
@@ -239,15 +246,15 @@ public class FluoQueryMetadataDAO {
      *
      * @param sx - The snapshot that will be used to read the metadata. (not null)
     * @param nodeId - The nodeId of the Statement Pattern node that will be read. (not null)
-     * @return The {@link StatementPatternMetadata} that was read from table.
+     * @return The {@link StatementPatternMetadata} that was read from the table.
      */
     public StatementPatternMetadata readStatementPatternMetadata(final SnapshotBase sx, final String nodeId) {
         return readStatementPatternMetadataBuilder(sx, nodeId).build();
     }
 
     private StatementPatternMetadata.Builder readStatementPatternMetadataBuilder(final SnapshotBase sx, final String nodeId) {
-        checkNotNull(sx);
-        checkNotNull(nodeId);
+        requireNonNull(sx);
+        requireNonNull(nodeId);
 
         // Fetch the values from the Fluo table.
         final String rowId = nodeId;
@@ -270,14 +277,104 @@ public class FluoQueryMetadataDAO {
     }
 
     /**
+     * Write an instance of {@link AggregationMetadata} to the Fluo table.
+     *
+     * @param tx - The transaction that will be used to commit the metadata. (not null)
+     * @param metadata - The Aggregation node metadata that will be written to the table. (not null)
+     */
+    public void write(final TransactionBase tx, final AggregationMetadata metadata) {
+        requireNonNull(tx);
+        requireNonNull(metadata);
+
+        final String rowId = metadata.getNodeId();
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_NODE_ID, rowId);
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_VARIABLE_ORDER, metadata.getVariableOrder().toString());
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_PARENT_NODE_ID, metadata.getParentNodeId());
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_CHILD_NODE_ID, metadata.getChildNodeId());
+
+        // Store the Group By variable order.
+        final VariableOrder groupByVars = metadata.getGroupByVariableOrder();
+        final String groupByString = Joiner.on(";").join(groupByVars.getVariableOrders());
+        tx.set(rowId, FluoQueryColumns.AGGREGATION_GROUP_BY_BINDING_NAMES, groupByString);
+
+        // Serialize the collection of AggregationElements.
+        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try(final ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+            oos.writeObject( metadata.getAggregations() );
+        } catch (final IOException e) {
+            throw new RuntimeException("Problem encountered while writing AggregationMetadata to the Fluo table. Unable " +
+                    "to serialize the AggregationElements to a byte[].", e);
+        }
+        tx.set(Bytes.of(rowId.getBytes(Charsets.UTF_8)), FluoQueryColumns.AGGREGATION_AGGREGATIONS, Bytes.of(baos.toByteArray()));
+    }
+
+    /**
+     * Read an instance of {@link AggregationMetadata} from the Fluo table.
+     *
+     * @param sx - The snapshot that will be used to read the metadata. (not null)
+     * @param nodeId - The nodeId of the Aggregation node that will be read. (not null)
+     * @return The {@link AggregationMetadata} that was read from the table.
+     */
+    public AggregationMetadata readAggregationMetadata(final SnapshotBase sx, final String nodeId) {
+        return readAggregationMetadataBuilder(sx, nodeId).build();
+    }
+
+    private AggregationMetadata.Builder readAggregationMetadataBuilder(final SnapshotBase sx, final String nodeId) {
+        requireNonNull(sx);
+        requireNonNull(nodeId);
+
+        // Fetch the values from the Fluo table.
+        final String rowId = nodeId;
+        final Map<Column, String> values = sx.gets(rowId,
+                FluoQueryColumns.AGGREGATION_VARIABLE_ORDER,
+                FluoQueryColumns.AGGREGATION_PARENT_NODE_ID,
+                FluoQueryColumns.AGGREGATION_CHILD_NODE_ID,
+                FluoQueryColumns.AGGREGATION_GROUP_BY_BINDING_NAMES);
+
+
+        // Return an object holding them.
+        final String varOrderString = values.get(FluoQueryColumns.AGGREGATION_VARIABLE_ORDER);
+        final VariableOrder varOrder = new VariableOrder(varOrderString);
+
+        final String parentNodeId = values.get(FluoQueryColumns.AGGREGATION_PARENT_NODE_ID);
+        final String childNodeId = values.get(FluoQueryColumns.AGGREGATION_CHILD_NODE_ID);
+
+        // Read the Group By variable order if one was present.
+        final String groupByString = values.get(FluoQueryColumns.AGGREGATION_GROUP_BY_BINDING_NAMES);
+        final VariableOrder groupByVars = groupByString.isEmpty() ? new VariableOrder() : new VariableOrder( groupByString.split(";") );
+
+        // Deserialize the collection of AggregationElements.
+        final Bytes aggBytes = sx.get(Bytes.of(nodeId.getBytes(Charsets.UTF_8)), FluoQueryColumns.AGGREGATION_AGGREGATIONS);
+        final Collection<AggregationElement> aggregations;
+        try(final ObjectInputStream ois = new ObjectInputStream(aggBytes.toInputStream())) {
+             aggregations = (Collection<AggregationElement>)ois.readObject();
+        } catch (final IOException | ClassNotFoundException e) {
+            throw new RuntimeException("Problem encountered while reading AggregationMetadata from the Fluo table. Unable " +
+                    "to deserialize the AggregationElements from a byte[].", e);
+        }
+
+        final AggregationMetadata.Builder builder = AggregationMetadata.builder(nodeId)
+                .setVariableOrder(varOrder)
+                .setParentNodeId(parentNodeId)
+                .setChildNodeId(childNodeId)
+                .setGroupByVariableOrder(groupByVars);
+
+        for(final AggregationElement aggregation : aggregations) {
+            builder.addAggregation(aggregation);
+        }
+
+        return builder;
+    }
+
+    /**
      * Write an instance of {@link FluoQuery} to the Fluo table.
      *
      * @param tx - The transaction that will be used to commit the metadata. (not null)
      * @param query - The query metadata that will be written to the table. (not null)
      */
     public void write(final TransactionBase tx, final FluoQuery query) {
-        checkNotNull(tx);
-        checkNotNull(query);
+        requireNonNull(tx);
+        requireNonNull(query);
 
         // Store the Query ID so that it may be looked up from the original SPARQL string.
         final String sparql = query.getQueryMetadata().getSparql();
@@ -298,6 +395,10 @@ public class FluoQueryMetadataDAO {
         for(final StatementPatternMetadata statementPattern : query.getStatementPatternMetadata()) {
             write(tx, statementPattern);
         }
+
+        for(final AggregationMetadata aggregation : query.getAggregationMetadata()) {
+            write(tx, aggregation);
+        }
     }
 
     /**
@@ -308,8 +409,8 @@ public class FluoQueryMetadataDAO {
     * @return The {@link FluoQuery} that was read from the table.
      */
     public FluoQuery readFluoQuery(final SnapshotBase sx, final String queryId) {
-        checkNotNull(sx);
-        checkNotNull(queryId);
+        requireNonNull(sx);
+        requireNonNull(queryId);
 
         final FluoQuery.Builder fluoQueryBuilder = FluoQuery.builder();
         addChildMetadata(sx, fluoQueryBuilder, queryId);
@@ -317,9 +418,9 @@ public class FluoQueryMetadataDAO {
     }
 
     private void addChildMetadata(final SnapshotBase sx, final FluoQuery.Builder builder, final String childNodeId) {
-        checkNotNull(sx);
-        checkNotNull(builder);
-        checkNotNull(childNodeId);
+        requireNonNull(sx);
+        requireNonNull(builder);
+        requireNonNull(childNodeId);
 
         final NodeType childType = NodeType.fromNodeId(childNodeId).get();
         switch(childType) {
@@ -357,6 +458,15 @@ public class FluoQueryMetadataDAO {
                 final StatementPatternMetadata.Builder spBuilder = readStatementPatternMetadataBuilder(sx, childNodeId);
                 builder.addStatementPatternBuilder(spBuilder);
                 break;
+
+            case AGGREGATION:
+                // Add this node's metadata.
+                final AggregationMetadata.Builder aggregationBuilder = readAggregationMetadataBuilder(sx, childNodeId);
+                builder.addAggregateMetadata(aggregationBuilder);
+
+                // Add its child's metadata.
+                addChildMetadata(sx, builder, aggregationBuilder.build().getChildNodeId());
+                break;
         }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
index 2128700..562470a 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
@@ -19,6 +19,7 @@
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.AGGREGATION_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.FILTER_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.JOIN_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.QUERY_PREFIX;
@@ -29,32 +30,40 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-import net.jcip.annotations.Immutable;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.rya.indexing.pcj.fluo.app.FilterResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.FluoStringConverter;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationType;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.query.algebra.AggregateOperator;
+import org.openrdf.query.algebra.Extension;
 import org.openrdf.query.algebra.Filter;
+import org.openrdf.query.algebra.Group;
+import org.openrdf.query.algebra.GroupElem;
 import org.openrdf.query.algebra.Join;
 import org.openrdf.query.algebra.LeftJoin;
 import org.openrdf.query.algebra.Projection;
 import org.openrdf.query.algebra.QueryModelNode;
 import org.openrdf.query.algebra.StatementPattern;
 import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.Var;
 import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
 import org.openrdf.query.parser.ParsedQuery;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import net.jcip.annotations.Immutable;
+
 /**
  * Creates the {@link FluoQuery} metadata that is required by the Fluo
  * application to process a SPARQL query.
@@ -119,7 +128,7 @@ public class SparqlFluoQueryBuilder {
          */
         public Optional<String> getId(final QueryModelNode node) {
             checkNotNull(node);
-            return Optional.fromNullable( nodeIds.get(node) );
+            return Optional.ofNullable( nodeIds.get(node) );
         }
 
         /**
@@ -157,14 +166,15 @@ public class SparqlFluoQueryBuilder {
                 prefix = JOIN_PREFIX;
             } else if(node instanceof Projection) {
                 prefix = QUERY_PREFIX;
+            } else if(node instanceof Extension) {
+                prefix = AGGREGATION_PREFIX;
             } else {
-                throw new IllegalArgumentException("Node must be of type {StatementPattern, Join, Filter, Projection} but was " + node.getClass());
+                throw new IllegalArgumentException("Node must be of type {StatementPattern, Join, Filter, Extension, Projection} but was " + node.getClass());
             }
 
             // Create the unique portion of the id.
             final String unique = UUID.randomUUID().toString().replaceAll("-", "");
 
-
             // Put them together to create the Node ID.
             return prefix + "_" + unique;
         }
@@ -204,6 +214,77 @@ public class SparqlFluoQueryBuilder {
             this.nodeIds = checkNotNull(nodeIds);
         }
 
+        /**
+         * If we encounter an Extension node that contains a Group, then we've found an aggregation.
+         */
+        @Override
+        public void meet(final Extension node) {
+            final TupleExpr arg = node.getArg();
+            if(arg instanceof Group) {
+                final Group group = (Group) arg;
+
+                // Get the Aggregation Node's id.
+                final String aggregationId = nodeIds.getOrMakeId(node);
+
+                // Get the group's child node id. This call forces it to be a supported child type.
+                final TupleExpr child = group.getArg();
+                final String childNodeId = nodeIds.getOrMakeId( child );
+
+                // Get the list of group by binding names.
+                VariableOrder groupByVariableOrder = null;
+                if(!group.getGroupBindingNames().isEmpty()) {
+                    groupByVariableOrder = new VariableOrder(group.getGroupBindingNames());
+                } else {
+                    groupByVariableOrder = new VariableOrder();
+                }
+
+                // The aggregations that need to be performed are the Group Elements.
+                final List<AggregationElement> aggregations = new ArrayList<>();
+                for(final GroupElem groupElem : group.getGroupElements()) {
+                    // Figure out the type of the aggregation.
+                    final AggregateOperator operator = groupElem.getOperator();
+                    final Optional<AggregationType> type = AggregationType.byOperatorClass( operator.getClass() );
+
+                    // If the type is one we support, create the AggregationElement.
+                    if(type.isPresent()) {
+                        final String resultBindingName = groupElem.getName();
+
+                        final AtomicReference<String> aggregatedBindingName = new AtomicReference<>();
+                        groupElem.visitChildren(new QueryModelVisitorBase<RuntimeException>() {
+                            @Override
+                            public void meet(final Var node) {
+                                aggregatedBindingName.set( node.getName() );
+                            }
+                        });
+
+                        aggregations.add( new AggregationElement(type.get(), aggregatedBindingName.get(), resultBindingName) );
+                    }
+                }
+
+                // Update the aggregation's metadata.
+                AggregationMetadata.Builder aggregationBuilder = fluoQueryBuilder.getAggregateBuilder(aggregationId).orNull();
+                if(aggregationBuilder == null) {
+                    aggregationBuilder = AggregationMetadata.builder(aggregationId);
+                    fluoQueryBuilder.addAggregateMetadata(aggregationBuilder);
+                }
+
+                aggregationBuilder.setChildNodeId(childNodeId);
+                aggregationBuilder.setGroupByVariableOrder(groupByVariableOrder);
+                for(final AggregationElement aggregation : aggregations) {
+                    aggregationBuilder.addAggregation(aggregation);
+                }
+
+                // Update the child node's metadata.
+                final Set<String> childVars = getVars(child);
+                final VariableOrder childVarOrder = new VariableOrder(childVars);
+
+                setChildMetadata(childNodeId, childVarOrder, aggregationId);
+            }
+
+            // Walk to the next node.
+            super.meet(node);
+        }
+
         @Override
         public void meet(final StatementPattern node) {
             // Extract metadata that will be stored from the node.
@@ -386,10 +467,21 @@ public class SparqlFluoQueryBuilder {
                     filterBuilder.setParentNodeId(parentNodeId);
                     break;
 
-            case QUERY:
-                throw new IllegalArgumentException("QUERY nodes do not have children.");
-            default:
-                throw new IllegalArgumentException("Unsupported NodeType: " + childType);
+                case AGGREGATION:
+                    AggregationMetadata.Builder aggregationBuilder = fluoQueryBuilder.getAggregateBuilder(childNodeId).orNull();
+                    if(aggregationBuilder == null) {
+                        aggregationBuilder = AggregationMetadata.builder(childNodeId);
+                        fluoQueryBuilder.addAggregateMetadata(aggregationBuilder);
+                    }
+
+                    aggregationBuilder.setVariableOrder(childVarOrder);
+                    aggregationBuilder.setParentNodeId(parentNodeId);
+                    break;
+
+                case QUERY:
+                    throw new IllegalArgumentException("QUERY nodes do not have children.");
+                default:
+                    throw new IllegalArgumentException("Unsupported NodeType: " + childType);
             }
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/BindingSetUtil.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/BindingSetUtil.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/BindingSetUtil.java
new file mode 100644
index 0000000..30f026c
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/BindingSetUtil.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.util;
+
+import static java.util.Objects.requireNonNull;
+
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.query.Binding;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.impl.MapBindingSet;
+
+/**
+ * A utility class that defines functions that make it easier to work with {@link BindingSet} objects.
+ */
+public class BindingSetUtil {
+
+    /**
+     * Create a new {@link BindingSet} that only includes the bindings whose names appear within the {@code variableOrder}.
+     * If no binding is found for a variable, then that binding is just omitted from the resulting object.
+     *
+     * @param variableOrder - Defines which bindings will be kept. (not null)
+     * @param bindingSet - Contains the source {@link Binding}s. (not null)
+     * @return A new {@link BindingSet} containing only the specified bindings.
+     */
+    public static BindingSet keepBindings(final VariableOrder variableOrder, final BindingSet bindingSet) {
+        requireNonNull(variableOrder);
+        requireNonNull(bindingSet);
+
+        final MapBindingSet result = new MapBindingSet();
+        for(final String bindingName : variableOrder) {
+            if(bindingSet.hasBinding(bindingName)) {
+                final Binding binding = bindingSet.getBinding(bindingName);
+                result.addBinding(binding);
+            }
+        }
+        return result;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/RowKeyUtil.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/RowKeyUtil.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/RowKeyUtil.java
new file mode 100644
index 0000000..ffb2320
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/RowKeyUtil.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.util;
+
+import static java.util.Objects.requireNonNull;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.query.BindingSet;
+
+import com.google.common.base.Charsets;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Utility functions for creating the Fluo Row Keys that are used when referencing the binding
+ * set results of a query node.
+ */
+@DefaultAnnotation(NonNull.class)
+public class RowKeyUtil {
+
+    private static final BindingSetStringConverter BS_CONVERTER = new BindingSetStringConverter();
+
+    /**
+     * Creates the Row Key that will be used by a node within the PCJ Fluo application to represent where a specific
+     * result of that node will be placed.
+     *
+     * @param nodeId - Identifies the Node that the Row Key is for. (not null)
+     * @param variableOrder - Specifies which bindings from {@code bindingSet} will be included within the Row Key as
+     *   well as the order they will appear. (not null)
+     * @param bindingSet - The Binding Set whose values will be used to create the Row Key. (not null)
+     * @return A Row Key built using the provided values.
+     */
+    public static Bytes makeRowKey(final String nodeId, final VariableOrder variableOrder, final BindingSet bindingSet) {
+        requireNonNull(nodeId);
+        requireNonNull(variableOrder);
+        requireNonNull(bindingSet);
+
+        // The Row Key starts with the Node ID of the node the result belongs to.
+        String rowId = nodeId + IncrementalUpdateConstants.NODEID_BS_DELIM;
+
+        // Append the String formatted bindings that are included in the Variable Order. The Variable Order also defines
+        // the order the binding will be written to the Row Key. If a Binding is missing for one of the Binding Names
+        // that appears within the Variable Order, then an empty value will be written for that location within the Row Key.
+        rowId += BS_CONVERTER.convert(bindingSet, variableOrder);
+
+        // Format the Row Key as a UTF 8 encoded Bytes object.
+        return Bytes.of( rowId.getBytes(Charsets.UTF_8) );
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
new file mode 100644
index 0000000..99791ee
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.rya.indexing.pcj.fluo.app;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.impl.MapBindingSet;
+
+/**
+ * Tests the methods of {@link VisibilityBindingSetSerDe}.
+ */
+public class VisibilityBindingSetSerDeTest {
+
+    @Test
+    public void roundTrip() throws Exception {
+        final ValueFactory vf = new ValueFactoryImpl();
+
+        final MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("name", vf.createLiteral("Alice"));
+        bs.addBinding("age", vf.createLiteral(5));
+        final VisibilityBindingSet original = new VisibilityBindingSet(bs, "u");
+
+        final VisibilityBindingSetSerDe serde = new VisibilityBindingSetSerDe();
+        final Bytes bytes = serde.serialize(original);
+        final VisibilityBindingSet result = serde.deserialize(bytes);
+
+        assertEquals(original, result);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/command/NewQueryCommand.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/command/NewQueryCommand.java b/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/command/NewQueryCommand.java
index 43dac3c..854798d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/command/NewQueryCommand.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/command/NewQueryCommand.java
@@ -135,7 +135,7 @@ public class NewQueryCommand implements PcjAdminClientCommand {
             // Tell the Fluo PCJ Updater app to maintain the PCJ.
             createPcj.withRyaIntegration(pcjId, pcjStorage, fluo, accumulo, ryaTablePrefix);
 
-        } catch (MalformedQueryException | SailException | QueryEvaluationException | PcjException | RyaDAOException e) {
+        } catch (MalformedQueryException | PcjException | RyaDAOException e) {
             throw new ExecutionException("Could not create and load historic matches into the the Fluo app for the query.", e);
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.demo/src/main/java/org/apache/rya/indexing/pcj/fluo/demo/FluoAndHistoricPcjsDemo.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.demo/src/main/java/org/apache/rya/indexing/pcj/fluo/demo/FluoAndHistoricPcjsDemo.java b/extras/rya.pcj.fluo/pcj.fluo.demo/src/main/java/org/apache/rya/indexing/pcj/fluo/demo/FluoAndHistoricPcjsDemo.java
index 105f697..c8dc737 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.demo/src/main/java/org/apache/rya/indexing/pcj/fluo/demo/FluoAndHistoricPcjsDemo.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.demo/src/main/java/org/apache/rya/indexing/pcj/fluo/demo/FluoAndHistoricPcjsDemo.java
@@ -18,19 +18,29 @@
  */
 package org.apache.rya.indexing.pcj.fluo.demo;
 
+import java.io.IOException;
 import java.util.Set;
 
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.mini.MiniFluo;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.rya.api.domain.RyaStatement;
+import org.apache.rya.api.domain.RyaType;
+import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.api.persist.RyaDAOException;
+import org.apache.rya.api.resolver.RyaToRdfConversions;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PcjException;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
 import org.openrdf.model.Statement;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.MalformedQueryException;
@@ -45,16 +55,6 @@ import org.openrdf.sail.SailException;
 import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
-import org.apache.fluo.api.client.FluoClient;
-import org.apache.fluo.api.mini.MiniFluo;
-import org.apache.rya.accumulo.query.AccumuloRyaQueryEngine;
-import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.api.domain.RyaType;
-import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.api.persist.RyaDAOException;
-import org.apache.rya.api.resolver.RyaToRdfConversions;
-import org.apache.rya.rdftriplestore.RyaSailRepository;
-
 /**
  * Demonstrates historicly added Rya statements that are stored within the core
  * Rya tables joining with newly streamed statements into the Fluo application.
@@ -181,7 +181,7 @@ public class FluoAndHistoricPcjsDemo implements Demo {
             // Tell the Fluo app to maintain it.
             new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, ryaTablePrefix);
 
-        } catch (MalformedQueryException | SailException | QueryEvaluationException | PcjException | RyaDAOException e) {
+        } catch (MalformedQueryException | PcjException | RyaDAOException e) {
             throw new DemoExecutionException("Error while using Fluo to compute and export historic matches, so the demo can not continue. Exiting.", e);
         }
 
@@ -192,11 +192,11 @@ public class FluoAndHistoricPcjsDemo implements Demo {
 
         // 5. Show that the Fluo app exported the results to the PCJ table in Accumulo.
         log.info("The following Binding Sets were exported to the PCJ with ID '" + pcjId + "' in Rya:");
-        try {
-            for(final BindingSet result : pcjStorage.listResults(pcjId)) {
-                log.info("    " + result);
+        try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+            while(resultsIt.hasNext()) {
+                log.info("    " + resultsIt.next());
             }
-        } catch (final PCJStorageException e) {
+        } catch (final Exception e) {
             throw new DemoExecutionException("Could not fetch the PCJ's reuslts from Accumulo. Exiting.", e);
         }
         waitForEnter();
@@ -257,11 +257,11 @@ public class FluoAndHistoricPcjsDemo implements Demo {
 
         // 8. Show the new results have been exported to the PCJ table in Accumulo.
         log.info("The following Binding Sets were exported to the PCJ with ID '" + pcjId + "' in Rya:");
-        try {
-            for(final BindingSet result : pcjStorage.listResults(pcjId)) {
-                log.info("    " + result);
+        try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+            while(resultsIt.hasNext()) {
+                log.info("    " + resultsIt.next());
             }
-        } catch (final PCJStorageException e) {
+        } catch (final Exception e) {
             throw new DemoExecutionException("Could not fetch the PCJ's reuslts from Accumulo. Exiting.", e);
         }
         log.info("");

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml b/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
index ab99ecd..9263362 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
@@ -41,6 +41,11 @@
             <groupId>org.apache.rya</groupId>
             <artifactId>rya.indexing</artifactId>
         </dependency>
+         <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-api</artifactId>
+        </dependency>
+
         <!-- Testing dependencies. -->
         <dependency>
             <groupId>org.apache.fluo</groupId>
@@ -86,8 +91,10 @@
                 </exclusion>
             </exclusions>
         </dependency>
-
-
-
+        <dependency>
+             <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-recipes-test</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
 </project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/ITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/ITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/ITBase.java
deleted file mode 100644
index 6e696c8..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/ITBase.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.MiniAccumuloConfig;
-import org.apache.fluo.api.client.FluoAdmin;
-import org.apache.fluo.api.client.FluoAdmin.AlreadyInitializedException;
-import org.apache.fluo.api.client.FluoAdmin.TableExistsException;
-import org.apache.fluo.api.client.FluoClient;
-import org.apache.fluo.api.client.FluoFactory;
-import org.apache.fluo.api.client.Snapshot;
-import org.apache.fluo.api.client.scanner.CellScanner;
-import org.apache.fluo.api.config.FluoConfiguration;
-import org.apache.fluo.api.config.ObserverSpecification;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.RowColumnValue;
-import org.apache.fluo.api.mini.MiniFluo;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.api.client.Install.InstallConfiguration;
-import org.apache.rya.api.client.RyaClient;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
-import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
-import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.api.domain.RyaStatement.RyaStatementBuilder;
-import org.apache.rya.api.domain.RyaType;
-import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.api.resolver.RyaToRdfConversions;
-import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
-import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaExportParameters;
-import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
-import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
-import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
-import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
-import org.apache.rya.rdftriplestore.RyaSailRepository;
-import org.apache.rya.sail.config.RyaSailFactory;
-import org.apache.zookeeper.ClientCnxn;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.openrdf.model.Statement;
-import org.openrdf.model.vocabulary.XMLSchema;
-import org.openrdf.query.Binding;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.MapBindingSet;
-import org.openrdf.repository.RepositoryConnection;
-import org.openrdf.sail.Sail;
-
-import com.google.common.io.Files;
-
-import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
-
-/**
- * Integration tests that ensure the Fluo application processes PCJs results
- * correctly.
- * <p>
- * This class is being ignored because it doesn't contain any unit tests.
- */
-public abstract class ITBase {
-    private static final Logger log = Logger.getLogger(ITBase.class);
-
-    // Rya data store and connections.
-    protected static final String RYA_INSTANCE_NAME = "demo_";
-    protected RyaSailRepository ryaRepo = null;
-    protected RepositoryConnection ryaConn = null;
-
-    // Mini Accumulo Cluster
-    protected static final String ACCUMULO_USER = "root";
-    protected static final String ACCUMULO_PASSWORD = "password";
-    protected MiniAccumuloCluster cluster;
-    protected static Connector accumuloConn = null;
-    protected String instanceName = null;
-    protected String zookeepers = null;
-
-    // Fluo data store and connections.
-    protected static final String FLUO_APP_NAME = "IntegrationTests";
-    protected MiniFluo fluo = null;
-    protected FluoClient fluoClient = null;
-
-    @BeforeClass
-    public static void killLoudLogs() {
-        Logger.getRootLogger().setLevel(Level.ERROR);
-        Logger.getLogger(ClientCnxn.class).setLevel(Level.OFF);
-    }
-
-    @Before
-    public void setupMiniResources() throws Exception {
-        // Will set defaults for log4J
-        org.apache.log4j.BasicConfigurator.configure();
-    	// Initialize the Mini Accumulo that will be used to host Rya and Fluo.
-    	setupMiniAccumulo();
-
-        // Initialize the Mini Fluo that will be used to store created queries.
-        fluo = startMiniFluo();
-        fluoClient = FluoFactory.newClient(fluo.getClientConfiguration());
-
-        // Initialize the Rya that will be used by the tests.
-        ryaRepo = setupRya(instanceName, zookeepers);
-        ryaConn = ryaRepo.getConnection();
-    }
-
-    @After
-    public void shutdownMiniResources() {
-        if (ryaConn != null) {
-            try {
-                log.info("Shutting down Rya Connection.");
-                ryaConn.close();
-                log.info("Rya Connection shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Rya Connection.", e);
-            }
-        }
-
-        if (ryaRepo != null) {
-            try {
-                log.info("Shutting down Rya Repo.");
-                ryaRepo.shutDown();
-                log.info("Rya Repo shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Rya Repo.", e);
-            }
-        }
-
-        if (fluoClient != null) {
-            try {
-                log.info("Shutting down Fluo Client.");
-                fluoClient.close();
-                log.info("Fluo Client shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Fluo Client.", e);
-            }
-        }
-
-        if (fluo != null) {
-            try {
-                log.info("Shutting down Mini Fluo.");
-                fluo.close();
-                log.info("Mini Fluo shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Mini Fluo.", e);
-            }
-        }
-
-        if(cluster != null) {
-            try {
-                log.info("Shutting down the Mini Accumulo being used as a Rya store.");
-                cluster.stop();
-                log.info("Mini Accumulo being used as a Rya store shut down.");
-            } catch(final Exception e) {
-                log.error("Could not shut down the Mini Accumulo.", e);
-            }
-        }
-    }
-
-    /**
-     * A helper fuction for creating a {@link BindingSet} from an array of
-     * {@link Binding}s.
-     *
-     * @param bindings - The bindings to include in the set. (not null)
-     * @return A {@link BindingSet} holding the bindings.
-     */
-    protected static BindingSet makeBindingSet(final Binding... bindings) {
-        final MapBindingSet bindingSet = new MapBindingSet();
-        for (final Binding binding : bindings) {
-            bindingSet.addBinding(binding);
-        }
-        return bindingSet;
-    }
-
-    /**
-     * A helper function for creating a {@link RyaStatement} that represents a
-     * Triple.
-     *
-     * @param subject - The Subject of the Triple. (not null)
-     * @param predicate - The Predicate of the Triple. (not null)
-     * @param object - The Object of the Triple. (not null)
-     * @return A Triple as a {@link RyaStatement}.
-     */
-    protected static RyaStatement makeRyaStatement(final String subject, final String predicate, final String object) {
-        checkNotNull(subject);
-        checkNotNull(predicate);
-        checkNotNull(object);
-
-        final RyaStatementBuilder builder = RyaStatement.builder().setSubject(new RyaURI(subject))
-                .setPredicate(new RyaURI(predicate));
-
-        if (object.startsWith("http://") || object.startsWith("tag:")) {
-            builder.setObject(new RyaURI(object));
-        } else {
-            builder.setObject(new RyaType(object));
-        }
-
-        return builder.build();
-    }
-
-    /**
-     * A helper function for creating a {@link RyaStatement} that represents a Triple.
-     * This overload takes a typed literal for the object. Prepare it like this for example specify the type (wktLiteral) and the value (Point...):
-     * makeRyaStatement(s, p, new RyaType(new URIImpl("http://www.opengis.net/ont/geosparql#wktLiteral"), "Point(-77.03524 38.889468)")) //
-     *
-     * @param subject - The Subject of the Triple. (not null)
-     * @param predicate - The Predicate of the Triple. (not null)
-     * @param object - The Object of the Triple. (not null)
-     * @return A Triple as a {@link RyaStatement}.
-     */
-    protected static RyaStatement makeRyaStatement(final String subject, final String predicate, final RyaType object) {
-        checkNotNull(subject);
-        checkNotNull(predicate);
-        checkNotNull(object);
-
-        final RyaStatementBuilder builder = RyaStatement.builder()//
-                        .setSubject(new RyaURI(subject))//
-                        .setPredicate(new RyaURI(predicate))//
-                        .setObject(object);
-        return builder.build();
-    }
-
-    /**
-     * A helper function for creating a {@link RyaStatement} that represents a Triple with an integer.
-     *
-     * @param subject - The Subject of the Triple. (not null)
-     * @param predicate - The Predicate of the Triple. (not null)
-     * @param object - The Object of the Triple, an integer value (int).
-     * @return A Triple as a {@link RyaStatement}.
-     */
-    protected static RyaStatement makeRyaStatement(final String subject, final String predicate, final int object) {
-        checkNotNull(subject);
-        checkNotNull(predicate);
-
-        return RyaStatement.builder().setSubject(new RyaURI(subject)).setPredicate(new RyaURI(predicate))
-                .setObject(new RyaType(XMLSchema.INT, "" + object)).build();
-    }
-
-    /**
-     * A helper function for creating a Sesame {@link Statement} that represents
-     * a Triple..
-     *
-     * @param subject - The Subject of the Triple. (not null)
-     * @param predicate - The Predicate of the Triple. (not null)
-     * @param object - The Object of the Triple. (not null)
-     * @return A Triple as a {@link Statement}.
-     */
-    protected static Statement makeStatement(final String subject, final String predicate, final String object) {
-        checkNotNull(subject);
-        checkNotNull(predicate);
-        checkNotNull(object);
-
-        final RyaStatement ryaStmt = makeRyaStatement(subject, predicate, object);
-        return RyaToRdfConversions.convertStatement(ryaStmt);
-    }
-
-    /**
-     * Fetches the binding sets that are the results of a specific SPARQL query from the Fluo table.
-     *
-     * @param fluoClient- A connection to the Fluo table where the results reside. (not null)
-     * @param sparql - This query's results will be fetched. (not null)
-     * @return The binding sets for the query's results.
-     */
-    protected static Set<BindingSet> getQueryBindingSetValues(final FluoClient fluoClient, final String sparql) {
-        final Set<BindingSet> bindingSets = new HashSet<>();
-
-        try (Snapshot snapshot = fluoClient.newSnapshot()) {
-            final String queryId = snapshot.get(Bytes.of(sparql), FluoQueryColumns.QUERY_ID).toString();
-
-            // Fetch the query's variable order.
-            final QueryMetadata queryMetadata = new FluoQueryMetadataDAO().readQueryMetadata(snapshot, queryId);
-            final VariableOrder varOrder = queryMetadata.getVariableOrder();
-
-            CellScanner cellScanner = snapshot.scanner().fetch(FluoQueryColumns.QUERY_BINDING_SET).build();
-            final BindingSetStringConverter converter = new BindingSetStringConverter();
-
-           Iterator<RowColumnValue> iter = cellScanner.iterator();
-            
-            while (iter.hasNext()) {
-            	final String bindingSetString = iter.next().getsValue();
-                final BindingSet bindingSet = converter.convert(bindingSetString, varOrder);
-                bindingSets.add(bindingSet);
-            }
-        }
-
-        return bindingSets;
-    }
-
-    private void setupMiniAccumulo() throws IOException, InterruptedException, AccumuloException, AccumuloSecurityException {
-    	final File miniDataDir = Files.createTempDir();
-
-    	// Setup and start the Mini Accumulo.
-    	final MiniAccumuloConfig cfg = new MiniAccumuloConfig(miniDataDir, ACCUMULO_PASSWORD);
-    	cluster = new MiniAccumuloCluster(cfg);
-    	cluster.start();
-
-    	// Store a connector to the Mini Accumulo.
-    	instanceName = cluster.getInstanceName();
-    	zookeepers = cluster.getZooKeepers();
-
-    	final Instance instance = new ZooKeeperInstance(instanceName, zookeepers);
-    	accumuloConn = instance.getConnector(ACCUMULO_USER, new PasswordToken(ACCUMULO_PASSWORD));
-    }
-
-     /**
-      * Sets up a Rya instance.
-      */
-    protected static RyaSailRepository setupRya(final String instanceName, final String zookeepers) throws Exception {
-        checkNotNull(instanceName);
-        checkNotNull(zookeepers);
-
-        // Install the Rya instance to the mini accumulo cluster.
-        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
-                ACCUMULO_USER,
-                ACCUMULO_PASSWORD.toCharArray(),
-                instanceName,
-                zookeepers), accumuloConn);
-
-        ryaClient.getInstall().install(RYA_INSTANCE_NAME, InstallConfiguration.builder()
-                .setEnableTableHashPrefix(false)
-                .setEnableFreeTextIndex(true)
-                .setEnableEntityCentricIndex(true)
-                .setEnableGeoIndex(true)
-                .setEnableTemporalIndex(true)
-                .setEnablePcjIndex(true)
-                .setFluoPcjAppName(FLUO_APP_NAME)
-                .build());
-
-        // Connect to the Rya instance that was just installed.
-        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
-        final Sail sail = RyaSailFactory.getInstance(conf);
-        final RyaSailRepository ryaRepo = new RyaSailRepository(sail);
-        return ryaRepo;
-    }
-
-    protected static AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
-        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
-        conf.setTablePrefix(RYA_INSTANCE_NAME);
-        // Accumulo connection information.
-        conf.set(ConfigUtils.CLOUDBASE_USER, ACCUMULO_USER);
-        conf.set(ConfigUtils.CLOUDBASE_PASSWORD, ACCUMULO_PASSWORD);
-        conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instanceName);
-        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zookeepers);
-        conf.set(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, "");
-        // PCJ configuration information.
-        conf.set(ConfigUtils.USE_PCJ, "true");
-        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
-        conf.set(ConfigUtils.FLUO_APP_NAME, FLUO_APP_NAME);
-        conf.set(ConfigUtils.PCJ_STORAGE_TYPE,
-                PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
-        conf.set(ConfigUtils.PCJ_UPDATER_TYPE,
-                PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
-
-        return conf;
-    }
-
-    protected MiniFluo startMiniFluo() throws AlreadyInitializedException, TableExistsException {
-        // Setup the observers that will be used by the Fluo PCJ Application.
-        final List<ObserverSpecification> observers = new ArrayList<>();
-        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
-        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
-        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
-        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
-
-        // Set export details for exporting from Fluo to a Rya repository and a subscriber queue.
-        final HashMap<String, String> exportParams = new HashMap<>();
-        setExportParameters(exportParams);
-        
-        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
-        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
-        observers.add(exportObserverConfig);
-
-        // Configure how the mini fluo will run.
-        final FluoConfiguration config = new FluoConfiguration();
-        config.setMiniStartAccumulo(false);
-        config.setAccumuloInstance(instanceName);
-        config.setAccumuloUser(ACCUMULO_USER);
-        config.setAccumuloPassword(ACCUMULO_PASSWORD);
-        config.setInstanceZookeepers(zookeepers + "/fluo");
-        config.setAccumuloZookeepers(zookeepers);
-
-        config.setApplicationName(FLUO_APP_NAME);
-        config.setAccumuloTable("fluo" + FLUO_APP_NAME);
-
-        config.addObservers(observers);
-
-        FluoFactory.newAdmin(config).initialize(new FluoAdmin.InitializationOptions().setClearTable(true).setClearZookeeper(true) );
-        return FluoFactory.newMiniFluo(config);
-    }
-
-    /**
-     * Set export details for exporting from Fluo to a Rya repository and a subscriber queue.
-     * Override this if you have custom export destinations.
-     * 
-     * @param exportParams
-     */
-    protected void setExportParameters(final HashMap<String, String> exportParams) {
-        final RyaExportParameters ryaParams = new RyaExportParameters(exportParams);
-        ryaParams.setExportToRya(true);
-        ryaParams.setRyaInstanceName(RYA_INSTANCE_NAME);
-        ryaParams.setAccumuloInstanceName(instanceName);
-        ryaParams.setZookeeperServers(zookeepers);
-        ryaParams.setExporterUsername(ITBase.ACCUMULO_USER);
-        ryaParams.setExporterPassword(ITBase.ACCUMULO_PASSWORD);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
new file mode 100644
index 0000000..cd84cb9
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo;
+
+import static org.junit.Assert.assertEquals;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.config.ObserverSpecification;
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
+import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
+import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.openrdf.sail.Sail;
+
+import kafka.admin.AdminUtils;
+import kafka.admin.RackAwareMode;
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServer;
+import kafka.utils.MockTime;
+import kafka.utils.TestUtils;
+import kafka.utils.Time;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import kafka.zk.EmbeddedZookeeper;
+
+/**
+ * The base Integration Test class used for Fluo applications that export to a Kafka topic.
+ */
+public class KafkaExportITBase extends AccumuloExportITBase {
+
+    protected static final String RYA_INSTANCE_NAME = "test_";
+
+    private static final String ZKHOST = "127.0.0.1";
+    private static final String BROKERHOST = "127.0.0.1";
+    private static final String BROKERPORT = "9092";
+    private ZkUtils zkUtils;
+    private KafkaServer kafkaServer;
+    private EmbeddedZookeeper zkServer;
+    private ZkClient zkClient;
+
+    // The Rya instance statements are written to that will be fed into the Fluo app.
+    private RyaSailRepository ryaSailRepo = null;
+
+    /**
+     * Add info about the Kafka queue/topic to receive the export.
+     *
+     * @see org.apache.rya.indexing.pcj.fluo.ITBase#setExportParameters(java.util.HashMap)
+     */
+    @Override
+    protected void preFluoInitHook() throws Exception {
+        // Setup the observers that will be used by the Fluo PCJ Application.
+        final List<ObserverSpecification> observers = new ArrayList<>();
+        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
+        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
+        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
+        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
+        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
+
+        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
+        final HashMap<String, String> exportParams = new HashMap<>();
+
+        final KafkaExportParameters kafkaParams = new KafkaExportParameters(exportParams);
+        kafkaParams.setExportToKafka(true);
+
+        // Configure the Kafka Producer
+        final Properties producerConfig = new Properties();
+        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
+        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
+        kafkaParams.addAllProducerConfig(producerConfig);
+
+        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
+        observers.add(exportObserverConfig);
+
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
+    }
+
+    /**
+     * setup mini kafka and call the super to setup mini fluo
+     *
+     * @see org.apache.rya.indexing.pcj.fluo.ITBase#setupMiniResources()
+     */
+    @Before
+    public void setupKafka() throws Exception {
+        // Install an instance of Rya on the Accumulo cluster.
+        installRyaInstance();
+
+        // Setup Kafka.
+        zkServer = new EmbeddedZookeeper();
+        final String zkConnect = ZKHOST + ":" + zkServer.port();
+        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
+        zkUtils = ZkUtils.apply(zkClient, false);
+
+        // setup Broker
+        final Properties brokerProps = new Properties();
+        brokerProps.setProperty("zookeeper.connect", zkConnect);
+        brokerProps.setProperty("broker.id", "0");
+        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
+        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
+        final KafkaConfig config = new KafkaConfig(brokerProps);
+        final Time mock = new MockTime();
+        kafkaServer = TestUtils.createServer(config, mock);
+    }
+
+    @After
+    public void teardownRya() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Uninstall the instance of Rya.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getUninstall().uninstall(RYA_INSTANCE_NAME);
+
+        // Shutdown the repo.
+        ryaSailRepo.shutDown();
+    }
+
+    private void installRyaInstance() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Install the Rya instance to the mini accumulo cluster.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getInstall().install(RYA_INSTANCE_NAME, InstallConfiguration.builder()
+                .setEnableTableHashPrefix(false)
+                .setEnableFreeTextIndex(false)
+                .setEnableEntityCentricIndex(false)
+                .setEnableGeoIndex(false)
+                .setEnableTemporalIndex(false)
+                .setEnablePcjIndex(true)
+                .setFluoPcjAppName( super.getFluoConfiguration().getApplicationName() )
+                .build());
+
+        // Connect to the Rya instance that was just installed.
+        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
+        final Sail sail = RyaSailFactory.getInstance(conf);
+        ryaSailRepo = new RyaSailRepository(sail);
+    }
+
+    protected AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(RYA_INSTANCE_NAME);
+
+        // Accumulo connection information.
+        conf.setAccumuloUser(AccumuloExportITBase.ACCUMULO_USER);
+        conf.setAccumuloPassword(AccumuloExportITBase.ACCUMULO_PASSWORD);
+        conf.setAccumuloInstance(super.getAccumuloConnector().getInstance().getInstanceName());
+        conf.setAccumuloZookeepers(super.getAccumuloConnector().getInstance().getZooKeepers());
+        conf.setAuths("");
+
+
+        // PCJ configuration information.
+        conf.set(ConfigUtils.USE_PCJ, "true");
+        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
+        conf.set(ConfigUtils.FLUO_APP_NAME, super.getFluoConfiguration().getApplicationName());
+        conf.set(ConfigUtils.PCJ_STORAGE_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
+        conf.set(ConfigUtils.PCJ_UPDATER_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
+
+        conf.setDisplayQueryPlan(true);
+
+        return conf;
+    }
+
+    /**
+     * @return A {@link RyaSailRepository} that is connected to the Rya instance that statements are loaded into.
+     */
+    protected RyaSailRepository getRyaSailRepository() throws Exception {
+        return ryaSailRepo;
+    }
+
+    /**
+     * Close the Kafka mini server and mini-zookeeper.
+     *
+     * @see org.apache.rya.indexing.pcj.fluo.ITBase#shutdownMiniResources()
+     */
+    @After
+    public void teardownKafka() {
+        kafkaServer.shutdown();
+        zkClient.close();
+        zkServer.shutdown();
+    }
+
+    /**
+     * Test Kafka without Rya code to make sure Kafka works in this environment.
+     * If this test fails then it's a testing environment issue, not a problem with Rya.
+     * Source: https://github.com/asmaier/mini-kafka
+     */
+    @Test
+    public void embeddedKafkaTest() throws Exception {
+        // create topic
+        final String topic = "testTopic";
+        AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
+
+        // setup producer
+        final Properties producerProps = new Properties();
+        producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
+        producerProps.setProperty("key.serializer","org.apache.kafka.common.serialization.IntegerSerializer");
+        producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        final KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps);
+
+        // setup consumer
+        final Properties consumerProps = new Properties();
+        consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
+        consumerProps.setProperty("group.id", "group0");
+        consumerProps.setProperty("client.id", "consumer0");
+        consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.IntegerDeserializer");
+        consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+
+        // to make sure the consumer starts from the beginning of the topic
+        consumerProps.put("auto.offset.reset", "earliest");
+
+        final KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
+        consumer.subscribe(Arrays.asList(topic));
+
+        // send message
+        final ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, 42, "test-message".getBytes(StandardCharsets.UTF_8));
+        producer.send(data);
+        producer.close();
+
+        // starting consumer
+        final ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
+        assertEquals(1, records.count());
+        final Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
+        final ConsumerRecord<Integer, byte[]> record = recordIterator.next();
+        assertEquals(42, (int) record.key());
+        assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
+        consumer.close();
+    }
+
+    protected KafkaConsumer<Integer, VisibilityBindingSet> makeConsumer(final String TopicName) {
+        // setup consumer
+        final Properties consumerProps = new Properties();
+        consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
+        consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
+        consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
+        consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
+        consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
+
+        // to make sure the consumer starts from the beginning of the topic
+        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+        final KafkaConsumer<Integer, VisibilityBindingSet> consumer = new KafkaConsumer<>(consumerProps);
+        consumer.subscribe(Arrays.asList(TopicName));
+        return consumer;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
new file mode 100644
index 0000000..5fe999f
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.config.ObserverSpecification;
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
+import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaExportParameters;
+import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.openrdf.sail.Sail;
+
+/**
+ * The base Integration Test class used for Fluo applications that export to a Rya PCJ Index.
+ */
+public class RyaExportITBase extends AccumuloExportITBase {
+
+    protected static final String RYA_INSTANCE_NAME = "test_";
+
+    private RyaSailRepository ryaSailRepo = null;
+
+    public RyaExportITBase() {
+        // Indicates that MiniFluo should be started before each test.
+        super(true);
+    }
+
+    @BeforeClass
+    public static void setupLogging() {
+        BasicConfigurator.configure();
+        Logger.getRootLogger().setLevel(Level.ERROR);
+    }
+
+    @Override
+    protected void preFluoInitHook() throws Exception {
+        // Setup the observers that will be used by the Fluo PCJ Application.
+        final List<ObserverSpecification> observers = new ArrayList<>();
+        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
+        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
+        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
+        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
+        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
+
+        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
+        final HashMap<String, String> exportParams = new HashMap<>();
+        final RyaExportParameters ryaParams = new RyaExportParameters(exportParams);
+        ryaParams.setExportToRya(true);
+        ryaParams.setRyaInstanceName(RYA_INSTANCE_NAME);
+        ryaParams.setAccumuloInstanceName(super.getMiniAccumuloCluster().getInstanceName());
+        ryaParams.setZookeeperServers(super.getMiniAccumuloCluster().getZooKeepers());
+        ryaParams.setExporterUsername(ACCUMULO_USER);
+        ryaParams.setExporterPassword(ACCUMULO_PASSWORD);
+
+        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
+        observers.add(exportObserverConfig);
+
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
+    }
+
+    @Before
+    public void setupRya() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Install the Rya instance to the mini accumulo cluster.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getInstall().install(RYA_INSTANCE_NAME, InstallConfiguration.builder()
+                .setEnableTableHashPrefix(false)
+                .setEnableFreeTextIndex(false)
+                .setEnableEntityCentricIndex(false)
+                .setEnableGeoIndex(false)
+                .setEnableTemporalIndex(false)
+                .setEnablePcjIndex(true)
+                .setFluoPcjAppName( super.getFluoConfiguration().getApplicationName() )
+                .build());
+
+        // Connect to the Rya instance that was just installed.
+        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
+        final Sail sail = RyaSailFactory.getInstance(conf);
+        ryaSailRepo = new RyaSailRepository(sail);
+    }
+
+    @After
+    public void teardownRya() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Uninstall the instance of Rya.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getUninstall().uninstall(RYA_INSTANCE_NAME);
+
+        // Shutdown the repo.
+        ryaSailRepo.shutDown();
+    }
+
+    /**
+     * @return A {@link RyaSailRepository} that is connected to the Rya instance that statements are loaded into.
+     */
+    protected RyaSailRepository getRyaSailRepository() throws Exception {
+        return ryaSailRepo;
+    }
+
+    protected AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(RYA_INSTANCE_NAME);
+
+        // Accumulo connection information.
+        conf.setAccumuloUser(AccumuloExportITBase.ACCUMULO_USER);
+        conf.setAccumuloPassword(AccumuloExportITBase.ACCUMULO_PASSWORD);
+        conf.setAccumuloInstance(super.getAccumuloConnector().getInstance().getInstanceName());
+        conf.setAccumuloZookeepers(super.getAccumuloConnector().getInstance().getZooKeepers());
+        conf.setAuths("");
+
+        // PCJ configuration information.
+        conf.set(ConfigUtils.USE_PCJ, "true");
+        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
+        conf.set(ConfigUtils.FLUO_APP_NAME, super.getFluoConfiguration().getApplicationName());
+        conf.set(ConfigUtils.PCJ_STORAGE_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
+        conf.set(ConfigUtils.PCJ_UPDATER_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
+
+        conf.setDisplayQueryPlan(true);
+
+        return conf;
+    }
+}
\ No newline at end of file


[8/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializerTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializerTest.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializerTest.java
new file mode 100644
index 0000000..d904d83
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerializerTest.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjSerializer;
+import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter;
+import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.junit.Test;
+import org.openrdf.model.impl.LiteralImpl;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.query.impl.MapBindingSet;
+
+import org.apache.rya.api.resolver.RyaTypeResolverException;
+
+/**
+ * Tests the methods of {@link AccumuloPcjSerializer}.
+ */
+public class AccumuloPcjSerializerTest {
+
+    /**
+     * The BindingSet has fewer Bindings than there are variables in the variable
+     * order, but they are all in the variable order. This is the case where
+     * the missing bindings were optional.
+     */
+    @Test
+    public void serialize_bindingsSubsetOfVarOrder() throws BindingSetConversionException {
+        // Setup the Binding Set.
+        final MapBindingSet originalBindingSet = new MapBindingSet();
+        originalBindingSet.addBinding("x", new URIImpl("http://a"));
+        originalBindingSet.addBinding("y", new URIImpl("http://b"));
+
+        // Setup the variable order.
+        final VariableOrder varOrder = new VariableOrder("x", "a", "y", "b");
+
+        // Create the byte[] representation of the BindingSet.
+        BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+        byte[] serialized = converter.convert(originalBindingSet, varOrder);
+
+        // Deserialize the byte[] back into the binding set.
+        BindingSet deserialized = converter.convert(serialized, varOrder);
+
+        // Ensure the deserialized value matches the original.
+        assertEquals(originalBindingSet, deserialized);
+    }
+
+    /**
+     * The BindingSet has more Bindings than there are variables in the variable order.
+     * This is the case where a Group By clause does not include all of the Bindings that
+     * are in the Binding Set.
+     */
+    @Test
+    public void serialize_bindingNotInVariableOrder() throws RyaTypeResolverException, BindingSetConversionException {
+        // Setup the Binding Set.
+        final MapBindingSet originalBindingSet = new MapBindingSet();
+        originalBindingSet.addBinding("x", new URIImpl("http://a"));
+        originalBindingSet.addBinding("y", new URIImpl("http://b"));
+        originalBindingSet.addBinding("z", new URIImpl("http://d"));
+
+        // Setup the variable order.
+        final VariableOrder varOrder = new VariableOrder("x", "y");
+
+        // Serialize the Binding Set.
+        BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+        byte[] serialized = converter.convert(originalBindingSet, varOrder);
+        
+        // Deserialize it again.
+        BindingSet deserialized = converter.convert(serialized, varOrder);
+        
+        // Show that it only contains the bindings that were part of the Variable Order.
+        MapBindingSet expected = new MapBindingSet();
+        expected.addBinding("x", new URIImpl("http://a"));
+        expected.addBinding("y", new URIImpl("http://b"));
+        
+        assertEquals(expected, deserialized);
+    }
+
+	@Test
+	public void basicShortUriBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new URIImpl("http://uri1"));
+		bs.addBinding("Y",new URIImpl("http://uri2"));
+		final VariableOrder varOrder = new VariableOrder("X","Y");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+
+	@Test
+	public void basicLongUriBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new URIImpl("http://uri1"));
+		bs.addBinding("Y",new URIImpl("http://uri2"));
+		bs.addBinding("Z",new URIImpl("http://uri3"));
+		bs.addBinding("A",new URIImpl("http://uri4"));
+		bs.addBinding("B",new URIImpl("http://uri5"));
+		final VariableOrder varOrder = new VariableOrder("X","Y","Z","A","B");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+
+	@Test
+	public void basicShortStringLiteralBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new LiteralImpl("literal1"));
+		bs.addBinding("Y",new LiteralImpl("literal2"));
+		final VariableOrder varOrder = new VariableOrder("X","Y");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+
+	@Test
+	public void basicShortMixLiteralBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new LiteralImpl("literal1"));
+		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
+		final VariableOrder varOrder = new VariableOrder("X","Y");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+
+	@Test
+	public void basicLongMixLiteralBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new LiteralImpl("literal1"));
+		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
+		bs.addBinding("Z",new LiteralImpl("5.0", new URIImpl("http://www.w3.org/2001/XMLSchema#double")));
+		bs.addBinding("W",new LiteralImpl("1000", new URIImpl("http://www.w3.org/2001/XMLSchema#long")));
+		final VariableOrder varOrder = new VariableOrder("W","X","Y","Z");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+
+	@Test
+	public void basicMixUriLiteralBsTest() throws BindingSetConversionException {
+		final QueryBindingSet bs = new QueryBindingSet();
+		bs.addBinding("X",new LiteralImpl("literal1"));
+		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
+		bs.addBinding("Z",new LiteralImpl("5.0", new URIImpl("http://www.w3.org/2001/XMLSchema#double")));
+		bs.addBinding("W",new LiteralImpl("1000", new URIImpl("http://www.w3.org/2001/XMLSchema#long")));
+		bs.addBinding("A",new URIImpl("http://uri1"));
+		bs.addBinding("B",new URIImpl("http://uri2"));
+		bs.addBinding("C",new URIImpl("http://uri3"));
+		final VariableOrder varOrder = new VariableOrder("A","W","X","Y","Z","B","C");
+
+		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
+		final byte[] byteVal = converter.convert(bs, varOrder);
+		final BindingSet newBs = converter.convert(byteVal, varOrder);
+		assertEquals(bs, newBs);
+	}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerialzerTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerialzerTest.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerialzerTest.java
deleted file mode 100644
index d69205e..0000000
--- a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPcjSerialzerTest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-package org.apache.rya.indexing.pcj.storage.accumulo;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjSerializer;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
-import org.junit.Test;
-import org.openrdf.model.impl.LiteralImpl;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.algebra.evaluation.QueryBindingSet;
-import org.openrdf.query.impl.MapBindingSet;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.rya.api.resolver.RyaTypeResolverException;
-
-/**
- * Tests the methods of {@link AccumuloPcjSerialzer}.
- */
-public class AccumuloPcjSerialzerTest {
-
-    /**
-     * The BindingSet has fewer Bindings than there are variables in the variable
-     * order, but they are all in the variable order. This is the case where
-     * the missing bindings were optional.
-     */
-    @Test
-    public void serialize_bindingsSubsetOfVarOrder() throws BindingSetConversionException {
-        // Setup the Binding Set.
-        final MapBindingSet originalBindingSet = new MapBindingSet();
-        originalBindingSet.addBinding("x", new URIImpl("http://a"));
-        originalBindingSet.addBinding("y", new URIImpl("http://b"));
-
-        // Setup the variable order.
-        final VariableOrder varOrder = new VariableOrder("x", "a", "y", "b");
-
-        // Create the byte[] representation of the BindingSet.
-        BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-        byte[] serialized = converter.convert(originalBindingSet, varOrder);
-
-        // Deserialize the byte[] back into the binding set.
-        BindingSet deserialized = converter.convert(serialized, varOrder);
-
-        // Ensure the deserialized value matches the original.
-        assertEquals(originalBindingSet, deserialized);
-    }
-
-    /**
-     * The BindingSet has a Binding whose name is not in the variable order.
-     * This is illegal.
-     */
-    @Test(expected = IllegalArgumentException.class)
-    public void serialize_bindingNotInVariableOrder() throws RyaTypeResolverException, BindingSetConversionException {
-        // Setup the Binding Set.
-        final MapBindingSet originalBindingSet = new MapBindingSet();
-        originalBindingSet.addBinding("x", new URIImpl("http://a"));
-        originalBindingSet.addBinding("y", new URIImpl("http://b"));
-        originalBindingSet.addBinding("z", new URIImpl("http://d"));
-
-        // Setup the variable order.
-        final VariableOrder varOrder = new VariableOrder("x", "y");
-
-        // Create the byte[] representation of the BindingSet. This will throw an exception.
-        BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-        converter.convert(originalBindingSet, varOrder);
-    }
-
-	@Test
-	public void basicShortUriBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new URIImpl("http://uri1"));
-		bs.addBinding("Y",new URIImpl("http://uri2"));
-		final VariableOrder varOrder = new VariableOrder("X","Y");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-
-	@Test
-	public void basicLongUriBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new URIImpl("http://uri1"));
-		bs.addBinding("Y",new URIImpl("http://uri2"));
-		bs.addBinding("Z",new URIImpl("http://uri3"));
-		bs.addBinding("A",new URIImpl("http://uri4"));
-		bs.addBinding("B",new URIImpl("http://uri5"));
-		final VariableOrder varOrder = new VariableOrder("X","Y","Z","A","B");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-
-	@Test
-	public void basicShortStringLiteralBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new LiteralImpl("literal1"));
-		bs.addBinding("Y",new LiteralImpl("literal2"));
-		final VariableOrder varOrder = new VariableOrder("X","Y");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-
-	@Test
-	public void basicShortMixLiteralBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new LiteralImpl("literal1"));
-		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
-		final VariableOrder varOrder = new VariableOrder("X","Y");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-
-	@Test
-	public void basicLongMixLiteralBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new LiteralImpl("literal1"));
-		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
-		bs.addBinding("Z",new LiteralImpl("5.0", new URIImpl("http://www.w3.org/2001/XMLSchema#double")));
-		bs.addBinding("W",new LiteralImpl("1000", new URIImpl("http://www.w3.org/2001/XMLSchema#long")));
-		final VariableOrder varOrder = new VariableOrder("W","X","Y","Z");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-
-	@Test
-	public void basicMixUriLiteralBsTest() throws BindingSetConversionException {
-		final QueryBindingSet bs = new QueryBindingSet();
-		bs.addBinding("X",new LiteralImpl("literal1"));
-		bs.addBinding("Y",new LiteralImpl("5", new URIImpl("http://www.w3.org/2001/XMLSchema#integer")));
-		bs.addBinding("Z",new LiteralImpl("5.0", new URIImpl("http://www.w3.org/2001/XMLSchema#double")));
-		bs.addBinding("W",new LiteralImpl("1000", new URIImpl("http://www.w3.org/2001/XMLSchema#long")));
-		bs.addBinding("A",new URIImpl("http://uri1"));
-		bs.addBinding("B",new URIImpl("http://uri2"));
-		bs.addBinding("C",new URIImpl("http://uri3"));
-		final VariableOrder varOrder = new VariableOrder("A","W","X","Y","Z","B","C");
-
-		BindingSetConverter<byte[]> converter = new AccumuloPcjSerializer();
-		final byte[] byteVal = converter.convert(bs, varOrder);
-		final BindingSet newBs = converter.convert(byteVal, varOrder);
-		assertEquals(bs, newBs);
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverterTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverterTest.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverterTest.java
index e01e7de..b263038 100644
--- a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverterTest.java
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/BindingSetStringConverterTest.java
@@ -23,10 +23,7 @@ import static org.junit.Assert.assertEquals;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
-import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetStringConverter;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.junit.Test;
 import org.openrdf.model.impl.BooleanLiteralImpl;
 import org.openrdf.model.impl.DecimalLiteralImpl;
@@ -41,6 +38,23 @@ import org.openrdf.query.impl.MapBindingSet;
 public class BindingSetStringConverterTest {
 
     @Test
+    public void noBindings() throws BindingSetConversionException {
+        // Create a BindingSet that doesn't have any bindings.
+        final MapBindingSet original = new MapBindingSet();
+
+        // Convert it to a String.
+        final VariableOrder varOrder = new VariableOrder();
+        final BindingSetConverter<String> converter = new BindingSetStringConverter();
+        final String bindingSetString = converter.convert(original, varOrder);
+
+        // Convert it back to a binding set.
+        final BindingSet converted = converter.convert(bindingSetString, varOrder);
+
+        // Ensure it is still an empty BindingSet.
+        assertEquals(original, converted);
+    }
+
+    @Test
     public void toString_URIs() throws BindingSetConversionException {
         // Setup the binding set that will be converted.
         final MapBindingSet originalBindingSet = new MapBindingSet();
@@ -53,7 +67,7 @@ public class BindingSetStringConverterTest {
         final BindingSetConverter<String> converter = new BindingSetStringConverter();
         final String bindingSetString = converter.convert(originalBindingSet, varOrder);
 
-        // Ensure it converted to the expected result.l
+        // Ensure it converted to the expected result.
         final String expected =
                 "http://b<<~>>http://www.w3.org/2001/XMLSchema#anyURI:::" +
                 "http://c<<~>>http://www.w3.org/2001/XMLSchema#anyURI:::" +
@@ -163,26 +177,6 @@ public class BindingSetStringConverterTest {
         assertEquals(expected, bindingSetString);
     }
 
-    /**
-     * The BindingSet has a Binding whose name is not in the variable order.
-     * This is illegal.
-     */
-    @Test(expected = IllegalArgumentException.class)
-    public void toString_bindingNotInVariableOrder() throws BindingSetConversionException {
-        // Setup the Binding Set.
-        final MapBindingSet originalBindingSet = new MapBindingSet();
-        originalBindingSet.addBinding("x", new URIImpl("http://a"));
-        originalBindingSet.addBinding("y", new URIImpl("http://b"));
-        originalBindingSet.addBinding("z", new URIImpl("http://d"));
-
-        // Setup the variable order.
-        final VariableOrder varOrder = new VariableOrder("x", "y");
-
-        // Create the String representation of the BindingSet. This will throw an exception.
-        final BindingSetConverter<String> converter = new BindingSetStringConverter();
-        converter.convert(originalBindingSet, varOrder);
-    }
-
     @Test
     public void fromString() throws BindingSetConversionException {
         // Setup the String that will be converted.

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTablesIntegrationTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTablesIntegrationTest.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTablesIntegrationTest.java
index 623526e..2bcce65 100644
--- a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTablesIntegrationTest.java
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTablesIntegrationTest.java
@@ -38,10 +38,17 @@ import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.accumulo.AccumuloRyaDAO;
+import org.apache.rya.accumulo.MiniAccumuloClusterInstance;
+import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
 import org.apache.rya.indexing.pcj.storage.PcjException;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
+import org.apache.rya.rdftriplestore.RdfCloudTripleStore;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
 import org.apache.zookeeper.ClientCnxn;
 import org.junit.After;
 import org.junit.Before;
@@ -63,13 +70,6 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Sets;
 
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.accumulo.AccumuloRyaDAO;
-import org.apache.rya.accumulo.MiniAccumuloClusterInstance;
-import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
-import org.apache.rya.rdftriplestore.RdfCloudTripleStore;
-import org.apache.rya.rdftriplestore.RyaSailRepository;
-
 /**
  * Performs integration test using {@link MiniAccumuloCluster} to ensure the
  * functions of {@link PcjTables} work within a cluster setting.
@@ -237,7 +237,7 @@ public class PcjTablesIntegrationTest {
     }
 
     @Test
-    public void listResults() throws PCJStorageException, AccumuloException, AccumuloSecurityException {
+    public void listResults() throws Exception {
         final String sparql =
                 "SELECT ?name ?age " +
                 "{" +
@@ -274,8 +274,14 @@ public class PcjTablesIntegrationTest {
 
         // Fetch the Binding Sets that have been stored in the PCJ table.
         final Set<BindingSet> results = new HashSet<>();
-        for(final BindingSet result : pcjs.listResults(accumuloConn, pcjTableName, new Authorizations())) {
-            results.add( result );
+
+        final CloseableIterator<BindingSet> resultsIt = pcjs.listResults(accumuloConn, pcjTableName, new Authorizations());
+        try {
+            while(resultsIt.hasNext()) {
+                results.add( resultsIt.next() );
+            }
+        } finally {
+            resultsIt.close();
         }
 
         // Verify the fetched results match the expected ones.

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
index 7eeff1d..98ed4c7 100644
--- a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
@@ -31,8 +31,15 @@ import java.util.Set;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
+import org.apache.rya.accumulo.AccumuloRyaITBase;
+import org.apache.rya.accumulo.instance.AccumuloRyaInstanceDetailsRepository;
+import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
+import org.apache.rya.api.instance.RyaDetailsRepository;
+import org.apache.rya.api.instance.RyaDetailsRepository.NotInitializedException;
+import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.ShiftVarOrderFactory;
@@ -46,13 +53,6 @@ import org.openrdf.query.impl.MapBindingSet;
 
 import com.google.common.collect.ImmutableMap;
 
-import org.apache.rya.accumulo.AccumuloRyaITBase;
-import org.apache.rya.accumulo.instance.AccumuloRyaInstanceDetailsRepository;
-import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
-import org.apache.rya.api.instance.RyaDetailsRepository;
-import org.apache.rya.api.instance.RyaDetailsRepository.NotInitializedException;
-import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
-
 /**
  * Integration tests the methods of {@link AccumuloPcjStorage}.
  * </p>
@@ -66,23 +66,23 @@ public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a PCJ.
-        final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
 
-        // Ensure the Rya details have been updated to include the PCJ's ID.
-        final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
+            // Ensure the Rya details have been updated to include the PCJ's ID.
+            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
 
-        final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
-                .getPCJIndexDetails()
-                .getPCJDetails();
+            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
+                    .getPCJIndexDetails()
+                    .getPCJDetails();
 
-        final PCJDetails expectedDetails = PCJDetails.builder()
-                .setId( pcjId )
-                .build();
+            final PCJDetails expectedDetails = PCJDetails.builder()
+                    .setId( pcjId )
+                    .build();
 
-        assertEquals(expectedDetails, detailsMap.get(pcjId));
+            assertEquals(expectedDetails, detailsMap.get(pcjId));
+        }
     }
 
     @Test
@@ -90,22 +90,22 @@ public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a PCJ.
-        final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
 
-        // Delete the PCJ that was just created.
-        pcjStorage.dropPcj(pcjId);
+            // Delete the PCJ that was just created.
+            pcjStorage.dropPcj(pcjId);
 
-        // Ensure the Rya details have been updated to no longer include the PCJ's ID.
-        final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
+            // Ensure the Rya details have been updated to no longer include the PCJ's ID.
+            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
 
-        final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
-                .getPCJIndexDetails()
-                .getPCJDetails();
+            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
+                    .getPCJIndexDetails()
+                    .getPCJDetails();
 
-        assertFalse( detailsMap.containsKey(pcjId) );
+            assertFalse( detailsMap.containsKey(pcjId) );
+        }
     }
 
     @Test
@@ -113,27 +113,27 @@ public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a few PCJs and hold onto their IDs.
-        final List<String> expectedIds = new ArrayList<>();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a few PCJs and hold onto their IDs.
+            final List<String> expectedIds = new ArrayList<>();
 
-        String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-        expectedIds.add( pcjId );
+            String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
 
-        pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-        expectedIds.add( pcjId );
+            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
 
-        pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-        expectedIds.add( pcjId );
+            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
 
-        // Fetch the PCJ names
-        final List<String> pcjIds = pcjStorage.listPcjs();
+            // Fetch the PCJ names
+            final List<String> pcjIds = pcjStorage.listPcjs();
 
-        // Ensure the expected IDs match the fetched IDs.
-        Collections.sort(expectedIds);
-        Collections.sort(pcjIds);
-        assertEquals(expectedIds, pcjIds);
+            // Ensure the expected IDs match the fetched IDs.
+            Collections.sort(expectedIds);
+            Collections.sort(pcjIds);
+            assertEquals(expectedIds, pcjIds);
+        }
     }
 
     @Test
@@ -141,19 +141,19 @@ public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a PCJ.
-        final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Fetch the PCJ's metadata.
-        final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-
-        // Ensure it has the expected values.
-        final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-        final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
-        assertEquals(expectedMetadata, metadata);
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Fetch the PCJ's metadata.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+
+            // Ensure it has the expected values.
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
     }
 
     @Test
@@ -161,112 +161,116 @@ public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Create a PCJ.
-        final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-        final String pcjId = pcjStorage.createPcj(sparql);
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> results = new HashSet<>();
 
-        // Add some binding sets to it.
-        final Set<VisibilityBindingSet> results = new HashSet<>();
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            results.add( new VisibilityBindingSet(aliceBS, "") );
 
-        final MapBindingSet aliceBS = new MapBindingSet();
-        aliceBS.addBinding("a", new URIImpl("http://Alice"));
-        aliceBS.addBinding("b", new URIImpl("http://Person"));
-        results.add( new VisibilityBindingSet(aliceBS, "") );
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            results.add( new VisibilityBindingSet(charlieBS, "") );
 
-        final MapBindingSet charlieBS = new MapBindingSet();
-        charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-        charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-        results.add( new VisibilityBindingSet(charlieBS, "") );
+            pcjStorage.addResults(pcjId, results);
 
-        pcjStorage.addResults(pcjId, results);
+            // Make sure the PCJ metadata was updated.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
 
-        // Make sure the PCJ metadata was updated.
-        final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-
-        final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-        final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 2L, varOrders);
-        assertEquals(expectedMetadata, metadata);
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 2L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
     }
 
     @Test
-    public void listResults() throws AccumuloException, AccumuloSecurityException, PCJStorageException {
+    public void listResults() throws Exception {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a PCJ.
-        final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Add some binding sets to it.
-        final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
-
-        final MapBindingSet aliceBS = new MapBindingSet();
-        aliceBS.addBinding("a", new URIImpl("http://Alice"));
-        aliceBS.addBinding("b", new URIImpl("http://Person"));
-        expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
-
-        final MapBindingSet charlieBS = new MapBindingSet();
-        charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-        charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-        expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
-
-        pcjStorage.addResults(pcjId, expectedResults);
-
-        // List the results that were stored.
-        final Set<BindingSet> results = new HashSet<>();
-        for(final BindingSet result : pcjStorage.listResults(pcjId)) {
-            results.add( result );
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
+
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
+
+            pcjStorage.addResults(pcjId, expectedResults);
+
+            // List the results that were stored.
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+
+            assertEquals(expectedResults, results);
         }
-
-        assertEquals(expectedResults, results);
     }
 
     @Test
-    public void purge() throws AccumuloException, AccumuloSecurityException, PCJStorageException, MalformedQueryException {
+    public void purge() throws Exception {
         // Setup the PCJ storage that will be tested against.
         final Connector connector = super.getClusterInstance().getConnector();
         final String ryaInstanceName = super.getRyaInstanceName();
-        final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName);
-
-        // Create a PCJ.
-        final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-        final String pcjId = pcjStorage.createPcj(sparql);
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Add some binding sets to it.
-        final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
 
-        final MapBindingSet aliceBS = new MapBindingSet();
-        aliceBS.addBinding("a", new URIImpl("http://Alice"));
-        aliceBS.addBinding("b", new URIImpl("http://Person"));
-        expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
 
-        final MapBindingSet charlieBS = new MapBindingSet();
-        charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-        charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-        expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
 
-        pcjStorage.addResults(pcjId, expectedResults);
+            pcjStorage.addResults(pcjId, expectedResults);
 
-        // Purge the PCJ.
-        pcjStorage.purge(pcjId);
+            // Purge the PCJ.
+            pcjStorage.purge(pcjId);
 
-        // List the results that were stored.
-        final Set<BindingSet> results = new HashSet<>();
-        for(final BindingSet result : pcjStorage.listResults(pcjId)) {
-            results.add( result );
-        }
+            // List the results that were stored.
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
 
-        assertTrue( results.isEmpty() );
+            assertTrue( results.isEmpty() );
 
-        // Make sure the PCJ metadata was updated.
-        final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+            // Make sure the PCJ metadata was updated.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
 
-        final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-        final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
-        assertEquals(expectedMetadata, metadata);
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
index 86f5b48..1de0813 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
@@ -22,9 +22,9 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static java.util.Objects.requireNonNull;
 
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.Transaction;
+import org.apache.log4j.Logger;
 import org.apache.rya.accumulo.AccumuloRdfConfiguration;
 import org.apache.rya.accumulo.query.AccumuloRyaQueryEngine;
 import org.apache.rya.api.domain.RyaStatement;
@@ -52,15 +53,14 @@ import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
 import org.apache.rya.indexing.pcj.storage.PcjException;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.calrissian.mango.collect.CloseableIterable;
 import org.openrdf.model.Resource;
 import org.openrdf.model.URI;
 import org.openrdf.model.Value;
 import org.openrdf.query.MalformedQueryException;
-import org.openrdf.query.QueryEvaluationException;
 import org.openrdf.query.algebra.StatementPattern;
 import org.openrdf.query.parser.ParsedQuery;
 import org.openrdf.query.parser.sparql.SPARQLParser;
-import org.openrdf.sail.SailException;
 
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
 import edu.umd.cs.findbugs.annotations.NonNull;
@@ -82,6 +82,7 @@ import edu.umd.cs.findbugs.annotations.NonNull;
  */
 @DefaultAnnotation(NonNull.class)
 public class CreatePcj {
+    private static final Logger log = Logger.getLogger(CreatePcj.class);
 
     /**
      * The default Statement Pattern batch insert size is 1000.
@@ -113,7 +114,51 @@ public class CreatePcj {
         this.spInsertBatchSize = spInsertBatchSize;
     }
 
-    
+    /**
+     * Tells the Fluo PCJ Updater application to maintain a new PCJ.
+     *
+     * @param pcjId - Identifies the PCJ that will be updated by the Fluo app. (not null)
+     * @param pcjStorage - Provides access to the PCJ index. (not null)
+     * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
+     * @return The metadata that was written to the Fluo application for the PCJ.
+     * @throws MalformedQueryException The SPARQL query stored for the {@code pcjId} is malformed.
+     * @throws PcjException The PCJ Metadata for {@code pcjId} could not be read from {@code pcjStorage}.
+     */
+    public FluoQuery createPcj(
+            final String pcjId,
+            final PrecomputedJoinStorage pcjStorage,
+            final FluoClient fluo) throws MalformedQueryException, PcjException {
+        requireNonNull(pcjId);
+        requireNonNull(pcjStorage);
+        requireNonNull(fluo);
+
+        // Keeps track of the IDs that are assigned to each of the query's nodes in Fluo.
+        // We use these IDs later when scanning Rya for historic Statement Pattern matches
+        // as well as setting up automatic exports.
+        final NodeIds nodeIds = new NodeIds();
+
+        // Parse the query's structure for the metadata that will be written to fluo.
+        final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
+        final String sparql = pcjMetadata.getSparql();
+        final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparql, null);
+        final FluoQuery fluoQuery = new SparqlFluoQueryBuilder().make(parsedQuery, nodeIds);
+
+        try (Transaction tx = fluo.newTransaction()) {
+            // Write the query's structure to Fluo.
+            new FluoQueryMetadataDAO().write(tx, fluoQuery);
+
+            // The results of the query are eventually exported to an instance of Rya, so store the Rya ID for the PCJ.
+            final String queryId = fluoQuery.getQueryMetadata().getNodeId();
+            tx.set(queryId, FluoQueryColumns.RYA_PCJ_ID, pcjId);
+            tx.set(pcjId, FluoQueryColumns.PCJ_ID_QUERY_ID, queryId);
+
+            // Flush the changes to Fluo.
+            tx.commit();
+        }
+
+        return fluoQuery;
+    }
+
     /**
      * Tells the Fluo PCJ Updater application to maintain a new PCJ.
      * <p>
@@ -126,147 +171,115 @@ public class CreatePcj {
      * @param pcjStorage - Provides access to the PCJ index. (not null)
      * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
      * @param queryEngine - QueryEngine for a given Rya Instance, (not null)
-     *
+     * @return The Fluo application's Query ID of the query that was created.
      * @throws MalformedQueryException The SPARQL query stored for the {@code pcjId} is malformed.
      * @throws PcjException The PCJ Metadata for {@code pcjId} could not be read from {@code pcjStorage}.
-     * @throws SailException Historic PCJ results could not be loaded because of a problem with {@code rya}.
-     * @throws QueryEvaluationException Historic PCJ results could not be loaded because of a problem with {@code rya}.
+     * @throws RyaDAOException Historic PCJ results could not be loaded because of a problem with {@code rya}.
      */
-    public String  withRyaIntegration(final String pcjId, final PrecomputedJoinStorage pcjStorage, final FluoClient fluo,
-            final Connector accumulo, String ryaInstance )
-                    throws MalformedQueryException, PcjException, SailException, QueryEvaluationException, RyaDAOException {
+    public String withRyaIntegration(
+            final String pcjId,
+            final PrecomputedJoinStorage pcjStorage,
+            final FluoClient fluo,
+            final Connector accumulo,
+            final String ryaInstance ) throws MalformedQueryException, PcjException, RyaDAOException {
         requireNonNull(pcjId);
         requireNonNull(pcjStorage);
         requireNonNull(fluo);
-		requireNonNull(accumulo);
-		requireNonNull(ryaInstance);
-		
-		//Create AccumuloRyaQueryEngine to query for historic results
-		AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
-		conf.setTablePrefix(ryaInstance);
-		conf.setAuths(getAuths(accumulo));
-		AccumuloRyaQueryEngine queryEngine = new AccumuloRyaQueryEngine(accumulo, conf);
-		
-
-		// Keeps track of the IDs that are assigned to each of the query's nodes
-		// in Fluo.
-		// We use these IDs later when scanning Rya for historic Statement
-		// Pattern matches
-		// as well as setting up automatic exports.
-		final NodeIds nodeIds = new NodeIds();
-
-		// Parse the query's structure for the metadata that will be written to
-		// fluo.
-		final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
-		final String sparql = pcjMetadata.getSparql();
-		final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparql, null);
-		final FluoQuery fluoQuery = new SparqlFluoQueryBuilder().make(parsedQuery, nodeIds);
+        requireNonNull(accumulo);
+        requireNonNull(ryaInstance);
+
+        // Write the SPARQL query's structure to the Fluo Application.
+        final FluoQuery fluoQuery = createPcj(pcjId, pcjStorage, fluo);
+
+        // Reuse the same set object while performing batch inserts.
+        final Set<RyaStatement> queryBatch = new HashSet<>();
+
+        // Iterate through each of the statement patterns and insert their historic matches into Fluo.
+        for (final StatementPatternMetadata patternMetadata : fluoQuery.getStatementPatternMetadata()) {
+            // Get an iterator over all of the binding sets that match the statement pattern.
+            final StatementPattern pattern = FluoStringConverter.toStatementPattern(patternMetadata.getStatementPattern());
+            queryBatch.add(spToRyaStatement(pattern));
+        }
+
+        //Create AccumuloRyaQueryEngine to query for historic results
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(ryaInstance);
+        conf.setAuths(getAuths(accumulo));
+
+        try(final AccumuloRyaQueryEngine queryEngine = new AccumuloRyaQueryEngine(accumulo, conf);
+                CloseableIterable<RyaStatement> queryIterable = queryEngine.query(new BatchRyaQuery(queryBatch))) {
+            final Set<RyaStatement> triplesBatch = new HashSet<>();
+
+            // Insert batches of the binding sets into Fluo.
+            for(final RyaStatement ryaStatement : queryIterable) {
+                if (triplesBatch.size() == spInsertBatchSize) {
+                    writeBatch(fluo, triplesBatch);
+                    triplesBatch.clear();
+                }
+
+                triplesBatch.add(ryaStatement);
+            }
+
+            if (!triplesBatch.isEmpty()) {
+                writeBatch(fluo, triplesBatch);
+                triplesBatch.clear();
+            }
+        } catch (final IOException e) {
+            log.warn("Ignoring IOException thrown while closing the AccumuloRyaQueryEngine used by CreatePCJ.", e);
+        }
 
         // return queryId to the caller for later monitoring from the export.
-        String queryId = null;
-
-		try (Transaction tx = fluo.newTransaction()) {
-			// Write the query's structure to Fluo.
-			new FluoQueryMetadataDAO().write(tx, fluoQuery);
-
-			// The results of the query are eventually exported to an instance
-			// of Rya, so store the Rya ID for the PCJ.
-            queryId = fluoQuery.getQueryMetadata().getNodeId();
-			tx.set(queryId, FluoQueryColumns.RYA_PCJ_ID, pcjId);
-			tx.set(pcjId, FluoQueryColumns.PCJ_ID_QUERY_ID, queryId);
-
-			// Flush the changes to Fluo.
-			tx.commit();
-		}
-
-		// Reuse the same set object while performing batch inserts.
-		final Set<RyaStatement> queryBatch = new HashSet<>();
-
-		// Iterate through each of the statement patterns and insert their
-		// historic matches into Fluo.
-		for (final StatementPatternMetadata patternMetadata : fluoQuery.getStatementPatternMetadata()) {
-			// Get an iterator over all of the binding sets that match the
-			// statement pattern.
-			final StatementPattern pattern = FluoStringConverter
-					.toStatementPattern(patternMetadata.getStatementPattern());
-			queryBatch.add(spToRyaStatement(pattern));
-		}
-
-		Iterator<RyaStatement> triples = queryEngine.query(new BatchRyaQuery(queryBatch)).iterator();
-		Set<RyaStatement> triplesBatch = new HashSet<>();
-
-		// Insert batches of the binding sets into Fluo.
-		while (triples.hasNext()) {
-			if (triplesBatch.size() == spInsertBatchSize) {
-				writeBatch(fluo, triplesBatch);
-				triplesBatch.clear();
-			}
-
-			triplesBatch.add(triples.next());
-		}
-
-		if (!triplesBatch.isEmpty()) {
-			writeBatch(fluo, triplesBatch);
-			triplesBatch.clear();
-		}
-        return queryId;
+        return fluoQuery.getQueryMetadata().getNodeId();
     }
-    
+
     private static void writeBatch(final FluoClient fluo, final Set<RyaStatement> batch) {
         checkNotNull(fluo);
         checkNotNull(batch);
-        
         new InsertTriples().insert(fluo, batch);
+    }
+
+    private static RyaStatement spToRyaStatement(final StatementPattern sp) {
+        final Value subjVal = sp.getSubjectVar().getValue();
+        final Value predVal = sp.getPredicateVar().getValue();
+        final Value objVal = sp.getObjectVar().getValue();
+
+        RyaURI subjURI = null;
+        RyaURI predURI = null;
+        RyaType objType = null;
 
+        if(subjVal != null) {
+            if(!(subjVal instanceof Resource)) {
+                throw new AssertionError("Subject must be a Resource.");
+            }
+            subjURI = RdfToRyaConversions.convertResource((Resource) subjVal);
+        }
+
+        if (predVal != null) {
+            if(!(predVal instanceof URI)) {
+                throw new AssertionError("Predicate must be a URI.");
+            }
+            predURI = RdfToRyaConversions.convertURI((URI) predVal);
+        }
+
+        if (objVal != null ) {
+            objType = RdfToRyaConversions.convertValue(objVal);
+        }
+
+        return new RyaStatement(subjURI, predURI, objType);
     }
-    
-    
-    private static RyaStatement spToRyaStatement(StatementPattern sp) {
-    
-    	Value subjVal = sp.getSubjectVar().getValue();
-    	Value predVal = sp.getPredicateVar().getValue();
-    	Value objVal = sp.getObjectVar().getValue();
-    	
-    	RyaURI subjURI = null;
-    	RyaURI predURI = null;
-    	RyaType objType = null;
-    	
-    	if(subjVal != null) {
-    		if(!(subjVal instanceof Resource)) {
-    			throw new AssertionError("Subject must be a Resource.");
-    		}
-    		subjURI = RdfToRyaConversions.convertResource((Resource) subjVal);
-    	}
-    	
-		if (predVal != null) {
-			if(!(predVal instanceof URI)) {
-    			throw new AssertionError("Predicate must be a URI.");
-    		}
-			predURI = RdfToRyaConversions.convertURI((URI) predVal);
-		}
-		
-		if (objVal != null ) {
-			objType = RdfToRyaConversions.convertValue(objVal);
-		}
-		
-    	return new RyaStatement(subjURI, predURI, objType);
+
+    private String[] getAuths(final Connector accumulo) {
+        Authorizations auths;
+        try {
+            auths = accumulo.securityOperations().getUserAuthorizations(accumulo.whoami());
+            final List<byte[]> authList = auths.getAuthorizations();
+            final String[] authArray = new String[authList.size()];
+            for(int i = 0; i < authList.size(); i++){
+                authArray[i] = new String(authList.get(i), "UTF-8");
+            }
+            return authArray;
+        } catch (AccumuloException | AccumuloSecurityException | UnsupportedEncodingException e) {
+            throw new RuntimeException("Cannot read authorizations for user: " + accumulo.whoami());
+        }
     }
-    
-    
-    private String[] getAuths(Connector accumulo) {
-   	 Authorizations auths;
-		try {
-			auths = accumulo.securityOperations().getUserAuthorizations(accumulo.whoami());
-			List<byte[]> authList = auths.getAuthorizations();
-	         String[] authArray = new String[authList.size()];
-	         for(int i = 0; i < authList.size(); i++){
-	         	authArray[i] = new String(authList.get(i), "UTF-8");
-	         }
-	         return authArray;
-		} catch (AccumuloException | AccumuloSecurityException | UnsupportedEncodingException e) {
-			throw new RuntimeException("Cannot read authorizations for user: " + accumulo.whoami());
-		}
-   }
-    
-    
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
index 1d92262..c11f9fb 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
@@ -25,10 +25,15 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Transaction;
+import org.apache.fluo.api.client.scanner.CellScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.RowColumnValue;
+import org.apache.fluo.api.data.Span;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
@@ -36,13 +41,8 @@ import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
 import org.openrdf.query.BindingSet;
 
-import org.apache.fluo.api.client.FluoClient;
-import org.apache.fluo.api.client.Transaction;
-import org.apache.fluo.api.client.scanner.CellScanner;
-import org.apache.fluo.api.data.Bytes;
-import org.apache.fluo.api.data.Column;
-import org.apache.fluo.api.data.RowColumnValue;
-import org.apache.fluo.api.data.Span;
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
 
 /**
  * Deletes a Pre-computed Join (PCJ) from Fluo.
@@ -154,6 +154,12 @@ public class DeletePcj {
                 nodeIds.add(filterChild);
                 getChildNodeIds(tx, filterChild, nodeIds);
                 break;
+            case AGGREGATION:
+                final AggregationMetadata aggMeta = dao.readAggregationMetadata(tx, nodeId);
+                final String aggChild = aggMeta.getChildNodeId();
+                nodeIds.add(aggChild);
+                getChildNodeIds(tx, aggChild, nodeIds);
+                break;
             case STATEMENT_PATTERN:
                 break;
         }
@@ -254,7 +260,7 @@ public class DeletePcj {
 
         try(Transaction ntx = tx) {
           int count = 0;
-          Iterator<RowColumnValue> iter = scanner.iterator();
+          final Iterator<RowColumnValue> iter = scanner.iterator();
           while (iter.hasNext() && count < batchSize) {
             final Bytes row = iter.next().getRow();
             count++;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml b/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
index 343713c..38fff95 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
@@ -64,6 +64,14 @@ under the License.
             	</exclusion>
             </exclusions>
         </dependency>
+        <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-recipes-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-recipes-accumulo</artifactId>
+        </dependency>
         
         <dependency>
           <groupId>org.apache.kafka</groupId>
@@ -81,7 +89,7 @@ under the License.
                 </exclusion>
             </exclusions>
         </dependency>
-                <dependency>
+        <dependency>
             <groupId>com.esotericsoftware</groupId>
             <artifactId>kryo</artifactId>
             <version>${kryo.version}</version>


[2/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
index f3f486c..747f6e5 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
@@ -23,26 +23,29 @@ import static org.junit.Assert.assertEquals;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Test;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
 /**
  * Performs integration tests over the Fluo application geared towards Rya PCJ exporting.
- * <p>
- * These tests are being ignore so that they will not run as unit tests while building the application.
  */
-public class RyaExportIT extends ITBase {
+public class RyaExportIT extends RyaExportITBase {
 
     @Test
     public void resultsExported() throws Exception {
@@ -57,59 +60,69 @@ public class RyaExportIT extends ITBase {
                 "}";
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Bob"),
-                makeRyaStatement("http://Bob", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Bob", "http://worksAt", "http://Chipotle"),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Bob")),
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://livesIn"), new RyaURI("http://London")),
+                new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
 
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Charlie"),
-                makeRyaStatement("http://Charlie", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Charlie")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://livesIn"), new RyaURI("http://London")),
+                new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
 
-                makeRyaStatement("http://Alice", "http://talksTo", "http://David"),
-                makeRyaStatement("http://David", "http://livesIn", "http://London"),
-                makeRyaStatement("http://David", "http://worksAt", "http://Chipotle"),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://David")),
+                new RyaStatement(new RyaURI("http://David"), new RyaURI("http://livesIn"), new RyaURI("http://London")),
+                new RyaStatement(new RyaURI("http://David"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
 
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Eve", "http://livesIn", "http://Leeds"),
-                makeRyaStatement("http://Eve", "http://worksAt", "http://Chipotle"),
+                new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"), new RyaURI("http://Eve")),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://livesIn"), new RyaURI("http://Leeds")),
+                new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")),
 
-                makeRyaStatement("http://Frank", "http://talksTo", "http://Alice"),
-                makeRyaStatement("http://Frank", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Frank", "http://worksAt", "http://Chipotle"));
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://talksTo"), new RyaURI("http://Alice")),
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://livesIn"), new RyaURI("http://London")),
+                new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://worksAt"), new RyaURI("http://Chipotle")));
 
         // The expected results of the SPARQL query once the PCJ has been computed.
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Bob")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Charlie")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://David")),
-                new BindingImpl("city", new URIImpl("http://London"))));
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Bob"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Charlie"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://David"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expected.add(bs);
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+            // Stream the data into Fluo.
+            new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
 
-        // Fetch the exported results from Accumulo once the observers finish working.
-        fluo.waitForObservers();
+            // Fetch the exported results from Accumulo once the observers finish working.
+            super.getMiniFluo().waitForObservers();
 
-        // Fetch expected results from the PCJ table that is in Accumulo.
-        final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
+            // Fetch expected results from the PCJ table that is in Accumulo.
+            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
 
-        // Verify the end results of the query match the expected results.
-        assertEquals(expected, results);
+            // Verify the end results of the query match the expected results.
+            assertEquals(expected, results);
+        }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
index fd70a19..3c74b13 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
@@ -23,86 +23,104 @@ import static org.junit.Assert.assertEquals;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
 import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.accumulo.AccumuloRyaDAO;
 import org.apache.rya.indexing.external.PrecomputedJoinIndexer;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.update.PrecomputedJoinUpdater;
 import org.junit.Test;
 import org.openrdf.model.Statement;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
 import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
 import com.google.common.collect.Sets;
 
-
 /**
- * This test ensures that the correct updates are pushed by Fluo
- * to the external PCJ table as triples are added to Rya through
- * the {@link RepositoryConnection}.  The key difference between these
- * tests and those in {@link InputIT} is that streaming triples are added through
- * the RepositoryConnection and not through the {@link FluoClient}.  These tests are
- * designed to verify that the {@link AccumuloRyaDAO} has been integrated
- * with the {@link PrecomputedJoinIndexer} and that the associated {@link PrecomputedJoinUpdater} updates
- * Fluo accordingly.
- *
+ * This test ensures that the correct updates are pushed by Fluo to the external PCJ table as triples are added to Rya
+ * through the {@link RepositoryConnection}.  The key difference between these tests and those in {@link InputIT} is
+ * that streaming triples are added through the RepositoryConnection and not through the {@link FluoClient}.  These
+ * tests are designed to verify that the {@link AccumuloRyaDAO} has been integrated with the {@link PrecomputedJoinIndexer}
+ * and that the associated {@link PrecomputedJoinUpdater} updates Fluo accordingly.
  */
-
-public class RyaInputIncrementalUpdateIT extends ITBase {
+public class RyaInputIncrementalUpdateIT extends RyaExportITBase {
 
     /**
      * Ensure historic matches are included in the result.
      */
     @Test
     public void streamResultsThroughRya() throws Exception {
-
         // A query that finds people who talk to Eve and work at Chipotle.
-        final String sparql = "SELECT ?x " + "WHERE { " + "?x <http://talksTo> <http://Eve>. "
-                + "?x <http://worksAt> <http://Chipotle>." + "}";
+        final String sparql =
+                "SELECT ?x " + "WHERE { " +
+                    "?x <http://talksTo> <http://Eve>. " +
+                    "?x <http://worksAt> <http://Chipotle>." +
+                "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Bob", "http://talksTo", "http://Eve"),
-                makeStatement("http://Charlie", "http://talksTo", "http://Eve"),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
 
-                makeStatement("http://Eve", "http://helps", "http://Kevin"),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin")),
 
-                makeStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://David", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // The expected results of the SPARQL query once the PCJ has been
         // computed.
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Bob"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Charlie"))));
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Bob"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Charlie"));
+        expected.add(bs);
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
+            // Verify the end results of the query match the expected results.
+            super.getMiniFluo().waitForObservers();
 
-        // Load the historic data into Rya.
-        for (final Statement triple : historicTriples) {
-            ryaConn.add(triple);
-        }
+            // Load the historic data into Rya.
+            final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
+            for (final Statement triple : historicTriples) {
+                ryaConn.add(triple);
+            }
+
+            super.getMiniFluo().waitForObservers();
+
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultIt = pcjStorage.listResults(pcjId)) {
+                while(resultIt.hasNext()) {
+                    results.add( resultIt.next() );
+                }
+            }
 
-        fluo.waitForObservers();
-        
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            assertEquals(expected, results);
+        }
     }
 
     /**
@@ -115,97 +133,146 @@ public class RyaInputIncrementalUpdateIT extends ITBase {
     @Test
     public void historicThenStreamedResults() throws Exception {
         // A query that finds people who talk to Eve and work at Chipotle.
-        final String sparql = "SELECT ?x " + "WHERE { " + "?x <http://talksTo> <http://Eve>. "
-                + "?x <http://worksAt> <http://Chipotle>." + "}";
+        final String sparql =
+                "SELECT ?x " + "WHERE { " +
+                    "?x <http://talksTo> <http://Eve>. " +
+                    "?x <http://worksAt> <http://Chipotle>." +
+                "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Alice", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Joe", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Triples that will be streamed into Fluo after the PCJ has been
         final Set<Statement> streamedTriples = Sets.newHashSet(
-                makeStatement("http://Frank", "http://talksTo", "http://Eve"),
-                makeStatement("http://Joe", "http://talksTo", "http://Eve"),
-                makeStatement("http://Frank", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Load the historic data into Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         for (final Statement triple : historicTriples) {
             ryaConn.add(triple);
         }
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        fluo.waitForObservers();
+            super.getMiniFluo().waitForObservers();
 
-        // Load the streaming data into Rya.
-        for (final Statement triple : streamedTriples) {
-            ryaConn.add(triple);
-        }
+            // Load the streaming data into Rya.
+            for (final Statement triple : streamedTriples) {
+                ryaConn.add(triple);
+            }
 
-        // Ensure Alice is a match.
-        fluo.waitForObservers();
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Alice"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Frank"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Joe"))));
+            // Ensure Alice is a match.
+            super.getMiniFluo().waitForObservers();
 
-        Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            final Set<BindingSet> expected = new HashSet<>();
+            MapBindingSet bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Alice"));
+            expected.add(bs);
+
+            bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Frank"));
+            expected.add(bs);
+
+            bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Joe"));
+            expected.add(bs);
+
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultIt = pcjStorage.listResults(pcjId)) {
+                while(resultIt.hasNext()) {
+                    results.add( resultIt.next() );
+                }
+            }
+
+            assertEquals(expected, results);
+        }
     }
 
     @Test
     public void historicAndStreamMultiVariables() throws Exception {
-        // A query that finds people who talk to Eve and work at Chipotle.
-        // A query that finds people who talk to Eve and work at Chipotle.
-        final String sparql = "SELECT ?x ?y " + "WHERE { " + "?x <http://talksTo> ?y. "
-                + "?x <http://worksAt> <http://Chipotle>." + "}";
+        // A query that finds people who talk to other people and work at Chipotle.
+        final String sparql =
+                "SELECT ?x ?y " + "WHERE { " +
+                    "?x <http://talksTo> ?y. " +
+                    "?x <http://worksAt> <http://Chipotle>." +
+                 "}";
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
         final Set<Statement> historicTriples = Sets.newHashSet(
-                makeStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeStatement("http://Alice", "http://worksAt", "http://Chipotle"),
-                makeStatement("http://Joe", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Triples that will be streamed into Fluo after the PCJ has been
         final Set<Statement> streamedTriples = Sets.newHashSet(
-                makeStatement("http://Frank", "http://talksTo", "http://Betty"),
-                makeStatement("http://Joe", "http://talksTo", "http://Alice"),
-                makeStatement("http://Frank", "http://worksAt", "http://Chipotle"));
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Betty")),
+                vf.createStatement(vf.createURI("http://Joe"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
 
         // Load the historic data into Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         for (final Statement triple : historicTriples) {
             ryaConn.add(triple);
         }
 
         // Create the PCJ table.
+        final Connector accumuloConn = super.getAccumuloConnector();
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Tell the Fluo app to maintain the PCJ.
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        fluo.waitForObservers();
+            super.getMiniFluo().waitForObservers();
 
-        // Load the streaming data into Rya.
-        for (final Statement triple : streamedTriples) {
-            ryaConn.add(triple);
-        }
+            // Load the streaming data into Rya.
+            for (final Statement triple : streamedTriples) {
+                ryaConn.add(triple);
+            }
 
-        // Ensure Alice is a match.
-        fluo.waitForObservers();
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Alice")), new BindingImpl("y", new URIImpl("http://Eve"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Frank")), new BindingImpl("y", new URIImpl("http://Betty"))));
-        expected.add(makeBindingSet(new BindingImpl("x", new URIImpl("http://Joe")), new BindingImpl("y", new URIImpl("http://Alice"))));
+            // Ensure Alice is a match.
+            super.getMiniFluo().waitForObservers();
+
+            final Set<BindingSet> expected = new HashSet<>();
+
+            MapBindingSet bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Alice"));
+            bs.addBinding("y", vf.createURI("http://Eve"));
+            expected.add(bs);
 
-        Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected, results);
+            bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Frank"));
+            bs.addBinding("y", vf.createURI("http://Betty"));
+            expected.add(bs);
+
+            bs = new MapBindingSet();
+            bs.addBinding("x", vf.createURI("http://Joe"));
+            bs.addBinding("y", vf.createURI("http://Alice"));
+            expected.add(bs);
+
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultIt = pcjStorage.listResults(pcjId)) {
+                while(resultIt.hasNext()) {
+                    results.add( resultIt.next() );
+                }
+            }
+
+            assertEquals(expected, results);
+        }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
index 092e8a9..52d6caa 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
@@ -18,24 +18,21 @@
  */
 package org.apache.rya.indexing.pcj.fluo.integration;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.HashSet;
 import java.util.Set;
 import java.util.UUID;
 
-import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.log4j.Logger;
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
-import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
-import org.apache.rya.indexing.pcj.storage.accumulo.PcjTables;
-import org.apache.rya.sail.config.RyaSailFactory;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.openrdf.model.Resource;
 import org.openrdf.model.Statement;
@@ -43,95 +40,58 @@ import org.openrdf.model.impl.LiteralImpl;
 import org.openrdf.model.impl.StatementImpl;
 import org.openrdf.model.impl.URIImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.repository.RepositoryException;
-import org.openrdf.repository.sail.SailRepository;
-import org.openrdf.repository.sail.SailRepositoryConnection;
-import org.openrdf.sail.Sail;
-import org.openrdf.sail.SailException;
 
+public class StreamingTestIT extends RyaExportITBase {
 
-public class StreamingTestIT extends ITBase {
+	private static final Logger log = Logger.getLogger(StreamingTestIT.class);
 
-	private static final Logger log = Logger.getLogger(ITBase.class);
-	private static String query = "select ?name ?uuid where {   ?uuid <http://pred1> ?name ; <http://pred2> \"literal\".}";
-	private static String uuidPrefix = "http://uuid_";
-	private static String name = "number_";
-	private static String pred1 = "http://pred1";
-	private static String pred2 = "http://pred2";
-	
-	private PcjTables pcjTables = new PcjTables();
-	private String pcjTableName;
-	
-	private Sail sail;
-	private SailRepository repo;
-	private SailRepositoryConnection conn;
-	
-	
-	@Before
-	public void init() throws Exception {
-		AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
-		conf.set(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, "U");
-		conf.set(RdfCloudTripleStoreConfiguration.CONF_CV, "U");
-		accumuloConn.securityOperations().changeUserAuthorizations("root", new Authorizations("U"));
-		sail =  RyaSailFactory.getInstance(conf);
-		repo = new SailRepository(sail);
-		conn = repo.getConnection();
-	}
-	
-	@After
-	public void close() throws RepositoryException, SailException {
-		conn.close();
-		repo.shutDown();
-		sail.shutDown();
-	}
-	
-	
 	@Test
 	public void testRandomStreamingIngest() throws Exception {
-		
-		pcjTableName = createPcj(query);
-		log.info("Adding Join Pairs...");
-		addRandomQueryStatementPairs(100);
-		Assert.assertEquals(100, countPcjs());
-		
-	}
-	
-	private String createPcj(String pcj) throws Exception {
-		accumuloConn.securityOperations().changeUserAuthorizations("root", new Authorizations("U"));
-	    // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(pcj);
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-		String tableName = RYA_INSTANCE_NAME + "INDEX_" + pcjId;
-		
-		return tableName;
+	    final String sparql =
+	            "select ?name ?uuid where { " +
+                    "?uuid <http://pred1> ?name ; "  +
+                    "<http://pred2> \"literal\"." +
+                "}";
+
+	    try (FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+	        // Create the PCJ table.
+	        final Connector accumuloConn = super.getAccumuloConnector();
+	        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
+	        final String pcjId = pcjStorage.createPcj(sparql);
+
+	        // Task the Fluo app with the PCJ.
+	        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+	        // Add Statements to the Fluo app.
+	        log.info("Adding Join Pairs...");
+	        addRandomQueryStatementPairs(100);
+
+	        super.getMiniFluo().waitForObservers();
+
+	        int resultCount = 0;
+	        try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+	            while(resultsIt.hasNext()) {
+	                resultCount++;
+	                resultsIt.next();
+	            }
+	        }
+
+	        // Show the correct number of Binding Sets were created for the PCJ.
+	        assertEquals(100, resultCount);
+	    }
 	}
-	
-	private void addRandomQueryStatementPairs(int numPairs) throws Exception {
 
-		Set<Statement> statementPairs = new HashSet<>();
+	private void addRandomQueryStatementPairs(final int numPairs) throws Exception {
+		final Set<Statement> statementPairs = new HashSet<>();
 		for (int i = 0; i < numPairs; i++) {
-			String uri = uuidPrefix + UUID.randomUUID().toString();
-			Statement statement1 = new StatementImpl(new URIImpl(uri), new URIImpl(pred1),
-					new LiteralImpl(name + (i + 1)));
-			Statement statement2 = new StatementImpl(new URIImpl(uri), new URIImpl(pred2), new LiteralImpl("literal"));
+			final String uri = "http://uuid_" + UUID.randomUUID().toString();
+			final Statement statement1 = new StatementImpl(new URIImpl(uri), new URIImpl("http://pred1"),
+					new LiteralImpl("number_" + (i + 1)));
+			final Statement statement2 = new StatementImpl(new URIImpl(uri), new URIImpl("http://pred2"), new LiteralImpl("literal"));
 			statementPairs.add(statement1);
 			statementPairs.add(statement2);
 		}
-		conn.add(statementPairs, new Resource[0]);
-		fluo.waitForObservers();
-	}
-	
-	private int countPcjs() throws Exception {
-		Iterable<BindingSet> bindingsets = pcjTables.listResults(accumuloConn, pcjTableName, new Authorizations("U"));
-		int count = 0;
-		for (BindingSet bs : bindingsets) {
-//			System.out.println(bs);
-			count++;
-		}
-//		IncUpdateDAO.printAll(fluoClient);
-		return count;
+		super.getRyaSailRepository().getConnection().add(statementPairs, new Resource[0]);
+		super.getMiniFluo().waitForObservers();
 	}
-	
-	
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
index 6f4596f..30b6842 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
@@ -22,23 +22,27 @@ import java.io.UnsupportedEncodingException;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.accumulo.AccumuloRdfConfiguration;
 import org.apache.rya.accumulo.AccumuloRyaDAO;
 import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.resolver.RdfToRyaConversions;
 import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Assert;
 import org.junit.Test;
 import org.openrdf.model.Statement;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
 
 import com.google.common.collect.Sets;
 
@@ -47,7 +51,7 @@ import com.google.common.collect.Sets;
  * <p>
  * These tests are being ignore so that they will not run as unit tests while building the application.
  */
-public class HistoricStreamingVisibilityIT extends ITBase {
+public class HistoricStreamingVisibilityIT extends RyaExportITBase {
 
     /**
      * Ensure historic matches are included in the result.
@@ -61,70 +65,74 @@ public class HistoricStreamingVisibilityIT extends ITBase {
                 "?x <http://talksTo> <http://Eve>. " +
                 "?x <http://worksAt> <http://Chipotle>." +
               "}";
-        
+
+        final Connector accumuloConn = super.getAccumuloConnector();
         accumuloConn.securityOperations().changeUserAuthorizations(ACCUMULO_USER, new Authorizations("U","V","W"));
-        AccumuloRyaDAO dao = new AccumuloRyaDAO();
+        final AccumuloRyaDAO dao = new AccumuloRyaDAO();
         dao.setConnector(accumuloConn);
         dao.setConf(makeConfig());
         dao.init();
 
         // Triples that are loaded into Rya before the PCJ is created.
+        final ValueFactory vf = new ValueFactoryImpl();
+
         final Set<RyaStatement> historicTriples = Sets.newHashSet(
-                makeRyaStatement(makeStatement("http://Alice", "http://talksTo", "http://Eve"),"U"),
-                makeRyaStatement(makeStatement("http://Bob", "http://talksTo", "http://Eve"),"V"),
-                makeRyaStatement(makeStatement("http://Charlie", "http://talksTo", "http://Eve"),"W"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),"U"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),"V"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),"W"),
 
-                makeRyaStatement(makeStatement("http://Eve", "http://helps", "http://Kevin"), "U"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://helps"), vf.createURI("http://Kevin")), "U"),
 
-                makeRyaStatement(makeStatement("http://Bob", "http://worksAt", "http://Chipotle"), "W"),
-                makeRyaStatement(makeStatement("http://Charlie", "http://worksAt", "http://Chipotle"), "V"),
-                makeRyaStatement(makeStatement("http://Eve", "http://worksAt", "http://Chipotle"), "U"),
-                makeRyaStatement(makeStatement("http://David", "http://worksAt", "http://Chipotle"), "V"));
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")), "W"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")), "V"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")), "U"),
+                makeRyaStatement(vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")), "V"));
 
         dao.add(historicTriples.iterator());
         dao.flush();
-        
+
         // The expected results of the SPARQL query once the PCJ has been computed.
         final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Bob"))));
-        expected.add(makeBindingSet(
-                new BindingImpl("x", new URIImpl("http://Charlie"))));
-        
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Bob"));
+        expected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("x", vf.createURI("http://Charlie"));
+        expected.add(bs);
+
         // Create the PCJ table.
         final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = pcjStorage.createPcj(sparql);
 
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+        }
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        Set<BindingSet> results = Sets.newHashSet(pcjStorage.listResults(pcjId));
+        super.getMiniFluo().waitForObservers();
+
+        final Set<BindingSet> results = Sets.newHashSet(pcjStorage.listResults(pcjId));
         Assert.assertEquals(expected, results);
     }
-    
-    
+
     private AccumuloRdfConfiguration makeConfig() {
         final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
         conf.setTablePrefix(RYA_INSTANCE_NAME);
         // Accumulo connection information.
         conf.set(ConfigUtils.CLOUDBASE_USER, ACCUMULO_USER);
         conf.set(ConfigUtils.CLOUDBASE_PASSWORD, ACCUMULO_PASSWORD);
-        conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instanceName);
-        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zookeepers);
+        conf.set(ConfigUtils.CLOUDBASE_INSTANCE, super.getMiniAccumuloCluster().getInstanceName());
+        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, super.getMiniAccumuloCluster().getZooKeepers());
         conf.set(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH, "U,V,W");
 
         return conf;
     }
-    
-    
-    private static RyaStatement makeRyaStatement(Statement statement, String visibility) throws UnsupportedEncodingException {
-    	
-    	RyaStatement ryaStatement = RdfToRyaConversions.convertStatement(statement);
+
+    private static RyaStatement makeRyaStatement(final Statement statement, final String visibility) throws UnsupportedEncodingException {
+    	final RyaStatement ryaStatement = RdfToRyaConversions.convertStatement(statement);
     	ryaStatement.setColumnVisibility(visibility.getBytes("UTF-8"));
     	return ryaStatement;
-    	
     }
-
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
index ccc2c20..e7ced90 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
@@ -37,6 +37,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.rya.accumulo.AccumuloRdfConfiguration;
 import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
@@ -44,11 +47,13 @@ import org.apache.rya.api.client.RyaClient;
 import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
 import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
 import org.apache.rya.api.domain.RyaStatement;
+import org.apache.rya.api.domain.RyaURI;
 import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.PcjTableNameFactory;
 import org.apache.rya.rdftriplestore.RyaSailRepository;
@@ -56,10 +61,9 @@ import org.apache.rya.sail.config.RyaSailFactory;
 import org.junit.Test;
 import org.openrdf.model.URI;
 import org.openrdf.model.ValueFactory;
-import org.openrdf.model.impl.URIImpl;
 import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
 import org.openrdf.repository.RepositoryConnection;
 import org.openrdf.sail.Sail;
 
@@ -70,7 +74,7 @@ import com.google.common.base.Optional;
  * Integration tests that ensure the Fluo Application properly exports PCJ
  * results with the correct Visibility values.
  */
-public class PcjVisibilityIT extends ITBase {
+public class PcjVisibilityIT extends RyaExportITBase {
 
     private static final ValueFactory VF = new ValueFactoryImpl();
 
@@ -94,6 +98,10 @@ public class PcjVisibilityIT extends ITBase {
                   "?worker <" + WORKS_AT + "> <" + BURGER_JOINT + ">. " +
                 "}";
 
+        final Connector accumuloConn = super.getAccumuloConnector();
+        final String instanceName = super.getMiniAccumuloCluster().getInstanceName();
+        final String zookeepers = super.getMiniAccumuloCluster().getZooKeepers();
+
         final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
                 ACCUMULO_USER,
                 ACCUMULO_PASSWORD.toCharArray(),
@@ -103,7 +111,7 @@ public class PcjVisibilityIT extends ITBase {
         final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
 
         // Grant the root user the "u" authorization.
-        accumuloConn.securityOperations().changeUserAuthorizations(ACCUMULO_USER, new Authorizations("u"));
+        super.getAccumuloConnector().securityOperations().changeUserAuthorizations(ACCUMULO_USER, new Authorizations("u"));
 
         // Setup a connection to the Rya instance that uses the "u" authorizations. This ensures
         // any statements that are inserted will have the "u" authorization on them and that the
@@ -127,7 +135,7 @@ public class PcjVisibilityIT extends ITBase {
             ryaConn.add(VF.createStatement(BOB, WORKS_AT, BURGER_JOINT));
 
             // Wait for Fluo to finish processing.
-            fluo.waitForObservers();
+            super.getMiniFluo().waitForObservers();
 
             // Fetch the exported result and show that its column visibility has been simplified.
             final String pcjTableName = new PcjTableNameFactory().makeTableName(RYA_INSTANCE_NAME, pcjId);
@@ -171,41 +179,46 @@ public class PcjVisibilityIT extends ITBase {
 
         // Triples that will be streamed into Fluo after the PCJ has been created.
         final Map<RyaStatement, String> streamedTriples = new HashMap<>();
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Alice", "http://talksTo", "http://Bob"), "A&B");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Bob", "http://livesIn", "http://London"), "A");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Bob", "http://worksAt", "http://Chipotle"), "B");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"),new RyaURI("http://Bob")), "A&B");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://livesIn"),new RyaURI("http://London")), "A");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Bob"), new RyaURI("http://worksAt"),new RyaURI("http://Chipotle")), "B");
+
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"),new RyaURI("http://Charlie")), "B&C");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://livesIn"),new RyaURI("http://London")), "B");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Charlie"), new RyaURI("http://worksAt"),new RyaURI("http://Chipotle")), "C");
 
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Alice", "http://talksTo", "http://Charlie"), "B&C");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Charlie", "http://livesIn", "http://London"), "B");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Charlie", "http://worksAt", "http://Chipotle"), "C");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"),new RyaURI("http://David")), "C&D");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://David"), new RyaURI("http://livesIn"),new RyaURI("http://London")), "C");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://David"), new RyaURI("http://worksAt"),new RyaURI("http://Chipotle")), "D");
 
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Alice", "http://talksTo", "http://David"), "C&D");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://David", "http://livesIn", "http://London"), "C");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://David", "http://worksAt", "http://Chipotle"), "D");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Alice"), new RyaURI("http://talksTo"),new RyaURI("http://Eve")), "D&E");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://livesIn"),new RyaURI("http://Leeds")), "D");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Eve"), new RyaURI("http://worksAt"),new RyaURI("http://Chipotle")), "E");
 
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"), "D&E");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Eve", "http://livesIn", "http://Leeds"), "D");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Eve", "http://worksAt", "http://Chipotle"), "E");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://talksTo"),new RyaURI("http://Alice")), "");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://livesIn"),new RyaURI("http://London")), "");
+        addStatementVisibilityEntry(streamedTriples, new RyaStatement(new RyaURI("http://Frank"), new RyaURI("http://worksAt"),new RyaURI("http://Chipotle")), "");
 
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Frank", "http://talksTo", "http://Alice"), "");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Frank", "http://livesIn", "http://London"), "");
-        addStatementVisibilityEntry(streamedTriples, makeRyaStatement("http://Frank", "http://worksAt", "http://Chipotle"), "");
+        final Connector accumuloConn = super.getAccumuloConnector();
 
         // Create the PCJ Table in Accumulo.
         final PrecomputedJoinStorage rootStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
         final String pcjId = rootStorage.createPcj(sparql);
 
-        // Create the PCJ in Fluo.
-        new CreatePcj().withRyaIntegration(pcjId, rootStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
 
-        // Stream the data into Fluo.
-        for(final RyaStatement statement : streamedTriples.keySet()) {
-            final Optional<String> visibility = Optional.of(streamedTriples.get(statement));
-            new InsertTriples().insert(fluoClient, statement, visibility);
+        try( final FluoClient fluoClient = FluoFactory.newClient( super.getFluoConfiguration() )) {
+            // Create the PCJ in Fluo.
+            new CreatePcj().withRyaIntegration(pcjId, rootStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
+
+            // Stream the data into Fluo.
+            for(final RyaStatement statement : streamedTriples.keySet()) {
+                final Optional<String> visibility = Optional.of(streamedTriples.get(statement));
+                new InsertTriples().insert(fluoClient, statement, visibility);
+            }
         }
 
         // Fetch the exported results from Accumulo once the observers finish working.
-        fluo.waitForObservers();
+        super.getMiniFluo().waitForObservers();
 
         setupTestUsers(accumuloConn, RYA_INSTANCE_NAME, pcjId);
 
@@ -213,80 +226,98 @@ public class PcjVisibilityIT extends ITBase {
         final Set<BindingSet> rootResults = toSet( rootStorage.listResults(pcjId));
 
         final Set<BindingSet> rootExpected = Sets.newHashSet();
-        rootExpected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Bob")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        rootExpected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Charlie")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        rootExpected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Eve")),
-                new BindingImpl("city", new URIImpl("http://Leeds"))));
-        rootExpected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://David")),
-                new BindingImpl("city", new URIImpl("http://London"))));
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("customer", VF.createURI("http://Alice"));
+        bs.addBinding("worker", VF.createURI("http://Bob"));
+        bs.addBinding("city", VF.createURI("http://London"));
+        rootExpected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", VF.createURI("http://Alice"));
+        bs.addBinding("worker", VF.createURI("http://Charlie"));
+        bs.addBinding("city", VF.createURI("http://London"));
+        rootExpected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", VF.createURI("http://Alice"));
+        bs.addBinding("worker", VF.createURI("http://Eve"));
+        bs.addBinding("city", VF.createURI("http://Leeds"));
+        rootExpected.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", VF.createURI("http://Alice"));
+        bs.addBinding("worker", VF.createURI("http://David"));
+        bs.addBinding("city", VF.createURI("http://London"));
+        rootExpected.add(bs);
 
         assertEquals(rootExpected, rootResults);
 
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+
         // Verify AB
         final Connector abConn = cluster.getConnector("abUser", "password");
-        final PrecomputedJoinStorage abStorage = new AccumuloPcjStorage(abConn, RYA_INSTANCE_NAME);
-        final Set<BindingSet> abResults = toSet( abStorage.listResults(pcjId) );
+        try(final PrecomputedJoinStorage abStorage = new AccumuloPcjStorage(abConn, RYA_INSTANCE_NAME)) {
+            final Set<BindingSet> abResults = toSet( abStorage.listResults(pcjId) );
 
-        final Set<BindingSet> abExpected = Sets.newHashSet();
-        abExpected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Bob")),
-                new BindingImpl("city", new URIImpl("http://London"))));
+            final Set<BindingSet> abExpected = Sets.newHashSet();
+            bs = new MapBindingSet();
+            bs.addBinding("customer", VF.createURI("http://Alice"));
+            bs.addBinding("worker", VF.createURI("http://Bob"));
+            bs.addBinding("city", VF.createURI("http://London"));
+            abExpected.add(bs);
 
-        assertEquals(abExpected, abResults);
+            assertEquals(abExpected, abResults);
+        }
 
         // Verify ABC
         final Connector abcConn = cluster.getConnector("abcUser", "password");
-        final PrecomputedJoinStorage abcStorage = new AccumuloPcjStorage(abcConn, RYA_INSTANCE_NAME);
-        final Set<BindingSet> abcResults = toSet( abcStorage.listResults(pcjId) );
-
-        final Set<BindingSet> abcExpected = Sets.newHashSet();
-        abcExpected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Bob")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        abcExpected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Charlie")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-
-        assertEquals(abcExpected, abcResults);
+        try(final PrecomputedJoinStorage abcStorage = new AccumuloPcjStorage(abcConn, RYA_INSTANCE_NAME)) {
+            final Set<BindingSet> abcResults = toSet( abcStorage.listResults(pcjId) );
+
+            final Set<BindingSet> abcExpected = Sets.newHashSet();
+            bs = new MapBindingSet();
+            bs.addBinding("customer", VF.createURI("http://Alice"));
+            bs.addBinding("worker", VF.createURI("http://Bob"));
+            bs.addBinding("city", VF.createURI("http://London"));
+            abcExpected.add(bs);
+
+            bs = new MapBindingSet();
+            bs.addBinding("customer", VF.createURI("http://Alice"));
+            bs.addBinding("worker", VF.createURI("http://Charlie"));
+            bs.addBinding("city", VF.createURI("http://London"));
+            abcExpected.add(bs);
+
+            assertEquals(abcExpected, abcResults);
+        }
 
         // Verify ADE
         final Connector adeConn = cluster.getConnector("adeUser", "password");
-        final PrecomputedJoinStorage adeStorage = new AccumuloPcjStorage(adeConn, RYA_INSTANCE_NAME);
-        final Set<BindingSet> adeResults = toSet( adeStorage.listResults(pcjId) );
+        try(final PrecomputedJoinStorage adeStorage = new AccumuloPcjStorage(adeConn, RYA_INSTANCE_NAME)) {
+            final Set<BindingSet> adeResults = toSet( adeStorage.listResults(pcjId) );
 
-        final Set<BindingSet> adeExpected = Sets.newHashSet();
-        adeExpected.add(makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Eve")),
-                new BindingImpl("city", new URIImpl("http://Leeds"))));
+            final Set<BindingSet> adeExpected = Sets.newHashSet();
+            bs = new MapBindingSet();
+            bs.addBinding("customer", VF.createURI("http://Alice"));
+            bs.addBinding("worker", VF.createURI("http://Eve"));
+            bs.addBinding("city", VF.createURI("http://Leeds"));
+            adeExpected.add(bs);
 
-        assertEquals(adeExpected, adeResults);
+            assertEquals(adeExpected, adeResults);
+        }
 
         // Verify no auths.
         final Connector noAuthConn = cluster.getConnector("noAuth", "password");
-        final PrecomputedJoinStorage noAuthStorage = new AccumuloPcjStorage(noAuthConn, RYA_INSTANCE_NAME);
-        final Set<BindingSet> noAuthResults = toSet( noAuthStorage.listResults(pcjId) );
-        assertTrue( noAuthResults.isEmpty() );
+        try(final PrecomputedJoinStorage noAuthStorage = new AccumuloPcjStorage(noAuthConn, RYA_INSTANCE_NAME)) {
+            final Set<BindingSet> noAuthResults = toSet( noAuthStorage.listResults(pcjId) );
+            assertTrue( noAuthResults.isEmpty() );
+        }
     }
 
     private void setupTestUsers(final Connector accumuloConn, final String ryaInstanceName, final String pcjId) throws AccumuloException, AccumuloSecurityException {
         final PasswordToken pass = new PasswordToken("password");
         final SecurityOperations secOps = accumuloConn.securityOperations();
 
-        // XXX We need the table name so that we can update security for the users.
+        // We need the table name so that we can update security for the users.
         final String pcjTableName = new PcjTableNameFactory().makeTableName(ryaInstanceName, pcjId);
 
         // Give the 'roor' user authorizations to see everything.
@@ -317,11 +348,15 @@ public class PcjVisibilityIT extends ITBase {
         triplesMap.put(statement, visibility);
     }
 
-    private Set<BindingSet> toSet(final Iterable<BindingSet> bindingSets) {
+    private Set<BindingSet> toSet(final CloseableIterator<BindingSet> bindingSets) throws Exception {
         final Set<BindingSet> set = new HashSet<>();
-        for(final BindingSet bindingSet : bindingSets) {
-            set.add( bindingSet );
+        try {
+            while(bindingSets.hasNext()) {
+                set.add( bindingSets.next() );
+            }
+        } finally {
+            bindingSets.close();
         }
         return set;
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/rya.pcj.functions.geo/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/rya.pcj.functions.geo/pom.xml b/extras/rya.pcj.fluo/rya.pcj.functions.geo/pom.xml
index 8aa257b..885a076 100644
--- a/extras/rya.pcj.fluo/rya.pcj.functions.geo/pom.xml
+++ b/extras/rya.pcj.fluo/rya.pcj.functions.geo/pom.xml
@@ -82,11 +82,8 @@ under the License.
             <scope>test</scope>
         </dependency>
         <dependency>
-            <!--  BaseIT exists here, needs test jar to be generated. -->
-            <groupId>org.apache.rya</groupId>
-            <artifactId>rya.pcj.fluo.integration</artifactId>
-            <version>${project.version}</version>
-            <classifier>tests</classifier>
+             <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-recipes-test</artifactId>
             <scope>test</scope>
         </dependency>
     </dependencies>
@@ -125,5 +122,42 @@ under the License.
                     </configuration>
                 </plugin>
         </plugins>
+        <pluginManagement>
+            <plugins>
+                <!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
+                <plugin>
+                    <groupId>org.eclipse.m2e</groupId>
+                    <artifactId>lifecycle-mapping</artifactId>
+                    <version>1.0.0</version>
+                    <configuration>
+                        <lifecycleMappingMetadata>
+                            <pluginExecutions>
+                                <pluginExecution>
+                                    <pluginExecutionFilter>
+                                        <groupId>
+                                            org.codehaus.mojo
+                                        </groupId>
+                                        <artifactId>
+                                            properties-maven-plugin
+                                        </artifactId>
+                                        <versionRange>
+                                            [1.0.0,)
+                                        </versionRange>
+                                        <goals>
+                                            <goal>
+                                                set-system-properties
+                                            </goal>
+                                        </goals>
+                                    </pluginExecutionFilter>
+                                    <action>
+                                        <ignore></ignore>
+                                    </action>
+                                </pluginExecution>
+                            </pluginExecutions>
+                        </lifecycleMappingMetadata>
+                    </configuration>
+                </plugin>
+            </plugins>
+        </pluginManagement>
     </build>
 </project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java b/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
new file mode 100644
index 0000000..5fe999f
--- /dev/null
+++ b/extras/rya.pcj.fluo/rya.pcj.functions.geo/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.config.ObserverSpecification;
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
+import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaExportParameters;
+import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.openrdf.sail.Sail;
+
+/**
+ * The base Integration Test class used for Fluo applications that export to a Rya PCJ Index.
+ */
+public class RyaExportITBase extends AccumuloExportITBase {
+
+    protected static final String RYA_INSTANCE_NAME = "test_";
+
+    private RyaSailRepository ryaSailRepo = null;
+
+    public RyaExportITBase() {
+        // Indicates that MiniFluo should be started before each test.
+        super(true);
+    }
+
+    @BeforeClass
+    public static void setupLogging() {
+        BasicConfigurator.configure();
+        Logger.getRootLogger().setLevel(Level.ERROR);
+    }
+
+    @Override
+    protected void preFluoInitHook() throws Exception {
+        // Setup the observers that will be used by the Fluo PCJ Application.
+        final List<ObserverSpecification> observers = new ArrayList<>();
+        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
+        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
+        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
+        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
+        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
+
+        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
+        final HashMap<String, String> exportParams = new HashMap<>();
+        final RyaExportParameters ryaParams = new RyaExportParameters(exportParams);
+        ryaParams.setExportToRya(true);
+        ryaParams.setRyaInstanceName(RYA_INSTANCE_NAME);
+        ryaParams.setAccumuloInstanceName(super.getMiniAccumuloCluster().getInstanceName());
+        ryaParams.setZookeeperServers(super.getMiniAccumuloCluster().getZooKeepers());
+        ryaParams.setExporterUsername(ACCUMULO_USER);
+        ryaParams.setExporterPassword(ACCUMULO_PASSWORD);
+
+        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
+        observers.add(exportObserverConfig);
+
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
+    }
+
+    @Before
+    public void setupRya() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Install the Rya instance to the mini accumulo cluster.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getInstall().install(RYA_INSTANCE_NAME, InstallConfiguration.builder()
+                .setEnableTableHashPrefix(false)
+                .setEnableFreeTextIndex(false)
+                .setEnableEntityCentricIndex(false)
+                .setEnableGeoIndex(false)
+                .setEnableTemporalIndex(false)
+                .setEnablePcjIndex(true)
+                .setFluoPcjAppName( super.getFluoConfiguration().getApplicationName() )
+                .build());
+
+        // Connect to the Rya instance that was just installed.
+        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
+        final Sail sail = RyaSailFactory.getInstance(conf);
+        ryaSailRepo = new RyaSailRepository(sail);
+    }
+
+    @After
+    public void teardownRya() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Uninstall the instance of Rya.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(
+                    ACCUMULO_USER,
+                    ACCUMULO_PASSWORD.toCharArray(),
+                    instanceName,
+                    zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getUninstall().uninstall(RYA_INSTANCE_NAME);
+
+        // Shutdown the repo.
+        ryaSailRepo.shutDown();
+    }
+
+    /**
+     * @return A {@link RyaSailRepository} that is connected to the Rya instance that statements are loaded into.
+     */
+    protected RyaSailRepository getRyaSailRepository() throws Exception {
+        return ryaSailRepo;
+    }
+
+    protected AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(RYA_INSTANCE_NAME);
+
+        // Accumulo connection information.
+        conf.setAccumuloUser(AccumuloExportITBase.ACCUMULO_USER);
+        conf.setAccumuloPassword(AccumuloExportITBase.ACCUMULO_PASSWORD);
+        conf.setAccumuloInstance(super.getAccumuloConnector().getInstance().getInstanceName());
+        conf.setAccumuloZookeepers(super.getAccumuloConnector().getInstance().getZooKeepers());
+        conf.setAuths("");
+
+        // PCJ configuration information.
+        conf.set(ConfigUtils.USE_PCJ, "true");
+        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
+        conf.set(ConfigUtils.FLUO_APP_NAME, super.getFluoConfiguration().getApplicationName());
+        conf.set(ConfigUtils.PCJ_STORAGE_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
+        conf.set(ConfigUtils.PCJ_UPDATER_TYPE,
+                PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
+
+        conf.setDisplayQueryPlan(true);
+
+        return conf;
+    }
+}
\ No newline at end of file


[3/9] incubator-rya git commit: RYA-260 Fluo PCJ application has had Aggregation support added to it. Also fixed a bunch of resource leaks that were causing integration tests to fail. Closes #156.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
index 5e12fac..7fa28ab 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
@@ -18,58 +18,39 @@
  */
 package org.apache.rya.indexing.pcj.fluo.integration;
 
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.Properties;
+import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 
-import org.I0Itec.zkclient.ZkClient;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
-import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
-import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
-import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.pcj.fluo.KafkaExportITBase;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 import org.junit.Test;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.Binding;
+import org.openrdf.model.Statement;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
 import org.openrdf.query.BindingSet;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
 
-import kafka.admin.AdminUtils;
-import kafka.admin.RackAwareMode;
-import kafka.server.KafkaConfig;
-import kafka.server.KafkaServer;
-import kafka.utils.MockTime;
-import kafka.utils.TestUtils;
-import kafka.utils.Time;
-import kafka.utils.ZKStringSerializer$;
-import kafka.utils.ZkUtils;
-import kafka.zk.EmbeddedZookeeper;
-
 /**
  * Performs integration tests over the Fluo application geared towards Kafka PCJ exporting.
  * <p>
@@ -78,215 +59,463 @@ import kafka.zk.EmbeddedZookeeper;
  * $ cd rya/extras/rya.pcj.fluo/pcj.fluo.integration
  * $ mvn surefire:test -Dtest=KafkaExportIT
  */
-public class KafkaExportIT extends ITBase {
-    private static final Log logger = LogFactory.getLog(KafkaExportIT.class);
-
-    private static final String ZKHOST = "127.0.0.1";
-    private static final String BROKERHOST = "127.0.0.1";
-    private static final String BROKERPORT = "9092";
-    private static final String TOPIC = "testTopic";
-    private ZkUtils zkUtils;
-    private KafkaServer kafkaServer;
-    private EmbeddedZookeeper zkServer;
-    private ZkClient zkClient;
-
-
-        /**
-     * setup mini kafka and call the super to setup mini fluo
-     * 
-     * @see org.apache.rya.indexing.pcj.fluo.ITBase#setupMiniResources()
-     */
-    @Override
-    public void setupMiniResources() throws Exception {
-        super.setupMiniResources();
-
-        zkServer = new EmbeddedZookeeper();
-        String zkConnect = ZKHOST + ":" + zkServer.port();
-        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
-        zkUtils = ZkUtils.apply(zkClient, false);
-
-        // setup Broker
-        Properties brokerProps = new Properties();
-        brokerProps.setProperty("zookeeper.connect", zkConnect);
-        brokerProps.setProperty("broker.id", "0");
-        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
-        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
-        KafkaConfig config = new KafkaConfig(brokerProps);
-        Time mock = new MockTime();
-        kafkaServer = TestUtils.createServer(config, mock);
-
-        logger.trace("setup kafka and fluo.");
-    }
-
-    /**
-     * Test kafka without rya code to make sure kafka works in this environment.
-     * If this test fails then its a testing environment issue, not with Rya.
-     * Source: https://github.com/asmaier/mini-kafka
-     * 
-     * @throws InterruptedException
-     * @throws IOException
-     */
-        @Test
-        public void embeddedKafkaTest() throws InterruptedException, IOException {
-            // create topic
-            AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
-
-            // setup producer
-            Properties producerProps = new Properties();
-            producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
-            producerProps.setProperty("key.serializer","org.apache.kafka.common.serialization.IntegerSerializer");
-            producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-            KafkaProducer<Integer, byte[]> producer = new KafkaProducer<Integer, byte[]>(producerProps);
-
-            // setup consumer
-            Properties consumerProps = new Properties();
-            consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
-            consumerProps.setProperty("group.id", "group0");
-            consumerProps.setProperty("client.id", "consumer0");
-            consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.IntegerDeserializer");
-            consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-            consumerProps.put("auto.offset.reset", "earliest");  // to make sure the consumer starts from the beginning of the topic
-            KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
-            consumer.subscribe(Arrays.asList(TOPIC));
-
-            // send message
-            ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(TOPIC, 42, "test-message".getBytes(StandardCharsets.UTF_8));
-            producer.send(data);
-            producer.close();
-
-            // starting consumer
-        ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
-            assertEquals(1, records.count());
-            Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
-            ConsumerRecord<Integer, byte[]> record = recordIterator.next();
-        logger.trace(String.format("offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value()));
-            assertEquals(42, (int) record.key());
-            assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
-        consumer.close();
-    }
+public class KafkaExportIT extends KafkaExportITBase {
 
     @Test
     public void newResultsExportedTest() throws Exception {
-        final String sparql = "SELECT ?customer ?worker ?city " + "{ " + "FILTER(?customer = <http://Alice>) " + "FILTER(?city = <http://London>) " + "?customer <http://talksTo> ?worker. " + "?worker <http://livesIn> ?city. " + "?worker <http://worksAt> <http://Chipotle>. " + "}";
-    
+        final String sparql =
+                "SELECT ?customer ?worker ?city { " +
+                    "FILTER(?customer = <http://Alice>) " +
+                    "FILTER(?city = <http://London>) " +
+                    "?customer <http://talksTo> ?worker. " +
+                    "?worker <http://livesIn> ?city. " +
+                    "?worker <http://worksAt> <http://Chipotle>. " +
+                "}";
+
         // Triples that will be streamed into Fluo after the PCJ has been created.
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(makeRyaStatement("http://Alice", "http://talksTo", "http://Bob"), makeRyaStatement("http://Bob", "http://livesIn", "http://London"), makeRyaStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-                        makeRyaStatement("http://Alice", "http://talksTo", "http://Charlie"), makeRyaStatement("http://Charlie", "http://livesIn", "http://London"), makeRyaStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-                        makeRyaStatement("http://Alice", "http://talksTo", "http://David"), makeRyaStatement("http://David", "http://livesIn", "http://London"), makeRyaStatement("http://David", "http://worksAt", "http://Chipotle"),
-                        makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"), makeRyaStatement("http://Eve", "http://livesIn", "http://Leeds"), makeRyaStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-                        makeRyaStatement("http://Frank", "http://talksTo", "http://Alice"), makeRyaStatement("http://Frank", "http://livesIn", "http://London"), makeRyaStatement("http://Frank", "http://worksAt", "http://Chipotle"));
-    
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements =
+                Sets.newHashSet(
+                        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")),
+                        vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                        vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Charlie")),
+                        vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                        vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://David")),
+                        vf.createStatement(vf.createURI("http://David"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                        vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                        vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                        vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://livesIn"), vf.createURI("http://Leeds")),
+                        vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+                        vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
+                        vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                        vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
         // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("customer", new URIImpl("http://Alice")), new BindingImpl("worker", new URIImpl("http://Bob")), new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add(makeBindingSet(new BindingImpl("customer", new URIImpl("http://Alice")), new BindingImpl("worker", new URIImpl("http://Charlie")), new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add(makeBindingSet(new BindingImpl("customer", new URIImpl("http://Alice")), new BindingImpl("worker", new URIImpl("http://David")), new BindingImpl("city", new URIImpl("http://London"))));
-    
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-    
-        // Tell the Fluo app to maintain the PCJ.
-        CreatePcj createPcj = new CreatePcj();
-        String QueryIdIsTopicName = createPcj.withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
-    
-        // Fetch the exported results from Accumulo once the observers finish working.
-        fluo.waitForObservers();
-
-        /// KafkaConsumer<Integer, byte[]> consumer = makeConsumer(QueryIdIsTopicName);
-        KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(QueryIdIsTopicName);
-
-        // starting consumer polling for messages
-        /// ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
-        ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(3000);
-        /// Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
-        Iterator<ConsumerRecord<Integer, VisibilityBindingSet>> recordIterator = records.iterator();
-        boolean allExpected = true;
-        ConsumerRecord<Integer, VisibilityBindingSet> unexpectedRecord = null;
-        while (recordIterator.hasNext()) {
-            ConsumerRecord<Integer, VisibilityBindingSet> record = recordIterator.next();
-            logger.trace(String.format("Consumed offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value().toString()));
-            boolean expectedThis = expected.contains(record.value());
-            if (!expectedThis) {
-                logger.trace("This consumed record is not expected.");
-                unexpectedRecord = record;
-            }
-            allExpected = allExpected && expectedThis;
-        }
-        assertTrue("Must consume expected record: not expected:" + unexpectedRecord, allExpected);
-        assertNotEquals("Should get some results", 0, records.count());
-        // assertEquals(42, (int) record.key());
-        // assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
+        final Set<BindingSet> expectedResult = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Bob"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResult.add( new VisibilityBindingSet(bs) );
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Charlie"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResult.add( new VisibilityBindingSet(bs) );
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://David"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResult.add( new VisibilityBindingSet(bs) );
+
+        // Ensure the last result matches the expected result.
+        final Set<VisibilityBindingSet> result = readAllResults(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void min() throws Exception {
+        // A query that finds the minimum price for an item within the inventory.
+        final String sparql =
+                "SELECT (min(?price) as ?minPrice) { " +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(2.50)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(0.99)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(4.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("minPrice", vf.createLiteral(0.99));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void max() throws Exception {
+        // A query that finds the maximum price for an item within the inventory.
+        final String sparql =
+                "SELECT (max(?price) as ?maxPrice) { " +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(2.50)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(0.99)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(4.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("maxPrice", vf.createLiteral(4.99));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void count() throws Exception {
+        // A query that counts the number of unique items that are in the inventory.
+        final String sparql =
+                "SELECT (count(?item) as ?itemCount) { " +
+                    "?item <urn:id> ?id . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                // Three that are part of the count.
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:id"), vf.createLiteral(UUID.randomUUID().toString())),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:id"), vf.createLiteral(UUID.randomUUID().toString())),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:id"), vf.createLiteral(UUID.randomUUID().toString())),
+
+                // One that is not.
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(3.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("itemCount", vf.createLiteral("3", XMLSchema.INTEGER));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void sum() throws Exception {
+        // A query that sums the counts of all of the items that are in the inventory.
+        final String sparql =
+                "SELECT (sum(?count) as ?itemSum) { " +
+                    "?item <urn:count> ?count . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:count"), vf.createLiteral(5)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:count"), vf.createLiteral(7)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:count"), vf.createLiteral(2)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("itemSum", vf.createLiteral("14", XMLSchema.INTEGER));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void average() throws Exception  {
+        // A query that finds the average price for an item that is in the inventory.
+        final String sparql =
+                "SELECT (avg(?price) as ?averagePrice) { " +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(3)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(4)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(8)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("averagePrice", vf.createLiteral("5", XMLSchema.DECIMAL));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void aggregateWithFilter() throws Exception {
+        // A query that filters results from a statement pattern before applying the aggregation function.
+        final String sparql =
+                "SELECT (min(?price) as ?minPrice) { " +
+                    "FILTER(?price > 1.00) " +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(2.50)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(0.99)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(4.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("minPrice", vf.createLiteral(2.50));
 
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
     }
 
-    /**
-     * A helper function for creating a {@link BindingSet} from an array of
-     * {@link Binding}s.
-     *
-     * @param bindings
-     *            - The bindings to include in the set. (not null)
-     * @return A {@link BindingSet} holding the bindings.
-     */
-    protected static BindingSet makeBindingSet(final Binding... bindings) {
-        return new VisibilityBindingSet(ITBase.makeBindingSet(bindings));
+    @Test
+    public void multipleAggregations() throws Exception {
+        // A query that both counts the number of items being averaged and finds the average price.
+        final String sparql =
+                "SELECT (count(?item) as ?itemCount) (avg(?price) as ?averagePrice) {" +
+                    "?item <urn:price> ?price . " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(5.25)),
+                vf.createStatement(vf.createURI("urn:gum"), vf.createURI("urn:price"), vf.createLiteral(7)),
+                vf.createStatement(vf.createURI("urn:sandwich"), vf.createURI("urn:price"), vf.createLiteral(2.75)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final MapBindingSet expectedResult = new MapBindingSet();
+        expectedResult.addBinding("itemCount", vf.createLiteral("3", XMLSchema.INTEGER));
+        expectedResult.addBinding("averagePrice", vf.createLiteral("5.0", XMLSchema.DECIMAL));
+
+        // Ensure the last result matches the expected result.
+        final VisibilityBindingSet result = readLastResult(pcjId);
+        assertEquals(expectedResult, result);
     }
 
-    /**
-     * @param TopicName
-     * @return
-     */
-    protected KafkaConsumer<Integer, VisibilityBindingSet> makeConsumer(String TopicName) {
-        // setup consumer
-        Properties consumerProps = new Properties();
-        consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
-        consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
-        consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
-        consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
-        consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
-        // "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // to make sure the consumer starts from the beginning of the topic
-        /// KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
-        KafkaConsumer<Integer, VisibilityBindingSet> consumer = new KafkaConsumer<>(consumerProps);
-        consumer.subscribe(Arrays.asList(TopicName));
-        return consumer;
+    @Test
+    public void groupBySingleBinding() throws Exception {
+        // A query that groups what is aggregated by one of the keys.
+        final String sparql =
+                "SELECT ?item (avg(?price) as ?averagePrice) {" +
+                    "?item <urn:price> ?price . " +
+                "} " +
+                "GROUP BY ?item";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(5.25)),
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(7)),
+                vf.createStatement(vf.createURI("urn:apple"), vf.createURI("urn:price"), vf.createLiteral(2.75)),
+                vf.createStatement(vf.createURI("urn:banana"), vf.createURI("urn:price"), vf.createLiteral(2.75)),
+                vf.createStatement(vf.createURI("urn:banana"), vf.createURI("urn:price"), vf.createLiteral(1.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("item", vf.createURI("urn:apple"));
+        bs.addBinding("averagePrice", vf.createLiteral("5.0", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs) );
+
+        bs = new MapBindingSet();
+        bs.addBinding("item", vf.createURI("urn:banana"));
+        bs.addBinding("averagePrice", vf.createLiteral("2.37", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs) );
+
+        // Verify the end results of the query match the expected results.
+        final Set<VisibilityBindingSet> results = readGroupedResults(pcjId, new VariableOrder("item"));
+        assertEquals(expectedResults, results);
     }
 
-    /**
-     * Add info about the kafka queue/topic to receive the export.
-     * Call super to get the Rya parameters.
-     * 
-     * @see org.apache.rya.indexing.pcj.fluo.ITBase#setExportParameters(java.util.HashMap)
-     */
-    @Override
-    protected void setExportParameters(HashMap<String, String> exportParams) {
-        // Get the defaults
-        super.setExportParameters(exportParams);
-        // Add the kafka parameters
-        final KafkaExportParameters kafkaParams = new KafkaExportParameters(exportParams);
-        kafkaParams.setExportToKafka(true);
-        // Configure the Producer
-        Properties producerConfig = new Properties();
-        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
-        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
-        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
-        // "org.apache.kafka.common.serialization.StringSerializer");
-        kafkaParams.addAllProducerConfig(producerConfig);
+    @Test
+    public void groupByManyBindings_avaerages() throws Exception {
+        // A query that groups what is aggregated by two of the keys.
+        final String sparql =
+                "SELECT ?type ?location (avg(?price) as ?averagePrice) {" +
+                    "?id <urn:type> ?type . " +
+                    "?id <urn:location> ?location ." +
+                    "?id <urn:price> ?price ." +
+                "} " +
+                "GROUP BY ?type ?location";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                // American items that will be averaged.
+                vf.createStatement(vf.createURI("urn:1"), vf.createURI("urn:type"), vf.createLiteral("apple")),
+                vf.createStatement(vf.createURI("urn:1"), vf.createURI("urn:location"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("urn:1"), vf.createURI("urn:price"), vf.createLiteral(2.50)),
+
+                vf.createStatement(vf.createURI("urn:2"), vf.createURI("urn:type"), vf.createLiteral("cheese")),
+                vf.createStatement(vf.createURI("urn:2"), vf.createURI("urn:location"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("urn:2"), vf.createURI("urn:price"), vf.createLiteral(.99)),
+
+                vf.createStatement(vf.createURI("urn:3"), vf.createURI("urn:type"), vf.createLiteral("cheese")),
+                vf.createStatement(vf.createURI("urn:3"), vf.createURI("urn:location"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("urn:3"), vf.createURI("urn:price"), vf.createLiteral(5.25)),
+
+                // French items that will be averaged.
+                vf.createStatement(vf.createURI("urn:4"), vf.createURI("urn:type"), vf.createLiteral("cheese")),
+                vf.createStatement(vf.createURI("urn:4"), vf.createURI("urn:location"), vf.createLiteral("France")),
+                vf.createStatement(vf.createURI("urn:4"), vf.createURI("urn:price"), vf.createLiteral(8.5)),
+
+                vf.createStatement(vf.createURI("urn:5"), vf.createURI("urn:type"), vf.createLiteral("cigarettes")),
+                vf.createStatement(vf.createURI("urn:5"), vf.createURI("urn:location"), vf.createLiteral("France")),
+                vf.createStatement(vf.createURI("urn:5"), vf.createURI("urn:price"), vf.createLiteral(3.99)),
+
+                vf.createStatement(vf.createURI("urn:6"), vf.createURI("urn:type"), vf.createLiteral("cigarettes")),
+                vf.createStatement(vf.createURI("urn:6"), vf.createURI("urn:location"), vf.createLiteral("France")),
+                vf.createStatement(vf.createURI("urn:6"), vf.createURI("urn:price"), vf.createLiteral(4.99)));
+
+        // Create the PCJ in Fluo and load the statements into Rya.
+        final String pcjId = loadData(sparql, statements);
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("type", vf.createLiteral("apple", XMLSchema.STRING));
+        bs.addBinding("location", vf.createLiteral("USA", XMLSchema.STRING));
+        bs.addBinding("averagePrice", vf.createLiteral("2.5", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs) );
+
+        bs = new MapBindingSet();
+        bs.addBinding("type", vf.createLiteral("cheese", XMLSchema.STRING));
+        bs.addBinding("location", vf.createLiteral("USA", XMLSchema.STRING));
+        bs.addBinding("averagePrice", vf.createLiteral("3.12", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs) );
+
+        bs = new MapBindingSet();
+        bs.addBinding("type", vf.createLiteral("cheese", XMLSchema.STRING));
+        bs.addBinding("location", vf.createLiteral("France", XMLSchema.STRING));
+        bs.addBinding("averagePrice", vf.createLiteral("8.5", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs));
+
+        bs = new MapBindingSet();
+        bs.addBinding("type", vf.createLiteral("cigarettes", XMLSchema.STRING));
+        bs.addBinding("location", vf.createLiteral("France", XMLSchema.STRING));
+        bs.addBinding("averagePrice", vf.createLiteral("4.49", XMLSchema.DECIMAL));
+        expectedResults.add( new VisibilityBindingSet(bs) );
+
+        // Verify the end results of the query match the expected results.
+        final Set<VisibilityBindingSet> results = readGroupedResults(pcjId, new VariableOrder("type", "location"));
+        assertEquals(expectedResults, results);
     }
 
-    /**
-     * Close all the Kafka mini server and mini-zookeeper
-     * 
-     * @see org.apache.rya.indexing.pcj.fluo.ITBase#shutdownMiniResources()
-     */
-    @Override
-    public void shutdownMiniResources() {
-        super.shutdownMiniResources();
-        kafkaServer.shutdown();
-        zkClient.close();
-        zkServer.shutdown();
+    private String loadData(final String sparql, final Collection<Statement> statements) throws Exception {
+        requireNonNull(sparql);
+        requireNonNull(statements);
+
+        // Register the PCJ with Rya.
+        final Instance accInstance = super.getAccumuloConnector().getInstance();
+        final Connector accumuloConn = super.getAccumuloConnector();
+
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
+                ACCUMULO_USER,
+                ACCUMULO_PASSWORD.toCharArray(),
+                accInstance.getInstanceName(),
+                accInstance.getZooKeepers()), accumuloConn);
+
+        final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
+
+        // Write the data to Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
+        ryaConn.begin();
+        ryaConn.add(statements);
+        ryaConn.commit();
+        ryaConn.close();
+
+        // Wait for the Fluo application to finish computing the end result.
+        super.getMiniFluo().waitForObservers();
+
+        // The PCJ Id is the topic name the results will be written to.
+        return pcjId;
+    }
+
+    private Set<VisibilityBindingSet> readAllResults(final String pcjId) throws Exception {
+        requireNonNull(pcjId);
+
+        // Read all of the results from the Kafka topic.
+        final Set<VisibilityBindingSet> results = new HashSet<>();
+
+        try(final KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
+            final ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(5000);
+            final Iterator<ConsumerRecord<Integer, VisibilityBindingSet>> recordIterator = records.iterator();
+            while (recordIterator.hasNext()) {
+                results.add( recordIterator.next().value() );
+            }
+        }
+
+        return results;
+    }
+
+    private VisibilityBindingSet readLastResult(final String pcjId) throws Exception {
+        requireNonNull(pcjId);
+
+        // Read the results from the Kafka topic. The last one has the final aggregation result.
+        VisibilityBindingSet result = null;
+
+        try(final KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
+            final ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(5000);
+            final Iterator<ConsumerRecord<Integer, VisibilityBindingSet>> recordIterator = records.iterator();
+            while (recordIterator.hasNext()) {
+                result = recordIterator.next().value();
+            }
+        }
+
+        return result;
+    }
+
+    private Set<VisibilityBindingSet> readGroupedResults(final String pcjId, final VariableOrder groupByVars) {
+        requireNonNull(pcjId);
+
+        // Read the results from the Kafka topic. The last one for each set of Group By values is an aggregation result.
+        // The key in this map is a Binding Set containing only the group by variables.
+        final Map<BindingSet, VisibilityBindingSet> results = new HashMap<>();
+
+        try(final KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
+            final ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(5000);
+            final Iterator<ConsumerRecord<Integer, VisibilityBindingSet>> recordIterator = records.iterator();
+            while (recordIterator.hasNext()) {
+                final VisibilityBindingSet visBindingSet = recordIterator.next().value();
+
+                final MapBindingSet key = new MapBindingSet();
+                for(final String groupByVar : groupByVars) {
+                    key.addBinding( visBindingSet.getBinding(groupByVar) );
+                }
+
+                results.put(key, visBindingSet);
+            }
+        }
+
+        return Sets.newHashSet( results.values() );
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/c941aea8/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
index 648c5b9..08bf2e1 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
@@ -18,44 +18,47 @@
  */
 package org.apache.rya.indexing.pcj.fluo.integration;
 
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 
 import java.math.BigDecimal;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.rya.api.domain.RyaStatement;
-import org.apache.rya.api.domain.RyaType;
-import org.apache.rya.indexing.pcj.fluo.ITBase;
-import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
-import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
+import javax.xml.datatype.DatatypeFactory;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.junit.Test;
 import org.openrdf.model.Literal;
+import org.openrdf.model.Statement;
 import org.openrdf.model.URI;
 import org.openrdf.model.Value;
 import org.openrdf.model.ValueFactory;
 import org.openrdf.model.datatypes.XMLDatatypeUtil;
 import org.openrdf.model.impl.BooleanLiteralImpl;
-import org.openrdf.model.impl.LiteralImpl;
-import org.openrdf.model.impl.NumericLiteralImpl;
-import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.impl.ValueFactoryImpl;
 import org.openrdf.model.vocabulary.XMLSchema;
 import org.openrdf.query.BindingSet;
 import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException;
 import org.openrdf.query.algebra.evaluation.function.Function;
 import org.openrdf.query.algebra.evaluation.function.FunctionRegistry;
-import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.repository.sail.SailRepositoryConnection;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Sets;
+
 /**
  * Performs integration tests over the Fluo application geared towards various query structures.
- * <p>
- * These tests are being ignore so that they will not run as unit tests while building the application.
  */
-public class QueryIT extends ITBase {
+public class QueryIT extends RyaExportITBase {
 
     @Test
     public void optionalStatements() throws Exception {
@@ -69,40 +72,35 @@ public class QueryIT extends ITBase {
                     "OPTIONAL {?person <http://passedExam> ?exam } . " +
                 "}";
 
-        // Triples that will be streamed into Fluo after the PCJ has been created.
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://hasDegreeIn", "http://Computer Science"),
-                makeRyaStatement("http://Alice", "http://passedExam", "http://Certified Ethical Hacker"),
-                makeRyaStatement("http://Bob", "http://hasDegreeIn", "http://Law"),
-                makeRyaStatement("http://Bob", "http://passedExam", "http://MBE"),
-                makeRyaStatement("http://Bob", "http://passedExam", "http://BAR-Kansas"),
-                makeRyaStatement("http://Charlie", "http://hasDegreeIn", "http://Law"));
-
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add( makeBindingSet(
-                new BindingImpl("person", new URIImpl("http://Bob")),
-                new BindingImpl("exam", new URIImpl("http://MBE"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("person", new URIImpl("http://Bob")),
-                new BindingImpl("exam", new URIImpl("http://BAR-Kansas"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("person", new URIImpl("http://Charlie"))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://hasDegreeIn"), vf.createURI("http://Computer Science")),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://passedExam"), vf.createURI("http://Certified Ethical Hacker")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://hasDegreeIn"), vf.createURI("http://Law")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://passedExam"), vf.createURI("http://MBE")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://passedExam"), vf.createURI("http://BAR-Kansas")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://hasDegreeIn"), vf.createURI("http://Law")));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("person", vf.createURI("http://Bob"));
+        bs.addBinding("exam", vf.createURI("http://MBE"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("person", vf.createURI("http://Bob"));
+        bs.addBinding("exam", vf.createURI("http://BAR-Kansas"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("person", vf.createURI("http://Charlie"));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected,  results);
+        runTest(sparql, statements, expectedResults);
     }
 
     /**
@@ -124,71 +122,66 @@ public class QueryIT extends ITBase {
                   "?candidate <http://talksTo> ?leader. " +
                 "}";
 
-        // Triples that will be streamed into Fluo after the PCJ has been created.
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
                 // Leaders
-                makeRyaStatement("http://Alice", "http://leaderOf", "http://GeekSquad"),
-                makeRyaStatement("http://Bob", "http://leaderOf", "http://GeekSquad"),
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://leaderOf"), vf.createURI("http://GeekSquad")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://leaderOf"), vf.createURI("http://GeekSquad")),
 
                 // Recruiters
-                makeRyaStatement("http://Charlie", "http://recruiterFor", "http://GeekSquad"),
-                makeRyaStatement("http://David", "http://recruiterFor", "http://GeekSquad"),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://recruiterFor"), vf.createURI("http://GeekSquad")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://recruiterFor"), vf.createURI("http://GeekSquad")),
 
                 // Candidates
-                makeRyaStatement("http://Eve", "http://skilledWith", "http://Computers"),
-                makeRyaStatement("http://Eve", "http://livesIn", "USA"),
-                makeRyaStatement("http://Frank", "http://skilledWith", "http://Computers"),
-                makeRyaStatement("http://Frank", "http://livesIn", "USA"),
-                makeRyaStatement("http://George", "http://skilledWith", "http://Computers"),
-                makeRyaStatement("http://George", "http://livesIn", "Germany"),
-                makeRyaStatement("http://Harry", "http://skilledWith", "http://Negotiating"),
-                makeRyaStatement("http://Harry", "http://livesIn", "USA"),
-                makeRyaStatement("http://Ivan", "http://skilledWith", "http://Computers"),
-                makeRyaStatement("http://Ivan", "http://livesIn", "USA"),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://skilledWith"), vf.createURI("http://Computers")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://livesIn"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://skilledWith"), vf.createURI("http://Computers")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://livesIn"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("http://George"), vf.createURI("http://skilledWith"), vf.createURI("http://Computers")),
+                vf.createStatement(vf.createURI("http://George"), vf.createURI("http://livesIn"), vf.createLiteral("Germany")),
+                vf.createStatement(vf.createURI("http://Harry"), vf.createURI("http://skilledWith"), vf.createURI("http://Negotiating")),
+                vf.createStatement(vf.createURI("http://Harry"), vf.createURI("http://livesIn"), vf.createLiteral("USA")),
+                vf.createStatement(vf.createURI("http://Ivan"), vf.createURI("http://skilledWith"), vf.createURI("http://Computers")),
+                vf.createStatement(vf.createURI("http://Ivan"), vf.createURI("http://livesIn"), vf.createLiteral("USA")),
 
                 // Candidates the recruiters talk to.
-                makeRyaStatement("http://Charlie", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Charlie", "http://talksTo", "http://George"),
-                makeRyaStatement("http://Charlie", "http://talksTo", "http://Harry"),
-                makeRyaStatement("http://David", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://David", "http://talksTo", "http://Frank"),
-                makeRyaStatement("http://David", "http://talksTo", "http://Ivan"),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://George")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://talksTo"), vf.createURI("http://Harry")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://talksTo"), vf.createURI("http://Frank")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://talksTo"), vf.createURI("http://Ivan")),
 
                 // Recruits that talk to leaders.
-                makeRyaStatement("http://Eve", "http://talksTo", "http://Alice"),
-                makeRyaStatement("http://George", "http://talksTo", "http://Alice"),
-                makeRyaStatement("http://Harry", "http://talksTo", "http://Bob"),
-                makeRyaStatement("http://Ivan", "http://talksTo", "http://Bob"));
-
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add( makeBindingSet(
-                new BindingImpl("recruiter", new URIImpl("http://Charlie")),
-                new BindingImpl("candidate", new URIImpl("http://Eve")),
-                new BindingImpl("leader", new URIImpl("http://Alice"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("recruiter", new URIImpl("http://David")),
-                new BindingImpl("candidate", new URIImpl("http://Eve")),
-                new BindingImpl("leader", new URIImpl("http://Alice"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("recruiter", new URIImpl("http://David")),
-                new BindingImpl("candidate", new URIImpl("http://Ivan")),
-                new BindingImpl("leader", new URIImpl("http://Bob"))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
+                vf.createStatement(vf.createURI("http://George"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
+                vf.createStatement(vf.createURI("http://Harry"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")),
+                vf.createStatement(vf.createURI("http://Ivan"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("recruiter", vf.createURI("http://Charlie"));
+        bs.addBinding("candidate", vf.createURI("http://Eve"));
+        bs.addBinding("leader", vf.createURI("http://Alice"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("recruiter", vf.createURI("http://David"));
+        bs.addBinding("candidate", vf.createURI("http://Eve"));
+        bs.addBinding("leader", vf.createURI("http://Alice"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("recruiter", vf.createURI("http://David"));
+        bs.addBinding("candidate", vf.createURI("http://Ivan"));
+        bs.addBinding("leader", vf.createURI("http://Bob"));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected,  results);
+        runTest(sparql, statements, expectedResults);
     }
 
     @Test
@@ -203,57 +196,52 @@ public class QueryIT extends ITBase {
                   "?worker <http://worksAt> <http://Chipotle>. " +
                 "}";
 
-        // Triples that will be streamed into Fluo after the PCJ has been created.
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Bob"),
-                makeRyaStatement("http://Bob", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Bob", "http://worksAt", "http://Chipotle"),
-
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Charlie"),
-                makeRyaStatement("http://Charlie", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Charlie", "http://worksAt", "http://Chipotle"),
-
-                makeRyaStatement("http://Alice", "http://talksTo", "http://David"),
-                makeRyaStatement("http://David", "http://livesIn", "http://London"),
-                makeRyaStatement("http://David", "http://worksAt", "http://Chipotle"),
-
-                makeRyaStatement("http://Alice", "http://talksTo", "http://Eve"),
-                makeRyaStatement("http://Eve", "http://livesIn", "http://Leeds"),
-                makeRyaStatement("http://Eve", "http://worksAt", "http://Chipotle"),
-
-                makeRyaStatement("http://Frank", "http://talksTo", "http://Alice"),
-                makeRyaStatement("http://Frank", "http://livesIn", "http://London"),
-                makeRyaStatement("http://Frank", "http://worksAt", "http://Chipotle"));
-
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Bob")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://Charlie")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-        expected.add( makeBindingSet(
-                new BindingImpl("customer", new URIImpl("http://Alice")),
-                new BindingImpl("worker", new URIImpl("http://David")),
-                new BindingImpl("city", new URIImpl("http://London"))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Bob")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Charlie")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://David")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://talksTo"), vf.createURI("http://Eve")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://livesIn"), vf.createURI("http://Leeds")),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")),
+
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://talksTo"), vf.createURI("http://Alice")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://livesIn"), vf.createURI("http://London")),
+                vf.createStatement(vf.createURI("http://Frank"), vf.createURI("http://worksAt"), vf.createURI("http://Chipotle")));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Bob"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://Charlie"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("customer", vf.createURI("http://Alice"));
+        bs.addBinding("worker", vf.createURI("http://David"));
+        bs.addBinding("city", vf.createURI("http://London"));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected,  results);
+        runTest(sparql, statements, expectedResults);
     }
 
     @Test
@@ -266,69 +254,51 @@ public class QueryIT extends ITBase {
                   "?name <http://playsSport> \"Soccer\" " +
                 "}";
 
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://hasAge", 18),
-                makeRyaStatement("http://Bob", "http://hasAge", 30),
-                makeRyaStatement("http://Charlie", "http://hasAge", 14),
-                makeRyaStatement("http://David", "http://hasAge", 16),
-                makeRyaStatement("http://Eve", "http://hasAge", 35),
-
-                makeRyaStatement("http://Alice", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://Bob", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://Charlie", "http://playsSport", "Basketball"),
-                makeRyaStatement("http://Charlie", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://David", "http://playsSport", "Basketball"));
-
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add( makeBindingSet(
-                new BindingImpl("name", new URIImpl("http://Alice")),
-                new BindingImpl("age", new NumericLiteralImpl(18, XMLSchema.INTEGER))));
-        expected.add( makeBindingSet(
-                new BindingImpl("name", new URIImpl("http://Charlie")),
-                new BindingImpl("age", new NumericLiteralImpl(14, XMLSchema.INTEGER))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://hasAge"), vf.createLiteral(18)),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://hasAge"), vf.createLiteral(30)),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://hasAge"), vf.createLiteral(14)),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://hasAge"), vf.createLiteral(16)),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://hasAge"), vf.createLiteral(35)),
+
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://playsSport"), vf.createLiteral("Basketball")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://playsSport"), vf.createLiteral("Basketball")));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("name", vf.createURI("http://Alice"));
+        bs.addBinding("age", vf.createLiteral("18", XMLSchema.INTEGER));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("name", vf.createURI("http://Charlie"));
+        bs.addBinding("age", vf.createLiteral("14", XMLSchema.INTEGER));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected,  results);
+        runTest(sparql, statements, expectedResults);
     }
-    
+
     @Test
     public void withCustomFilters() throws Exception {
-        final String sparql = "prefix ryafunc: <tag:rya.apache.org,2017:function#> \n" //
-                        + "SELECT ?name ?age \n" //
-                        + "{ \n" //
-                        + "FILTER( ryafunc:isTeen(?age) ) . \n" //
-                        + "?name <http://hasAge> ?age . \n" //
-                        + "?name <http://playsSport> \"Soccer\" \n" //
-                        + "}"; //
-
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(
-                makeRyaStatement("http://Alice", "http://hasAge", 18),
-                makeRyaStatement("http://Bob", "http://hasAge", 30),
-                makeRyaStatement("http://Charlie", "http://hasAge", 14),
-                makeRyaStatement("http://David", "http://hasAge", 16),
-                makeRyaStatement("http://Eve", "http://hasAge", 35),
-
-                makeRyaStatement("http://Alice", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://Bob", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://Charlie", "http://playsSport", "Basketball"),
-                makeRyaStatement("http://Charlie", "http://playsSport", "Soccer"),
-                makeRyaStatement("http://David", "http://playsSport", "Basketball"));
-
-        Function fooFunction = new Function() {
+        final String sparql =
+                "prefix ryafunc: <tag:rya.apache.org,2017:function#> " +
+                "SELECT ?name ?age "  +
+                "{ "  +
+                    "FILTER( ryafunc:isTeen(?age) ) . "  +
+                    "?name <http://hasAge> ?age . "  +
+                    "?name <http://playsSport> \"Soccer\" . "  +
+                "}";
 
+        // Register a custom Filter.
+        final Function fooFunction = new Function() {
             @Override
             public String getURI() {
                 return "tag:rya.apache.org,2017:function#isTeen";
@@ -337,24 +307,22 @@ public class QueryIT extends ITBase {
             final static int TEEN_THRESHOLD = 20;
 
             @Override
-            public Value evaluate(ValueFactory valueFactory, Value... args) throws ValueExprEvaluationException {
-
+            public Value evaluate(final ValueFactory valueFactory, final Value... args) throws ValueExprEvaluationException {
                 if (args.length != 1) {
                     throw new ValueExprEvaluationException("isTeen() requires exactly 1 argument, got " + args.length);
                 }
 
                 if (args[0] instanceof Literal) {
-                    Literal literal = (Literal) args[0];
-
-                    URI datatype = literal.getDatatype();
+                    final Literal literal = (Literal) args[0];
+                    final URI datatype = literal.getDatatype();
 
                     // ABS function accepts only numeric literals
                     if (datatype != null && XMLDatatypeUtil.isNumericDatatype(datatype)) {
                         if (XMLDatatypeUtil.isDecimalDatatype(datatype)) {
-                            BigDecimal bigValue = literal.decimalValue();
+                            final BigDecimal bigValue = literal.decimalValue();
                             return BooleanLiteralImpl.valueOf(bigValue.compareTo(new BigDecimal(TEEN_THRESHOLD)) < 0);
                         } else if (XMLDatatypeUtil.isFloatingPointDatatype(datatype)) {
-                            double doubleValue = literal.doubleValue();
+                            final double doubleValue = literal.doubleValue();
                             return BooleanLiteralImpl.valueOf(doubleValue < TEEN_THRESHOLD);
                         } else {
                             throw new ValueExprEvaluationException("unexpected datatype (expect decimal/int or floating) for function operand: " + args[0]);
@@ -371,82 +339,134 @@ public class QueryIT extends ITBase {
         // Add our new function to the registry
         FunctionRegistry.getInstance().add(fooFunction);
 
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add( makeBindingSet(
-                new BindingImpl("name", new URIImpl("http://Alice")),
-                new BindingImpl("age", new NumericLiteralImpl(18, XMLSchema.INTEGER))));
-        expected.add( makeBindingSet(
-                new BindingImpl("name", new URIImpl("http://Charlie")),
-                new BindingImpl("age", new NumericLiteralImpl(14, XMLSchema.INTEGER))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(sparql);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String>absent());
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://hasAge"), vf.createLiteral(18)),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://hasAge"), vf.createLiteral(30)),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://hasAge"), vf.createLiteral(14)),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://hasAge"), vf.createLiteral(16)),
+                vf.createStatement(vf.createURI("http://Eve"), vf.createURI("http://hasAge"), vf.createLiteral(35)),
+
+                vf.createStatement(vf.createURI("http://Alice"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://Bob"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://playsSport"), vf.createLiteral("Basketball")),
+                vf.createStatement(vf.createURI("http://Charlie"), vf.createURI("http://playsSport"), vf.createLiteral("Soccer")),
+                vf.createStatement(vf.createURI("http://David"), vf.createURI("http://playsSport"), vf.createLiteral("Basketball")));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("name", vf.createURI("http://Alice"));
+        bs.addBinding("age", vf.createLiteral("18", XMLSchema.INTEGER));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("name", vf.createURI("http://Charlie"));
+        bs.addBinding("age", vf.createLiteral("14", XMLSchema.INTEGER));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, sparql);
-        assertEquals(expected,  results);
+        runTest(sparql, statements, expectedResults);
     }
 
     @Test
     public void withTemporal() throws Exception {
+        // A query that finds all stored data after 3 seconds.
         final String dtPredUri = "http://www.w3.org/2006/time#inXSDDateTime";
         final String dtPred = "<" + dtPredUri + ">";
-        final String xmlDateTime = "http://www.w3.org/2001/XMLSchema#dateTime";
-        // Find all stored dates.
-        String selectQuery = "PREFIX time: <http://www.w3.org/2006/time#> \n"//
-                        + "PREFIX xml: <http://www.w3.org/2001/XMLSchema#> \n" //
-                        + "PREFIX tempo: <tag:rya-rdf.org,2015:temporal#> \n"//
-                        + "SELECT ?event ?time \n" //
-                        + "WHERE { \n" //
-                        + "  ?event " + dtPred + " ?time . \n"//
-                        // + " FILTER(?time > '2000-01-01T01:00:00Z'^^xml:dateTime) \n"// all
-                        // + " FILTER(?time < '2007-01-01T01:01:03-08:00'^^xml:dateTime) \n"// after 2007
-                        + " FILTER(?time > '2001-01-01T01:01:03-08:00'^^xml:dateTime) \n"// after 3 seconds
-                        + "}";//
-
-        // create some resources and literals to make statements out of
-        String eventz = "<http://eventz>";
-        final Set<RyaStatement> streamedTriples = Sets.newHashSet(//
-                        makeRyaStatement(eventz, "http://www.w3.org/1999/02/22-rdf-syntax-ns#type", "<http://www.w3.org/2006/time#Instant>"), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:01-08:00")), // one second
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T04:01:02.000-05:00")), // 2 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:03-08:00")), // 3 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T01:01:04-08:00")), // 4seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2001-01-01T09:01:05Z")), // 5 seconds
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2006-01-01")), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2007-01-01")), //
-                        makeRyaStatement(eventz, dtPredUri, new RyaType(new URIImpl(xmlDateTime), "2008-01-01")));
-
-        // The expected results of the SPARQL query once the PCJ has been computed.
-        final Set<BindingSet> expected = new HashSet<>();
-        expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2001-01-01T09:01:04.000Z", new URIImpl(xmlDateTime))))); //
-        expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2001-01-01T09:01:05.000Z", new URIImpl(xmlDateTime))))); //
-        expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2006-01-01T05:00:00.000Z", new URIImpl(xmlDateTime))))); //
-        expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2007-01-01T05:00:00.000Z", new URIImpl(xmlDateTime))))); //
-        expected.add(makeBindingSet(new BindingImpl("event", new URIImpl(eventz)), new BindingImpl("time", new LiteralImpl("2008-01-01T05:00:00.000Z", new URIImpl(xmlDateTime)))));
-
-        // Create the PCJ table.
-        final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME);
-        final String pcjId = pcjStorage.createPcj(selectQuery);
-
-        // Tell the Fluo app to maintain the PCJ.
-        new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, accumuloConn, RYA_INSTANCE_NAME);
-
-        // Stream the data into Fluo.
-        new InsertTriples().insert(fluoClient, streamedTriples, Optional.<String> absent());
+
+        final String sparql =
+                "PREFIX time: <http://www.w3.org/2006/time#> " +
+                "PREFIX xml: <http://www.w3.org/2001/XMLSchema#> "  +
+                "PREFIX tempo: <tag:rya-rdf.org,2015:temporal#> " +
+                "SELECT ?event ?time "  +
+                "WHERE { "  +
+                    "?event " + dtPred + " ?time . " +
+                    "FILTER(?time > '2001-01-01T01:01:03-08:00'^^xml:dateTime) " +
+                "}";
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), vf.createURI("http://www.w3.org/2006/time#Instant")),
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:01-08:00"))), // 1 second
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T04:01:02.000-05:00"))), // 2 seconds
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:03-08:00"))), // 3 seconds
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T01:01:04-08:00"))), // 4 seconds
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:05Z"))), // 5 seconds
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2006-01-01T05:00:00.000Z"))),
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2007-01-01T05:00:00.000Z"))),
+                vf.createStatement(vf.createURI("http://eventz"), vf.createURI(dtPredUri), vf.createLiteral(dtf.newXMLGregorianCalendar("2008-01-01T05:00:00.000Z"))));
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("event", vf.createURI("http://eventz"));
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:04.000Z")));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("event", vf.createURI("http://eventz"));
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2001-01-01T09:01:05.000Z")));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("event", vf.createURI("http://eventz"));
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2006-01-01T05:00:00.000Z")));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("event", vf.createURI("http://eventz"));
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2007-01-01T05:00:00.000Z")));
+        expectedResults.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("event", vf.createURI("http://eventz"));
+        bs.addBinding("time", vf.createLiteral(dtf.newXMLGregorianCalendar("2008-01-01T05:00:00.000Z")));
+        expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        fluo.waitForObservers();
-        final Set<BindingSet> results = getQueryBindingSetValues(fluoClient, selectQuery);
-        assertEquals(expected, results);
+        runTest(sparql, statements, expectedResults);
+    }
+
+    public void runTest(final String sparql, final Collection<Statement> statements, final Collection<BindingSet> expectedResults) throws Exception {
+        requireNonNull(sparql);
+        requireNonNull(statements);
+        requireNonNull(expectedResults);
+
+        // Register the PCJ with Rya.
+        final Instance accInstance = super.getAccumuloConnector().getInstance();
+        final Connector accumuloConn = super.getAccumuloConnector();
+
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(
+                ACCUMULO_USER,
+                ACCUMULO_PASSWORD.toCharArray(),
+                accInstance.getInstanceName(),
+                accInstance.getZooKeepers()), accumuloConn);
+
+        ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
+
+        // Write the data to Rya.
+        final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
+        ryaConn.begin();
+        ryaConn.add(statements);
+        ryaConn.commit();
+        ryaConn.close();
+
+        // Wait for the Fluo application to finish computing the end result.
+        super.getMiniFluo().waitForObservers();
+
+        // Fetch the value that is stored within the PCJ table.
+        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, RYA_INSTANCE_NAME)) {
+            final String pcjId = pcjStorage.listPcjs().get(0);
+            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
+
+            // Ensure the result of the query matches the expected result.
+            assertEquals(expectedResults, results);
+        }
     }
-}
+}
\ No newline at end of file