Posted to commits@rya.apache.org by ca...@apache.org on 2017/08/02 21:01:54 UTC

[1/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Repository: incubator-rya
Updated Branches:
  refs/heads/master ab8035a17 -> 2ca854271


http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/TimestampedNotificationProcessor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/TimestampedNotificationProcessor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/TimestampedNotificationProcessor.java
new file mode 100644
index 0000000..baeb611
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/TimestampedNotificationProcessor.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.processor;
+
+import java.util.Optional;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.periodic.notification.api.BinPruner;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.api.NotificationProcessor;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+import org.apache.rya.periodic.notification.exporter.KafkaPeriodicBindingSetExporter;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.openrdf.query.BindingSet;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Implementation of {@link NotificationProcessor} that uses the id indicated by
+ * the {@link TimestampedNotification} to obtain results from the
+ * {@link PeriodicQueryResultStorage} layer containing the results of the
+ * Periodic Query. The TimestampedNotificationProcessor then parses the results
+ * and adds them to work queues to be processed by the {@link BinPruner} and the
+ * {@link KafkaPeriodicBindingSetExporter}.
+ *
+ */
+public class TimestampedNotificationProcessor implements NotificationProcessor, Runnable {
+
+    private static final Logger log = Logger.getLogger(TimestampedNotificationProcessor.class);
+    private PeriodicQueryResultStorage periodicStorage;
+    private BlockingQueue<TimestampedNotification> notifications; // notifications to process
+    private BlockingQueue<NodeBin> bins; // entries to delete from Fluo
+    private BlockingQueue<BindingSetRecord> bindingSets; // query results to export
+    private AtomicBoolean closed = new AtomicBoolean(false);
+    private int threadNumber;
+    
+
+    public TimestampedNotificationProcessor(PeriodicQueryResultStorage periodicStorage,
+            BlockingQueue<TimestampedNotification> notifications, BlockingQueue<NodeBin> bins, BlockingQueue<BindingSetRecord> bindingSets,
+            int threadNumber) {
+        this.notifications = Preconditions.checkNotNull(notifications);
+        this.bins = Preconditions.checkNotNull(bins);
+        this.bindingSets = Preconditions.checkNotNull(bindingSets);
+        this.periodicStorage = periodicStorage;
+        this.threadNumber = threadNumber;
+    }
+
+    /**
+     * Processes each {@link TimestampedNotification} by scanning the PCJ tables for
+     * entries in the bin corresponding to
+     * {@link TimestampedNotification#getTimestamp()} and adding them to the
+     * export BlockingQueue. The TimestampedNotification is then used to form a
+     * {@link NodeBin} that is passed to the BinPruner BlockingQueue so that the
+     * bins can be deleted from Fluo and Accumulo.
+     */
+    @Override
+    public void processNotification(TimestampedNotification notification) {
+
+        String id = notification.getId();
+        long ts = notification.getTimestamp().getTime();
+        long period = notification.getPeriod();
+        long bin = getBinFromTimestamp(ts, period);
+        NodeBin nodeBin = new NodeBin(id, bin);
+
+        try (CloseableIterator<BindingSet> iter = periodicStorage.listResults(id, Optional.of(bin))) {
+
+            while(iter.hasNext()) {
+                bindingSets.add(new BindingSetRecord(iter.next(), id));
+            }
+            // add NodeBin to BinPruner queue so that bin can be deleted from
+            // Fluo and Accumulo
+            bins.add(nodeBin);
+        } catch (Exception e) {
+            log.debug("Encountered error: " + e.getMessage() + " while accessing periodic results for bin: " + bin + " for query: " + id);
+        }
+    }
+
+    /**
+     * Computes the left end point of the bin containing the event time ts
+     *
+     * @param ts - event time
+     * @param period - length of period
+     * @return left end point of the bin containing the event time ts
+     */
+    private long getBinFromTimestamp(long ts, long period) {
+        Preconditions.checkArgument(period > 0);
+        return (ts / period) * period;
+    }
+
+    @Override
+    public void run() {
+        try {
+            while(!closed.get()) {
+                processNotification(notifications.take());
+            }
+        } catch (Exception e) {
+            log.trace("Thread_" + threadNumber + " is unable to process next notification.");
+            throw new RuntimeException(e);
+        }
+
+    }
+    
+    public void shutdown() {
+        closed.set(true);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+
+        private PeriodicQueryResultStorage periodicStorage;
+        private BlockingQueue<TimestampedNotification> notifications; // notifications to process
+        private BlockingQueue<NodeBin> bins; // entries to delete from Fluo
+        private BlockingQueue<BindingSetRecord> bindingSets; // query results to export
+                                                       
+        private int threadNumber;
+
+        /**
+         * Set notification queue
+         * @param notifications - work queue containing notifications to be processed
+         * @return this Builder for chaining method calls
+         */
+        public Builder setNotifications(BlockingQueue<TimestampedNotification> notifications) {
+            this.notifications = notifications;
+            return this;
+        }
+
+        /**
+         * Set nodeBin queue
+         * @param bins - work queue containing NodeBins to be pruned
+         * @return this Builder for chaining method calls
+         */
+        public Builder setBins(BlockingQueue<NodeBin> bins) {
+            this.bins = bins;
+            return this;
+        }
+
+        /**
+         * Set BindingSet queue
+         * @param bindingSets - work queue containing BindingSets to be exported
+         * @return this Builder for chaining method calls
+         */
+        public Builder setBindingSets(BlockingQueue<BindingSetRecord> bindingSets) {
+            this.bindingSets = bindingSets;
+            return this;
+        }
+
+        /**
+         * Sets the number used to identify the processing thread
+         * @param threadNumber - number used to identify the processing thread in logs
+         * @return this Builder for chaining method calls
+         */
+        public Builder setThreadNumber(int threadNumber) {
+            this.threadNumber = threadNumber;
+            return this;
+        }
+        
+        /**
+         * Set the PeriodicStorage layer
+         * @param periodicStorage - periodic storage layer that periodic results are read from
+         * @return - this Builder for chaining method calls
+         */
+        public Builder setPeriodicStorage(PeriodicQueryResultStorage periodicStorage) {
+            this.periodicStorage = periodicStorage;
+            return this;
+        }
+
+        /**
+         * Builds a TimestampedNotificationProcessor
+         * @return - TimestampedNotificationProcessor built from arguments passed to this Builder
+         */
+        public TimestampedNotificationProcessor build() {
+            return new TimestampedNotificationProcessor(periodicStorage, notifications, bins, bindingSets, threadNumber);
+        }
+
+    }
+}

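A quick worked example of the binning arithmetic in getBinFromTimestamp: with period = 60000 ms, an event at ts = 1501621230123 lands in bin (1501621230123 / 60000) * 60000 = 1501621200000, the left end point of its minute-wide bin. The sketch below is not part of this commit; it assumes a PeriodicQueryResultStorage implementation named "storage" and uses the imports from the file above plus java.util.concurrent.LinkedBlockingQueue. It shows how the Builder wires a processor to its work queues:

    BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
    BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
    BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();

    TimestampedNotificationProcessor processor = TimestampedNotificationProcessor.builder()
            .setPeriodicStorage(storage)       // assumed PeriodicQueryResultStorage impl
            .setNotifications(notifications)   // notifications to process
            .setBins(bins)                     // bins to hand to the BinPruner
            .setBindingSets(bindingSets)       // results to hand to the exporter
            .setThreadNumber(0)
            .build();
    new Thread(processor).start();             // drains the queue until shutdown() is called
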
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/AccumuloBinPruner.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/AccumuloBinPruner.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/AccumuloBinPruner.java
new file mode 100644
index 0000000..4dac64c
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/AccumuloBinPruner.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.pruner;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
+import org.apache.rya.periodic.notification.api.BinPruner;
+import org.apache.rya.periodic.notification.api.NodeBin;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Deletes BindingSets from time bins in the indicated PCJ table
+ */
+public class AccumuloBinPruner implements BinPruner {
+
+    private static final Logger log = Logger.getLogger(AccumuloBinPruner.class);
+    private PeriodicQueryResultStorage periodicStorage;
+
+    public AccumuloBinPruner(PeriodicQueryResultStorage periodicStorage) {
+        Preconditions.checkNotNull(periodicStorage);
+        this.periodicStorage = periodicStorage;
+    }
+
+    /**
+     * This method deletes all BindingSets in the indicated bin from the PCJ
+     * table indicated by the id. It is assumed that all BindingSet entries for
+     * the corresponding bin are written to the PCJ table so that the bin Id
+     * occurs first.
+     * 
+     * @param nodeBin
+     *            - {@link NodeBin} containing the pcj table id and the
+     *            temporal bin the BindingSets are contained in
+     */
+    @Override
+    public void pruneBindingSetBin(NodeBin nodeBin) {
+        Preconditions.checkNotNull(nodeBin);
+        String id = nodeBin.getNodeId();
+        long bin = nodeBin.getBin();
+        try {
+            periodicStorage.deletePeriodicQueryResults(id, bin);
+        } catch (PeriodicQueryStorageException e) {
+            log.trace("Unable to delete results from Peroidic Table: " + id + " for bin: " + bin);
+            throw new RuntimeException(e);
+        }
+    }
+
+}

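Usage is a single delegating call to the storage layer. A minimal sketch, assuming a PeriodicQueryResultStorage implementation named "storage"; the query id and bin value are illustrative:

    AccumuloBinPruner accPruner = new AccumuloBinPruner(storage);
    // delete all results for this query's bin with left end point 1501621200000 ms
    accPruner.pruneBindingSetBin(new NodeBin("periodicQueryId", 1501621200000L));
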
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/FluoBinPruner.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/FluoBinPruner.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/FluoBinPruner.java
new file mode 100644
index 0000000..bee9c02
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/FluoBinPruner.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.pruner;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Transaction;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.Span;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformationDAO;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+import org.apache.rya.periodic.notification.api.BinPruner;
+import org.apache.rya.periodic.notification.api.NodeBin;
+
+import com.google.common.base.Optional;
+
+/**
+ * Deletes {@link BindingSet}s from the indicated Fluo table.
+ */
+public class FluoBinPruner implements BinPruner {
+
+    private static final Logger log = Logger.getLogger(FluoBinPruner.class);
+    private FluoClient client;
+
+    public FluoBinPruner(FluoClient client) {
+        this.client = client;
+    }
+
+    /**
+     * This method deletes BindingSets in the specified bin from the BindingSet
+     * Column of the indicated Fluo nodeId
+     * 
+     * @param nodeBin
+     *            - {@link NodeBin} containing the Fluo nodeId and the bin id
+     */
+    @Override
+    public void pruneBindingSetBin(NodeBin nodeBin) {
+        String id = nodeBin.getNodeId();
+        long bin = nodeBin.getBin();
+        try (Transaction tx = client.newTransaction()) {
+            Optional<NodeType> type = NodeType.fromNodeId(id);
+            if (!type.isPresent()) {
+                log.trace("Unable to determine NodeType from id: " + id);
+                throw new RuntimeException();
+            }
+            Column batchInfoColumn = type.get().getResultColumn();
+            String batchInfoSpanPrefix = id + IncrementalUpdateConstants.NODEID_BS_DELIM + bin;
+            SpanBatchDeleteInformation batchInfo = SpanBatchDeleteInformation.builder().setColumn(batchInfoColumn)
+                    .setSpan(Span.prefix(Bytes.of(batchInfoSpanPrefix))).build();
+            BatchInformationDAO.addBatch(tx, id, batchInfo);
+            tx.commit();
+        }
+    }
+
+}

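The batch delete above relies on the row layout of periodic results: binding sets for a node are keyed as <nodeId><NODEID_BS_DELIM><bin>..., so a prefix span selects exactly one bin. A small sketch of that span construction (the node id and bin are illustrative, not from this commit):

    String nodeId = "PERIODIC_QUERY_1234";  // hypothetical Fluo node id
    long bin = 1501621200000L;              // hypothetical bin end point in ms
    Span oneBin = Span.prefix(Bytes.of(nodeId + IncrementalUpdateConstants.NODEID_BS_DELIM + bin));
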
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPruner.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPruner.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPruner.java
new file mode 100644
index 0000000..97e3f22
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPruner.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.pruner;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.SnapshotBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.apache.rya.periodic.notification.api.BinPruner;
+import org.apache.rya.periodic.notification.api.NodeBin;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Implementation of {@link BinPruner} that deletes old, already processed
+ * Periodic Query results from Fluo and the PCJ table to which the Fluo results
+ * are exported.
+ *
+ */
+public class PeriodicQueryPruner implements BinPruner, Runnable {
+
+    private static final Logger log = Logger.getLogger(PeriodicQueryPruner.class);
+    private FluoClient client;
+    private AccumuloBinPruner accPruner;
+    private FluoBinPruner fluoPruner;
+    private BlockingQueue<NodeBin> bins;
+    private AtomicBoolean closed = new AtomicBoolean(false);
+    private int threadNumber;
+
+    public PeriodicQueryPruner(FluoBinPruner fluoPruner, AccumuloBinPruner accPruner, FluoClient client, BlockingQueue<NodeBin> bins, int threadNumber) {
+        this.fluoPruner = Preconditions.checkNotNull(fluoPruner);
+        this.accPruner = Preconditions.checkNotNull(accPruner);
+        this.client = Preconditions.checkNotNull(client);
+        this.bins = Preconditions.checkNotNull(bins);
+        this.threadNumber = threadNumber;
+    }
+    
+    @Override
+    public void run() {
+        try {
+            while (!closed.get()) {
+                pruneBindingSetBin(bins.take());
+            }
+        } catch (InterruptedException e) {
+            log.trace("Thread " + threadNumber + " is unable to prune the next message.");
+            throw new RuntimeException(e);
+        }
+    }
+    
+    /**
+     * Prunes BindingSet bins from the Rya Fluo Application in addition to the BindingSet
+     * bins created in the PCJ tables associated with the given query id.
+     * @param nodeBin - {@link NodeBin} containing the query id for the Rya Fluo
+     *            application and the bin id of the bins to be deleted
+     */
+    @Override
+    public void pruneBindingSetBin(NodeBin nodeBin) {
+        String id = nodeBin.getNodeId();
+        long bin = nodeBin.getBin();
+        try(Snapshot sx = client.newSnapshot()) {
+            String queryId = sx.get(Bytes.of(id), FluoQueryColumns.PCJ_ID_QUERY_ID).toString();
+            Set<String> fluoIds = getNodeIdsFromResultId(sx, queryId);
+            accPruner.pruneBindingSetBin(new NodeBin(id, bin));
+            for(String fluoId: fluoIds) {
+                fluoPruner.pruneBindingSetBin(new NodeBin(fluoId, bin));
+            }
+        } catch (Exception e) {
+            log.trace("Could not successfully initialize PeriodicQueryBinPruner.");
+        }
+    }
+    
+    
+    public void shutdown() {
+        closed.set(true);
+    }
+
+    private Set<String> getNodeIdsFromResultId(SnapshotBase sx, String id) {
+        Set<String> ids = new HashSet<>();
+        PeriodicQueryUtil.getPeriodicQueryNodeAncestorIds(sx, id, ids);
+        return ids;
+    }
+
+
+}

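A wiring sketch for a single pruner thread; the FluoClient ("fluoClient"), storage instance ("storage"), and ids are assumptions, not part of this commit:

    BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
    PeriodicQueryPruner pruner = new PeriodicQueryPruner(
            new FluoBinPruner(fluoClient), new AccumuloBinPruner(storage), fluoClient, bins, 0);
    new Thread(pruner).start();                               // blocks on bins.take()
    bins.add(new NodeBin("periodicQueryId", 1501621200000L)); // schedule one bin for deletion
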
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPrunerExecutor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPrunerExecutor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPrunerExecutor.java
new file mode 100644
index 0000000..1c11f96
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/pruner/PeriodicQueryPrunerExecutor.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.pruner;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.periodic.notification.api.LifeCycle;
+import org.apache.rya.periodic.notification.api.NodeBin;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Executor service that runs {@link PeriodicQueryPruner}s with added functionality
+ * for starting, stopping, and determining if the query pruners are running.
+ */
+public class PeriodicQueryPrunerExecutor implements LifeCycle {
+
+    private static final Logger log = Logger.getLogger(PeriodicQueryPrunerExecutor.class);
+    private FluoClient client;
+    private int numThreads;
+    private ExecutorService executor;
+    private BlockingQueue<NodeBin> bins;
+    private PeriodicQueryResultStorage periodicStorage;
+    private List<PeriodicQueryPruner> pruners;
+    private boolean running = false;
+
+    public PeriodicQueryPrunerExecutor(PeriodicQueryResultStorage periodicStorage, FluoClient client, int numThreads,
+            BlockingQueue<NodeBin> bins) {
+        Preconditions.checkArgument(numThreads > 0);
+        this.periodicStorage = periodicStorage;
+        this.numThreads = numThreads;
+        executor = Executors.newFixedThreadPool(numThreads);
+        this.bins = bins;
+        this.client = client;
+        this.pruners = new ArrayList<>();
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            AccumuloBinPruner accPruner = new AccumuloBinPruner(periodicStorage);
+            FluoBinPruner fluoPruner = new FluoBinPruner(client);
+
+            for (int threadNumber = 0; threadNumber < numThreads; threadNumber++) {
+                PeriodicQueryPruner pruner = new PeriodicQueryPruner(fluoPruner, accPruner, client, bins, threadNumber);
+                pruners.add(pruner);
+                executor.submit(pruner);
+            }
+            running = true;
+        }
+    }
+
+    @Override
+    public void stop() {
+        if (pruners != null && pruners.size() > 0) {
+            pruners.forEach(x -> x.shutdown());
+        }
+        if(client != null) {
+            client.close();
+        }
+        if (executor != null) {
+            executor.shutdown();
+            running = false;
+            try {
+                if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
+                    log.info("Timed out waiting for pruner threads to shut down, exiting uncleanly");
+                    executor.shutdownNow();
+                }
+            } catch (InterruptedException e) {
+                log.info("Interrupted during shutdown, exiting uncleanly");
+            }
+        }
+    }
+
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+
+}

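A lifecycle sketch; the thread count and queue choice are arbitrary, and "storage" and "fluoClient" are assumed to exist:

    BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
    PeriodicQueryPrunerExecutor prunerExecutor = new PeriodicQueryPrunerExecutor(storage, fluoClient, 4, bins);
    prunerExecutor.start();          // launches 4 PeriodicQueryPruner threads
    // ... NodeBins are added to the queue while the service runs ...
    prunerExecutor.stop();           // shuts the pruners down and closes the FluoClient
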
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/recovery/PeriodicNotificationProvider.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/recovery/PeriodicNotificationProvider.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/recovery/PeriodicNotificationProvider.java
new file mode 100644
index 0000000..8e8b1a2
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/recovery/PeriodicNotificationProvider.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.recovery;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.coordinator.PeriodicNotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+
+/**
+ * This class is used by the {@link PeriodicNotificationCoordinatorExecutor}
+ * to add all existing {@link PeriodicNotification}s stored in Fluo when it is
+ * initialized.  This enables the {@link PeriodicServiceApplication} to be
+ * recovered from failure by restoring its original state.
+ *
+ */
+public class PeriodicNotificationProvider {
+
+    private FluoQueryMetadataDAO dao;
+    
+    public PeriodicNotificationProvider() {
+        this.dao = new FluoQueryMetadataDAO();
+    }
+    
+    /**
+     * Retrieve all of the information about Periodic Query results already registered
+     * with Fluo.  This is returned in the form of {@link CommandNotification}s that
+     * can be registered with the {@link NotificationCoordinatorExecutor}.
+     * @param sx - snapshot for reading results from Fluo
+     * @return - collection of CommandNotifications that indicate the Periodic Query information registered with the system
+     */
+    public Collection<CommandNotification> getNotifications(Snapshot sx) {
+        Set<PeriodicQueryMetadata> periodicMetadata = new HashSet<>();
+        RowScanner scanner = sx.scanner().fetch(FluoQueryColumns.PERIODIC_QUERY_NODE_ID)
+                .over(Span.prefix(IncrementalUpdateConstants.PERIODIC_QUERY_PREFIX)).byRow().build();
+        Iterator<ColumnScanner> colScannerIter = scanner.iterator();
+        while (colScannerIter.hasNext()) {
+            ColumnScanner colScanner = colScannerIter.next();
+            Iterator<ColumnValue> values = colScanner.iterator();
+            while (values.hasNext()) {
+                PeriodicQueryMetadata metadata = dao.readPeriodicQueryMetadata(sx, values.next().getsValue());
+                periodicMetadata.add(metadata);
+            }
+        }
+        return getCommandNotifications(sx, periodicMetadata);
+    }
+    
+    /**
+     * Registers all of the Periodic Query information already contained within Fluo with the
+     * {@link NotificationCoordinatorExecutor}.
+     * @param coordinator - coordinator that periodic info will be registered with
+     * @param sx - snapshot for reading results from Fluo
+     */
+    public void processRegisteredNotifications(NotificationCoordinatorExecutor coordinator, Snapshot sx) {
+        coordinator.start();
+        Collection<CommandNotification> notifications = getNotifications(sx);
+        for(CommandNotification notification: notifications) {
+            coordinator.processNextCommandNotification(notification);
+        }
+    }
+    
+    private Collection<CommandNotification> getCommandNotifications(Snapshot sx, Collection<PeriodicQueryMetadata> metadata) {
+        Set<CommandNotification> notifications = new HashSet<>();
+        int i = 1;
+        for(PeriodicQueryMetadata meta:metadata) {
+            //offset initial wait to avoid overloading system
+            PeriodicNotification periodic = new PeriodicNotification(getQueryId(meta.getNodeId(), sx), meta.getPeriod(),TimeUnit.MILLISECONDS,i*5000);
+            notifications.add(new CommandNotification(Command.ADD, periodic));
+            i++;
+        }
+        return notifications;
+    }
+    
+    private String getQueryId(String periodicNodeId, Snapshot sx) {
+        return getQueryIdFromPeriodicId(sx, periodicNodeId);
+    }
+    
+    private String getQueryIdFromPeriodicId(Snapshot sx, String nodeId) {
+        NodeType nodeType = NodeType.fromNodeId(nodeId).orNull();
+        if (nodeType == null) {
+            throw new RuntimeException("Unable to determine NodeType from id: " + nodeId);
+        }
+        String id = null;
+        switch (nodeType) {
+        case FILTER:
+            id = getQueryIdFromPeriodicId(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.FILTER_PARENT_NODE_ID).toString());
+            break;
+        case PERIODIC_QUERY:
+            id = getQueryIdFromPeriodicId(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.PERIODIC_QUERY_PARENT_NODE_ID).toString());
+            break;
+        case QUERY:
+            id = sx.get(Bytes.of(nodeId), FluoQueryColumns.RYA_PCJ_ID).toString();
+            break;
+        case AGGREGATION: 
+            id = getQueryIdFromPeriodicId(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.AGGREGATION_PARENT_NODE_ID).toString());
+            break;
+        case CONSTRUCT:
+            id = sx.get(Bytes.of(nodeId), FluoQueryColumns.CONSTRUCT_NODE_ID).toString();
+            id = id.split(IncrementalUpdateConstants.CONSTRUCT_PREFIX)[1];
+            break;
+        default:
+            throw new RuntimeException("Invalid NodeType.");
+        }
+        return id;
+    }
+    
+}

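A recovery sketch: after a restart, replay everything registered in Fluo into the coordinator. Here "fluoClient" and "coordinator" (a NotificationCoordinatorExecutor implementation) are assumptions:

    PeriodicNotificationProvider provider = new PeriodicNotificationProvider();
    try (Snapshot sx = fluoClient.newSnapshot()) {
        // starts the coordinator if needed, then re-registers every periodic query
        provider.processRegisteredNotifications(coordinator, sx);
    }
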
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationProvider.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationProvider.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationProvider.java
new file mode 100644
index 0000000..f5cd13a
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationProvider.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.registration.kafka;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.rya.periodic.notification.api.LifeCycle;
+import org.apache.rya.periodic.notification.api.Notification;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Consumer group to pull all requests for adding and deleting {@link Notification}s
+ * from Kafka.  This object executes {@link PeriodicNotificationConsumer}s that retrieve
+ * the {@link CommandNotification}s and register them with the {@link NotificationCoordinatorExecutor}.
+ *
+ */
+public class KafkaNotificationProvider implements LifeCycle {
+    private static final Logger LOG = LoggerFactory.getLogger(KafkaNotificationProvider.class);
+    private String topic;
+    private ExecutorService executor;
+    private NotificationCoordinatorExecutor coord;
+    private Properties props;
+    private int numThreads;
+    private boolean running = false;
+    Deserializer<String> keyDe;
+    Deserializer<CommandNotification> valDe;
+    List<PeriodicNotificationConsumer> consumers;
+
+    /**
+     * Create KafkaNotificationProvider for reading new notification requests from Kafka
+     * @param topic - notification topic
+     * @param keyDe - Kafka message key deserializer
+     * @param valDe - Kafka message value deserializer
+     * @param props - properties used to create a {@link KafkaConsumer}
+     * @param coord - {@link NotificationCoordinatorExecutor} for managing and generating notifications
+     * @param numThreads - number of threads used by this notification provider
+     */
+    public KafkaNotificationProvider(String topic, Deserializer<String> keyDe, Deserializer<CommandNotification> valDe, Properties props,
+            NotificationCoordinatorExecutor coord, int numThreads) {
+        this.coord = coord;
+        this.numThreads = numThreads;
+        this.topic = topic;
+        this.props = props;
+        this.consumers = new ArrayList<>();
+        this.keyDe = keyDe;
+        this.valDe = valDe;
+    }
+
+    @Override
+    public void stop() {
+        if (consumers != null && consumers.size() > 0) {
+            for (PeriodicNotificationConsumer consumer : consumers) {
+                consumer.shutdown();
+            }
+        }
+        if (executor != null) {
+            executor.shutdown();
+            try {
+                if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
+                    LOG.info("Timed out waiting for consumer threads to shut down, exiting uncleanly");
+                    executor.shutdownNow();
+                }
+            } catch (InterruptedException e) {
+                LOG.info("Interrupted during shutdown, exiting uncleanly");
+            }
+        }
+        running = false;
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            if (!coord.currentlyRunning()) {
+                coord.start();
+            }
+            // now launch all the threads
+            executor = Executors.newFixedThreadPool(numThreads);
+
+            // now create consumers to consume the messages
+            for (int threadNumber = 0; threadNumber < numThreads; threadNumber++) {
+                LOG.info("Creating consumer:" + threadNumber);
+                KafkaConsumer<String, CommandNotification> consumer = new KafkaConsumer<String, CommandNotification>(props, keyDe, valDe);
+                PeriodicNotificationConsumer periodicConsumer = new PeriodicNotificationConsumer(topic, consumer, threadNumber, coord);
+                consumers.add(periodicConsumer);
+                executor.submit(periodicConsumer);
+            }
+            running = true;
+        }
+    }
+
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+
+}

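A wiring sketch; the broker address, group id, and topic name are placeholders, and CommandNotificationSerializer (from this patch) doubles as the value deserializer. StringDeserializer is org.apache.kafka.common.serialization.StringDeserializer:

    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "periodic-notification-provider");
    KafkaNotificationProvider provider = new KafkaNotificationProvider(
            "notifications",                      // topic
            new StringDeserializer(),             // key deserializer
            new CommandNotificationSerializer(),  // value deserializer
            props, coordinator, 2);               // 2 consumer threads
    provider.start();
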
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationRegistrationClient.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationRegistrationClient.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationRegistrationClient.java
new file mode 100644
index 0000000..ec94bb7
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/KafkaNotificationRegistrationClient.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.registration.kafka;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.rya.periodic.notification.api.Notification;
+import org.apache.rya.periodic.notification.api.PeriodicNotificationClient;
+import org.apache.rya.periodic.notification.notification.BasicNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+
+/**
+ *  Implementation of {@link PeriodicNotificationClient} used to register new notification
+ *  requests with the PeriodicQueryService.
+ *
+ */
+public class KafkaNotificationRegistrationClient implements PeriodicNotificationClient {
+
+    private KafkaProducer<String, CommandNotification> producer;
+    private String topic;
+    
+    public KafkaNotificationRegistrationClient(String topic, KafkaProducer<String, CommandNotification> producer) {
+        this.topic = topic;
+        this.producer = producer;
+    }
+    
+    @Override
+    public void addNotification(PeriodicNotification notification) {
+        processNotification(new CommandNotification(Command.ADD, notification));
+
+    }
+
+    @Override
+    public void deleteNotification(BasicNotification notification) {
+        processNotification(new CommandNotification(Command.DELETE, notification));
+    }
+
+    @Override
+    public void deleteNotification(String notificationId) {
+        processNotification(new CommandNotification(Command.DELETE, new BasicNotification(notificationId)));
+    }
+
+    @Override
+    public void addNotification(String id, long period, long delay, TimeUnit unit) {
+        Notification notification = PeriodicNotification.builder().id(id).period(period).initialDelay(delay).timeUnit(unit).build();
+        processNotification(new CommandNotification(Command.ADD, notification));
+    }
+    
+   
+    private void processNotification(CommandNotification notification) {
+        producer.send(new ProducerRecord<String, CommandNotification>(topic, notification.getId(), notification));
+    }
+    
+    @Override
+    public void close() {
+        producer.close();
+    }
+    
+
+}

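A registration sketch; the broker address is a placeholder, and the serializers are configured through producer properties (StringSerializer is org.apache.kafka.common.serialization.StringSerializer):

    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("key.serializer", StringSerializer.class.getName());
    props.setProperty("value.serializer", CommandNotificationSerializer.class.getName());
    KafkaNotificationRegistrationClient client = new KafkaNotificationRegistrationClient(
            "notifications", new KafkaProducer<String, CommandNotification>(props));
    // re-notify every 30 seconds, starting after a 5 second delay
    client.addNotification("periodicQueryId", 30, 5, TimeUnit.SECONDS);
    client.close();
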
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicNotificationConsumer.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicNotificationConsumer.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicNotificationConsumer.java
new file mode 100644
index 0000000..6785ce8
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicNotificationConsumer.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.registration.kafka;
+
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.WakeupException;
+import org.apache.log4j.Logger;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+
+/**
+ * Consumer for the {@link KafkaNotificationProvider}.  This consumer pulls messages
+ * from Kafka and registers them with the {@link NotificationCoordinatorExecutor}.
+ *
+ */
+public class PeriodicNotificationConsumer implements Runnable {
+    private KafkaConsumer<String, CommandNotification> consumer;
+    private int m_threadNumber;
+    private String topic;
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+    private NotificationCoordinatorExecutor coord;
+    private static final Logger LOG = Logger.getLogger(PeriodicNotificationConsumer.class);
+
+    /**
+     * Creates a new PeriodicNotificationConsumer for consuming new notification requests from
+     * Kafka.
+     * @param topic - new notification topic
+     * @param consumer - consumer for pulling new requests from Kafka
+     * @param a_threadNumber - thread number used to identify this consumer
+     * @param coord - notification coordinator for managing and generating notifications
+     */
+    public PeriodicNotificationConsumer(String topic, KafkaConsumer<String, CommandNotification> consumer, int a_threadNumber,
+            NotificationCoordinatorExecutor coord) {
+        this.topic = topic;
+        m_threadNumber = a_threadNumber;
+        this.consumer = consumer;
+        this.coord = coord;
+    }
+
+    public void run() {
+        
+        try {
+            LOG.info("Creating kafka stream for consumer:" + m_threadNumber);
+            consumer.subscribe(Arrays.asList(topic));
+            while (!closed.get()) {
+                ConsumerRecords<String, CommandNotification> records = consumer.poll(10000);
+                // Handle new records
+                for(ConsumerRecord<String, CommandNotification> record: records) {
+                    CommandNotification notification = record.value();
+                    LOG.info("Thread " + m_threadNumber + " is adding notification " + notification + " to queue.");
+                    LOG.info("Message: " + notification);
+                    coord.processNextCommandNotification(notification);
+                }
+            }
+        } catch (WakeupException e) {
+            // Ignore exception if closing
+            if (!closed.get()) throw e;
+        } finally {
+            consumer.close();
+        }
+    }
+    
+    public void shutdown() {
+        closed.set(true);
+        consumer.wakeup();
+    }
+}

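The shutdown path is worth noting: shutdown() flips the closed flag and calls consumer.wakeup(), which makes a poll() that is blocked waiting for records throw a WakeupException, so the thread exits promptly instead of waiting out the poll timeout. A usage sketch, where "kafkaConsumer" and "coordinator" are assumed to exist:

    ExecutorService exec = Executors.newSingleThreadExecutor();
    PeriodicNotificationConsumer worker =
            new PeriodicNotificationConsumer("notifications", kafkaConsumer, 0, coordinator);
    exec.submit(worker);
    // ... later, from another thread ...
    worker.shutdown();   // wakes the blocked poll; the WakeupException is swallowed on close
    exec.shutdown();
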
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BasicNotificationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BasicNotificationTypeAdapter.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BasicNotificationTypeAdapter.java
new file mode 100644
index 0000000..bd29d29
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BasicNotificationTypeAdapter.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.lang.reflect.Type;
+
+import org.apache.rya.periodic.notification.notification.BasicNotification;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * Gson {@link JsonSerializer} and {@link JsonDeserializer} for {@link BasicNotification}s.  Used in
+ * {@link CommandNotificationTypeAdapter} to serialize and deserialize {@link CommandNotification}s.
+ *
+ */
+public class BasicNotificationTypeAdapter implements JsonDeserializer<BasicNotification>, JsonSerializer<BasicNotification> {
+
+    @Override
+    public JsonElement serialize(BasicNotification arg0, Type arg1, JsonSerializationContext arg2) {
+        JsonObject result = new JsonObject();
+        result.add("id", new JsonPrimitive(arg0.getId()));
+        return result;
+    }
+
+    @Override
+    public BasicNotification deserialize(JsonElement arg0, Type arg1, JsonDeserializationContext arg2) throws JsonParseException {
+        JsonObject json = arg0.getAsJsonObject();
+        String id = json.get("id").getAsString();
+        return new BasicNotification(id);
+    }
+
+}

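A round-trip sketch for the adapter above (the id is illustrative):

    Gson gson = new GsonBuilder()
            .registerTypeAdapter(BasicNotification.class, new BasicNotificationTypeAdapter())
            .create();
    String json = gson.toJson(new BasicNotification("query_1234"));  // {"id":"query_1234"}
    BasicNotification copy = gson.fromJson(json, BasicNotification.class);
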
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BindingSetSerDe.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BindingSetSerDe.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BindingSetSerDe.java
new file mode 100644
index 0000000..50180ad
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/BindingSetSerDe.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjSerializer;
+import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+import com.google.common.base.Joiner;
+import com.google.common.primitives.Bytes;
+
+/**
+ * Kafka {@link Serializer} and {@link Deserializer} for writing and reading {@link BindingSet}s
+ * to and from Kafka.
+ *
+ */
+public class BindingSetSerDe implements Serializer<BindingSet>, Deserializer<BindingSet> {
+
+    private static final Logger log = Logger.getLogger(BindingSetSerDe.class);
+    private static final AccumuloPcjSerializer serializer =  new AccumuloPcjSerializer();
+    private static final byte[] DELIM_BYTE = "\u0002".getBytes();
+    
+    private byte[] toBytes(BindingSet bindingSet) {
+        try {
+            return getBytes(getVarOrder(bindingSet), bindingSet);
+        } catch(Exception e) {
+            log.trace("Unable to serialize BindingSet: " + bindingSet);
+            return new byte[0];
+        }
+    }
+
+    private BindingSet fromBytes(byte[] bsBytes) {
+        try {
+            int firstIndex = Bytes.indexOf(bsBytes, DELIM_BYTE);
+            byte[] varOrderBytes = Arrays.copyOf(bsBytes, firstIndex);
+            byte[] bsBytesNoVarOrder = Arrays.copyOfRange(bsBytes, firstIndex + 1, bsBytes.length);
+            VariableOrder varOrder = new VariableOrder(new String(varOrderBytes, "UTF-8").split(";"));
+            return getBindingSet(varOrder, bsBytesNoVarOrder);
+        } catch (Exception e) {
+            log.trace("Unable to deserialize BindingSet: " + Arrays.toString(bsBytes));
+            return new QueryBindingSet();
+        }
+    }
+    
+    private VariableOrder getVarOrder(BindingSet bs) {
+        return new VariableOrder(bs.getBindingNames());
+    }
+    
+    private byte[] getBytes(VariableOrder varOrder, BindingSet bs) throws UnsupportedEncodingException, BindingSetConversionException {
+        byte[] bsBytes = serializer.convert(bs, varOrder);
+        String varOrderString = Joiner.on(";").join(varOrder.getVariableOrders());
+        byte[] varOrderBytes = varOrderString.getBytes("UTF-8");
+        return Bytes.concat(varOrderBytes, DELIM_BYTE, bsBytes);
+    }
+    
+    private BindingSet getBindingSet(VariableOrder varOrder, byte[] bsBytes) throws BindingSetConversionException {
+        return serializer.convert(bsBytes, varOrder);
+    }
+
+    @Override
+    public BindingSet deserialize(String topic, byte[] bytes) {
+        return fromBytes(bytes);
+    }
+
+    @Override
+    public void close() {
+        // Do nothing. Nothing to close.
+    }
+
+    @Override
+    public void configure(Map<String, ?> arg0, boolean arg1) {
+        // Do nothing.  Nothing to configure.
+    }
+
+    @Override
+    public byte[] serialize(String topic, BindingSet bs) {
+        return toBytes(bs);
+    }
+
+}

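The wire format is <variable order, ';'-separated>\u0002<binding set bytes>. A round-trip sketch using the OpenRDF types imported above; the binding name and value are illustrative:

    BindingSetSerDe serDe = new BindingSetSerDe();
    QueryBindingSet bs = new QueryBindingSet();
    bs.addBinding("x", new URIImpl("urn:example/a"));   // org.openrdf.model.impl.URIImpl
    byte[] bytes = serDe.serialize("anyTopic", bs);     // the topic is unused by the serde
    BindingSet copy = serDe.deserialize("anyTopic", bytes);
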
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializer.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializer.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializer.java
new file mode 100644
index 0000000..302e1be
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializer.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.io.UnsupportedEncodingException;
+import java.util.Map;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.rya.periodic.notification.api.Notification;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * Kafka {@link Serializer} and {@link Deserializer} for producing and consuming {@link CommandNotification}s
+ * to and from Kafka.
+ *
+ */
+public class CommandNotificationSerializer implements Serializer<CommandNotification>, Deserializer<CommandNotification> {
+
+    private static final Gson gson = new GsonBuilder()
+            .registerTypeHierarchyAdapter(Notification.class, new CommandNotificationTypeAdapter()).create();
+    private static final Logger LOG = LoggerFactory.getLogger(CommandNotificationSerializer.class);
+
+    @Override
+    public CommandNotification deserialize(String topic, byte[] bytes) {
+        try {
+            String json = new String(bytes, "UTF-8");
+            return gson.fromJson(json, CommandNotification.class);
+        } catch (UnsupportedEncodingException e) {
+            LOG.error("Unable to deserialize notification for topic: " + topic, e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public byte[] serialize(String topic, CommandNotification command) {
+        try {
+            return gson.toJson(command).getBytes("UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            LOG.info("Unable to serialize notification: " + command  + "for topic: " + topic);
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void close() {
+        // Do nothing. Nothing to close
+    }
+    
+    @Override
+    public void configure(Map<String, ?> configs, boolean isKey) {
+        // Do nothing. Nothing to configure
+    }
+    
+}
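
For context, a minimal sketch of how this serializer might be wired into a Kafka producer that
publishes CommandNotifications. The broker address and topic name are hypothetical placeholders:

    import java.util.Properties;
    import java.util.UUID;
    import java.util.concurrent.TimeUnit;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;
    import org.apache.rya.periodic.notification.notification.CommandNotification;
    import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
    import org.apache.rya.periodic.notification.notification.PeriodicNotification;
    import org.apache.rya.periodic.notification.serialization.CommandNotificationSerializer;

    public class CommandNotificationProducerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CommandNotificationSerializer.class.getName());

            // A notification with a 12 hour period and a 6 hour initial delay.
            PeriodicNotification notification = PeriodicNotification.builder()
                    .id(UUID.randomUUID().toString())
                    .period(12).initialDelay(6).timeUnit(TimeUnit.HOURS)
                    .build();
            CommandNotification command = new CommandNotification(Command.ADD, notification);

            try (KafkaProducer<String, CommandNotification> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("command-notifications", command)); // hypothetical topic
            }
        }
    }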

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationTypeAdapter.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationTypeAdapter.java
new file mode 100644
index 0000000..a9fb7e1
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/CommandNotificationTypeAdapter.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.lang.reflect.Type;
+
+import org.apache.rya.periodic.notification.api.Notification;
+import org.apache.rya.periodic.notification.notification.BasicNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * Gson {@link JsonSerializer} and {@link JsonDeserializer} used to serialize and deserialize
+ * {@link CommandNotification}s.  It is used in {@link CommandNotificationSerializer} for
+ * producing and consuming messages to and from Kafka.
+ *
+ */
+public class CommandNotificationTypeAdapter
+        implements JsonDeserializer<CommandNotification>, JsonSerializer<CommandNotification> {
+
+    @Override
+    public JsonElement serialize(CommandNotification src, Type typeOfSrc, JsonSerializationContext context) {
+        JsonObject result = new JsonObject();
+        result.add("command", new JsonPrimitive(src.getCommand().name()));
+        Notification notification = src.getNotification();
+        if (notification instanceof PeriodicNotification) {
+            result.add("type", new JsonPrimitive(PeriodicNotification.class.getSimpleName()));
+            PeriodicNotificationTypeAdapter adapter = new PeriodicNotificationTypeAdapter();
+            result.add("notification",
+                    adapter.serialize((PeriodicNotification) notification, PeriodicNotification.class, context));
+        } else if (notification instanceof BasicNotification) {
+            result.add("type", new JsonPrimitive(BasicNotification.class.getSimpleName()));
+            BasicNotificationTypeAdapter adapter = new BasicNotificationTypeAdapter();
+            result.add("notification",
+                    adapter.serialize((BasicNotification) notification, BasicNotification.class, context));
+        } else {
+            throw new IllegalArgumentException("Invalid notification type: " + notification.getClass().getName());
+        }
+        return result;
+    }
+
+    @Override
+    public CommandNotification deserialize(JsonElement element, Type typeOfT, JsonDeserializationContext context)
+            throws JsonParseException {
+
+        JsonObject json = element.getAsJsonObject();
+        Command command = Command.valueOf(json.get("command").getAsString());
+        String type = json.get("type").getAsString();
+        Notification notification = null;
+        if (type.equals(PeriodicNotification.class.getSimpleName())) {
+            notification = (new PeriodicNotificationTypeAdapter()).deserialize(json.get("notification"),
+                    PeriodicNotification.class, context);
+        } else if (type.equals(BasicNotification.class.getSimpleName())) {
+            notification = (new BasicNotificationTypeAdapter()).deserialize(json.get("notification"),
+                    BasicNotification.class, context);
+        } else {
+            throw new JsonParseException("Unable to deserialize notification: unknown type " + type);
+        }
+
+        return new CommandNotification(command, notification);
+    }
+
+}
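
To make the wire format concrete, the following sketch mirrors the Gson configuration that
CommandNotificationSerializer uses and prints the JSON this adapter produces. The query id is a
hypothetical value:

    import java.util.concurrent.TimeUnit;

    import org.apache.rya.periodic.notification.api.Notification;
    import org.apache.rya.periodic.notification.notification.CommandNotification;
    import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
    import org.apache.rya.periodic.notification.notification.PeriodicNotification;
    import org.apache.rya.periodic.notification.serialization.CommandNotificationTypeAdapter;

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    public class CommandNotificationJsonSketch {
        public static void main(String[] args) {
            // Same registration as CommandNotificationSerializer: the adapter handles the
            // whole Notification type hierarchy.
            Gson gson = new GsonBuilder()
                    .registerTypeHierarchyAdapter(Notification.class, new CommandNotificationTypeAdapter())
                    .create();

            PeriodicNotification periodic = PeriodicNotification.builder()
                    .id("query_1234") // hypothetical query id
                    .period(12).initialDelay(6).timeUnit(TimeUnit.HOURS)
                    .build();

            // Prints roughly:
            // {"command":"ADD","type":"PeriodicNotification",
            //  "notification":{"id":"query_1234","period":12,"initialDelay":6,"timeUnit":"HOURS"}}
            System.out.println(gson.toJson(new CommandNotification(Command.ADD, periodic)));
        }
    }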

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/PeriodicNotificationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/PeriodicNotificationTypeAdapter.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/PeriodicNotificationTypeAdapter.java
new file mode 100644
index 0000000..fcc0ba2
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/serialization/PeriodicNotificationTypeAdapter.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.lang.reflect.Type;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification.Builder;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * Gson {@link JsonSerializer} and {@link JsonDeserializer} used to serialize and deserialize
+ * {@link PeriodicNotification}s.  It is used in {@link CommandNotificationTypeAdapter}, which in
+ * turn is used in {@link CommandNotificationSerializer} for producing and consuming messages to
+ * and from Kafka.
+ *
+ */
+public class PeriodicNotificationTypeAdapter
+        implements JsonSerializer<PeriodicNotification>, JsonDeserializer<PeriodicNotification> {
+
+    @Override
+    public PeriodicNotification deserialize(JsonElement element, Type typeOfT, JsonDeserializationContext context)
+            throws JsonParseException {
+
+        JsonObject json = element.getAsJsonObject();
+        String id = json.get("id").getAsString();
+        long period = json.get("period").getAsLong();
+        TimeUnit periodTimeUnit = TimeUnit.valueOf(json.get("timeUnit").getAsString());
+        long initialDelay = json.get("initialDelay").getAsLong();
+        Builder builder = PeriodicNotification.builder().id(id).period(period)
+                .initialDelay(initialDelay).timeUnit(periodTimeUnit);
+
+        return builder.build();
+    }
+
+    @Override
+    public JsonElement serialize(PeriodicNotification src, Type typeOfSrc, JsonSerializationContext context) {
+
+        JsonObject result = new JsonObject();
+        result.add("id", new JsonPrimitive(src.getId()));
+        result.add("period", new JsonPrimitive(src.getPeriod()));
+        result.add("initialDelay", new JsonPrimitive(src.getInitialDelay()));
+        result.add("timeUnit", new JsonPrimitive(src.getTimeUnit().name()));
+
+        return result;
+    }
+
+}
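
The adapter can also be registered on its own when only PeriodicNotifications need to round trip
through JSON. A minimal sketch, with a hypothetical query id; it assumes PeriodicNotification
implements equals(), which the serializer round-trip tests below rely on:

    import java.util.concurrent.TimeUnit;

    import org.apache.rya.periodic.notification.notification.PeriodicNotification;
    import org.apache.rya.periodic.notification.serialization.PeriodicNotificationTypeAdapter;

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    public class PeriodicNotificationJsonSketch {
        public static void main(String[] args) {
            Gson gson = new GsonBuilder()
                    .registerTypeAdapter(PeriodicNotification.class, new PeriodicNotificationTypeAdapter())
                    .create();

            PeriodicNotification original = PeriodicNotification.builder()
                    .id("query_5678") // hypothetical query id
                    .period(30).initialDelay(5).timeUnit(TimeUnit.SECONDS)
                    .build();

            // Produces: {"id":"query_5678","period":30,"initialDelay":5,"timeUnit":"SECONDS"}
            String json = gson.toJson(original, PeriodicNotification.class);
            PeriodicNotification copy = gson.fromJson(json, PeriodicNotification.class);
            System.out.println(original.equals(copy)); // expected: true
        }
    }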

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/test/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializerTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/test/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializerTest.java b/extras/rya.periodic.service/periodic.service.notification/src/test/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializerTest.java
new file mode 100644
index 0000000..4aad1c6
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/test/java/org/apache/rya/periodic/notification/serialization/CommandNotificationSerializerTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.serialization;
+
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.rya.periodic.notification.notification.BasicNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.junit.Assert;
+import org.junit.Test;
+
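+/**
+ * Tests the methods of {@link CommandNotificationSerializer}.
+ */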
+public class CommandNotificationSerializerTest {
+
+    private CommandNotificationSerializer serializer = new CommandNotificationSerializer();
+    private static final String topic = "topic";
+
+    @Test
+    public void basicSerializationTest() {
+        PeriodicNotification notification = PeriodicNotification.builder().id(UUID.randomUUID().toString()).period(24)
+                .timeUnit(TimeUnit.DAYS).initialDelay(1).build();
+        CommandNotification command = new CommandNotification(Command.ADD, notification);
+        Assert.assertEquals(command, serializer.deserialize(topic, serializer.serialize(topic, command)));
+
+        PeriodicNotification notification1 = PeriodicNotification.builder().id(UUID.randomUUID().toString()).period(32)
+                .timeUnit(TimeUnit.SECONDS).initialDelay(15).build();
+        CommandNotification command1 = new CommandNotification(Command.ADD, notification1);
+        Assert.assertEquals(command1, serializer.deserialize(topic, serializer.serialize(topic, command1)));
+
+        PeriodicNotification notification2 = PeriodicNotification.builder().id(UUID.randomUUID().toString()).period(32)
+                .timeUnit(TimeUnit.SECONDS).initialDelay(15).build();
+        CommandNotification command2 = new CommandNotification(Command.ADD, notification2);
+        Assert.assertEquals(command2, serializer.deserialize(topic, serializer.serialize(topic, command2)));
+
+        BasicNotification notification3 = new BasicNotification(UUID.randomUUID().toString());
+        CommandNotification command3 = new CommandNotification(Command.ADD, notification3);
+        Assert.assertEquals(command3, serializer.deserialize(topic, serializer.serialize(topic, command3)));
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/pom.xml b/extras/rya.periodic.service/pom.xml
new file mode 100644
index 0000000..fce4996
--- /dev/null
+++ b/extras/rya.periodic.service/pom.xml
@@ -0,0 +1,39 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>rya.periodic.service</artifactId>
+
+  <parent>
+    <groupId>org.apache.rya</groupId>
+    <artifactId>rya.extras</artifactId>
+    <version>3.2.11-incubating-SNAPSHOT</version>
+  </parent>
+
+  <name>Apache Rya Periodic Service</name>
+  <description>Parent project for the Rya Periodic Service modules</description>
+
+  <packaging>pom</packaging>
+
+  <modules>
+    <module>periodic.service.notification</module>
+    <module>periodic.service.integration.tests</module>
+  </modules>
+
+</project>
\ No newline at end of file


[5/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinderTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinderTest.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinderTest.java
deleted file mode 100644
index 8b38923..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinderTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo.app;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-
-import org.junit.Test;
-import org.openrdf.model.impl.LiteralImpl;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.model.vocabulary.XMLSchema;
-import org.openrdf.query.algebra.Compare;
-import org.openrdf.query.algebra.Compare.CompareOp;
-import org.openrdf.query.algebra.Filter;
-import org.openrdf.query.algebra.ValueConstant;
-import org.openrdf.query.algebra.ValueExpr;
-import org.openrdf.query.algebra.Var;
-
-import com.google.common.base.Optional;
-
-/**
- * Tests the methods of {@link FilterFinder}.
- */
-public class FilterFinderTest {
-
-    @Test
-    public void manyFilters() throws Exception {
-        // The query that will be searched.
-        final String sparql =
-                "SELECT ?person ?age " +
-                "{" +
-                  "FILTER(?age < 30) . " +
-                  "FILTER(?person = <http://Alice>)" +
-                  "?person <http://hasAge> ?age" +
-                "}";
-
-        // Create the expected result.
-        final ValueExpr[] expected = new ValueExpr[2];
-        expected[0] =  new Compare(new Var("person"), new ValueConstant( new URIImpl("http://Alice") ));
-        expected[1] = new Compare(new Var("age"), new ValueConstant( new LiteralImpl("30", XMLSchema.INTEGER) ), CompareOp.LT);
-
-        // Run the test.
-        final FilterFinder finder = new FilterFinder();
-        final ValueExpr[] conditions = new ValueExpr[2];
-        conditions[0] = finder.findFilter(sparql, 0).get().getCondition();
-        conditions[1] = finder.findFilter(sparql, 1).get().getCondition();
-        assertTrue( Arrays.equals(expected, conditions) );
-    }
-
-    @Test
-    public void noFilterAtIndex() throws Exception {
-        // The query that will be searched.
-        final String sparql =
-                "SELECT ?person ?age " +
-                "{" +
-                  "FILTER(?age < 30) . " +
-                  "FILTER(?person = <http://Alice>)" +
-                  "?person <http://hasAge> ?age" +
-                "}";
-
-        // Run the test.
-        final FilterFinder finder = new FilterFinder();
-        final Optional<Filter> filter = finder.findFilter(sparql, 4);
-        assertFalse( filter.isPresent() );
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
deleted file mode 100644
index 99791ee..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDeTest.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.rya.indexing.pcj.fluo.app;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.fluo.api.data.Bytes;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.junit.Test;
-import org.openrdf.model.ValueFactory;
-import org.openrdf.model.impl.ValueFactoryImpl;
-import org.openrdf.query.impl.MapBindingSet;
-
-/**
- * Tests the methods of {@link VisibilityBindingSetSerDe}.
- */
-public class VisibilityBindingSetSerDeTest {
-
-    @Test
-    public void rountTrip() throws Exception {
-        final ValueFactory vf = new ValueFactoryImpl();
-
-        final MapBindingSet bs = new MapBindingSet();
-        bs.addBinding("name", vf.createLiteral("Alice"));
-        bs.addBinding("age", vf.createLiteral(5));
-        final VisibilityBindingSet original = new VisibilityBindingSet(bs, "u");
-
-        final VisibilityBindingSetSerDe serde = new VisibilityBindingSetSerDe();
-        final Bytes bytes = serde.serialize(original);
-        final VisibilityBindingSet result = serde.deserialize(bytes);
-
-        assertEquals(original, result);
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializerTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializerTest.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializerTest.java
new file mode 100644
index 0000000..fe89325
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializerTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.Optional;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.Side;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;
+import org.apache.rya.indexing.pcj.fluo.app.batch.JoinBatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.junit.Test;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
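+/**
+ * Tests the methods of {@link BatchInformationSerializer}.
+ */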
+public class BatchInformationSerializerTest {
+
+    @Test
+    public void testSpanBatchInformationSerialization() {
+
+        SpanBatchDeleteInformation batch = SpanBatchDeleteInformation.builder().setBatchSize(1000)
+                .setColumn(FluoQueryColumns.PERIODIC_QUERY_BINDING_SET).setSpan(Span.prefix(Bytes.of("prefix"))).build();
+        byte[] batchBytes = BatchInformationSerializer.toBytes(batch);
+        Optional<BatchInformation> decodedBatch = BatchInformationSerializer.fromBytes(batchBytes);
+        assertEquals(batch, decodedBatch.get());
+    }
+
+    @Test
+    public void testJoinBatchInformationSerialization() {
+
+        QueryBindingSet bs = new QueryBindingSet();
+        bs.addBinding("a", new URIImpl("urn:123"));
+        bs.addBinding("b", new URIImpl("urn:456"));
+        VisibilityBindingSet vBis = new VisibilityBindingSet(bs, "FOUO");
+        
+        JoinBatchInformation batch = JoinBatchInformation.builder().setBatchSize(1000).setTask(Task.Update)
+                .setColumn(FluoQueryColumns.PERIODIC_QUERY_BINDING_SET).setSpan(Span.prefix(Bytes.of("prefix346")))
+                .setJoinType(JoinType.LEFT_OUTER_JOIN).setSide(Side.RIGHT).setVarOrder(new VariableOrder(Arrays.asList("a", "b")))
+                .setBs(vBis).build();
+        
+        byte[] batchBytes = BatchInformationSerializer.toBytes(batch);
+        Optional<BatchInformation> decodedBatch = BatchInformationSerializer.fromBytes(batchBytes);
+        assertEquals(batch, decodedBatch.get());
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryUtilTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryUtilTest.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryUtilTest.java
new file mode 100644
index 0000000..c8ca6af
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryUtilTest.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.query;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.query.SparqlFluoQueryBuilder.NodeIds;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil.PeriodicQueryNodeRelocator;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil.PeriodicQueryNodeVisitor;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.algebra.Filter;
+import org.openrdf.query.algebra.FunctionCall;
+import org.openrdf.query.algebra.Join;
+import org.openrdf.query.algebra.Projection;
+import org.openrdf.query.algebra.QueryModelNode;
+import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.ValueConstant;
+import org.openrdf.query.algebra.ValueExpr;
+import org.openrdf.query.algebra.Var;
+import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
+import org.openrdf.query.parser.ParsedQuery;
+import org.openrdf.query.parser.sparql.SPARQLParser;
+
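+/**
+ * Tests the methods of {@link PeriodicQueryUtil}.
+ */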
+public class PeriodicQueryUtilTest {
+
+    private static final ValueFactory vf = new ValueFactoryImpl();
+
+    @Test
+    public void periodicNodeNotPresentTest() throws Exception {
+        
+        List<ValueExpr> values = Arrays.asList(new Var("time"), new ValueConstant(vf.createLiteral(12.0)), new ValueConstant(vf.createLiteral(6.0)), new ValueConstant(vf.createURI(PeriodicQueryUtil.temporalNameSpace + "hours")));
+        FunctionCall func = new FunctionCall("uri:func", values);
+        Optional<PeriodicQueryNode> node1 = PeriodicQueryUtil.getPeriodicQueryNode(func, new Join());
+        Assert.assertEquals(false, node1.isPresent());
+    }
+
+    @Test
+    public void periodicNodePresentTest() throws Exception {
+        
+        List<ValueExpr> values = Arrays.asList(new Var("time"), new ValueConstant(vf.createLiteral(12.0)), new ValueConstant(vf.createLiteral(6.0)), new ValueConstant(vf.createURI(PeriodicQueryUtil.temporalNameSpace + "hours")));
+        FunctionCall func = new FunctionCall(PeriodicQueryUtil.PeriodicQueryURI, values);
+        Optional<PeriodicQueryNode> node1 = PeriodicQueryUtil.getPeriodicQueryNode(func, new Join());
+        Assert.assertEquals(true, node1.isPresent());
+        
+        PeriodicQueryNode node2 = new PeriodicQueryNode(12*60*60*1000L, 6*3600*1000L, TimeUnit.MILLISECONDS, "time", new Join());
+        
+        Assert.assertEquals(true, periodicNodesEqualIgnoreArg(node1.get(), node2));
+    }
+
+    @Test
+    public void periodicNodeFractionalDurationTest() throws Exception {
+        
+        List<ValueExpr> values = Arrays.asList(new Var("time"), new ValueConstant(vf.createLiteral(1)), new ValueConstant(vf.createLiteral(.5)), new ValueConstant(vf.createURI(PeriodicQueryUtil.temporalNameSpace + "hours")));
+        FunctionCall func = new FunctionCall(PeriodicQueryUtil.PeriodicQueryURI, values);
+        Optional<PeriodicQueryNode> node1 = PeriodicQueryUtil.getPeriodicQueryNode(func, new Join());
+        Assert.assertEquals(true, node1.isPresent());
+        
+        double window = 1*60*60*1000;
+        double period = .5*3600*1000;
+        
+        PeriodicQueryNode node2 = new PeriodicQueryNode((long) window, (long) period, TimeUnit.MILLISECONDS, "time", new Join());
+        
+        Assert.assertEquals(true, periodicNodesEqualIgnoreArg(node1.get(), node2));
+    }
+    
+    @Test
+    public void testPeriodicNodePlacement() throws MalformedQueryException {
+         String query = "prefix function: <http://org.apache.rya/function#> "
+                + "prefix time: <http://www.w3.org/2006/time#> "
+                + "prefix fn: <http://www.w3.org/2006/fn#> "
+                + "select ?obs ?time ?lat where {"
+                + "Filter(function:periodic(?time, 12.0, 6.0,time:hours)) "
+                + "Filter(fn:test(?lat, 25)) "
+                + "?obs <uri:hasTime> ?time. "
+                + "?obs <uri:hasLattitude> ?lat }";
+         
+         SPARQLParser parser = new SPARQLParser();
+         ParsedQuery pq = parser.parseQuery(query, null);
+         TupleExpr te = pq.getTupleExpr();
+         te.visit(new PeriodicQueryNodeVisitor());
+         
+         PeriodicNodeCollector collector = new PeriodicNodeCollector();
+         te.visit(collector);
+         
+         PeriodicQueryNode node2 = new PeriodicQueryNode(12*60*60*1000L, 6*3600*1000L, TimeUnit.MILLISECONDS, "time", new Join());
+
+         Assert.assertEquals(true, periodicNodesEqualIgnoreArg(node2, collector.getPeriodicQueryNode()));
+         
+    }
+    
+    @Test
+    public void testPeriodicNodeLocation() throws MalformedQueryException {
+         String query = "prefix function: <http://org.apache.rya/function#> "
+                + "prefix time: <http://www.w3.org/2006/time#> "
+                + "prefix fn: <http://www.w3.org/2006/fn#> "
+                + "select ?obs ?time ?lat where {"
+                + "Filter(function:periodic(?time, 1,.5,time:hours)) "
+                + "Filter(fn:test(?lat, 25)) "
+                + "?obs <uri:hasTime> ?time. "
+                + "?obs <uri:hasLattitude> ?lat }";
+         
+         SPARQLParser parser = new SPARQLParser();
+         ParsedQuery pq = parser.parseQuery(query, null);
+         TupleExpr te = pq.getTupleExpr();
+         te.visit(new PeriodicQueryNodeVisitor());
+        
+         PeriodicNodeCollector collector = new PeriodicNodeCollector();
+         te.visit(collector);
+         Assert.assertEquals(2, collector.getPos());
+         
+         te.visit(new PeriodicQueryNodeRelocator());
+         collector.resetCount();
+         te.visit(collector);
+         Assert.assertEquals(1, collector.getPos());
+         
+         double window = 1*60*60*1000;
+         double period = .5*3600*1000;
+         PeriodicQueryNode node2 = new PeriodicQueryNode((long) window, (long) period, TimeUnit.MILLISECONDS, "time", new Join());
+         Assert.assertEquals(true, periodicNodesEqualIgnoreArg(node2, collector.getPeriodicQueryNode()));
+         
+    }
+    
+    @Test
+    public void testFluoQueryVarOrders() throws MalformedQueryException {
+        String query = "prefix function: <http://org.apache.rya/function#> "
+                + "prefix time: <http://www.w3.org/2006/time#> "
+                + "select (count(?obs) as ?total) where {"
+                + "Filter(function:periodic(?time, 12.4, 6.2,time:hours)) "
+                + "?obs <uri:hasTime> ?time. "
+                + "?obs <uri:hasLattitude> ?lat }";
+         
+         SPARQLParser parser = new SPARQLParser();
+         ParsedQuery pq = parser.parseQuery(query, null);
+         SparqlFluoQueryBuilder builder = new SparqlFluoQueryBuilder();
+         FluoQuery fluoQuery = builder.make(pq, new NodeIds());
+         
+         PeriodicQueryMetadata periodicMeta = fluoQuery.getPeriodicQueryMetadata().orNull();
+         Assert.assertEquals(true, periodicMeta != null);
+         VariableOrder periodicVars = periodicMeta.getVariableOrder();
+         Assert.assertEquals(IncrementalUpdateConstants.PERIODIC_BIN_ID, periodicVars.getVariableOrders().get(0));
+         
+         QueryMetadata queryMeta = fluoQuery.getQueryMetadata().get();
+         VariableOrder queryVars = queryMeta.getVariableOrder();
+         Assert.assertEquals(IncrementalUpdateConstants.PERIODIC_BIN_ID, queryVars.getVariableOrders().get(0));
+         
+         Collection<AggregationMetadata> aggMetaCollection = fluoQuery.getAggregationMetadata();
+         Assert.assertEquals(1, aggMetaCollection.size());
+         AggregationMetadata aggMeta = aggMetaCollection.iterator().next();
+         VariableOrder aggVars = aggMeta.getVariableOrder();
+         Assert.assertEquals(IncrementalUpdateConstants.PERIODIC_BIN_ID, aggVars.getVariableOrders().get(0));
+    }
+
+    private boolean periodicNodesEqualIgnoreArg(PeriodicQueryNode node1, PeriodicQueryNode node2) {
+        return new EqualsBuilder().append(node1.getPeriod(), node2.getPeriod()).append(node1.getWindowSize(), node2.getWindowSize())
+                .append(node1.getTemporalVariable(), node2.getTemporalVariable()).append(node1.getUnit(), node2.getUnit()).build();
+    }
+    
+    private static class PeriodicNodeCollector extends QueryModelVisitorBase<RuntimeException>{
+        
+        private PeriodicQueryNode periodicNode;
+        int count = 0;
+        
+        public PeriodicQueryNode getPeriodicQueryNode() {
+            return periodicNode;
+        }
+        
+        public int getPos() {
+            return count;
+        }
+        
+        public void resetCount() {
+            count = 0;
+        }
+        
+        public void meet(Filter node) {
+            count++;
+            node.getArg().visit(this);
+        }
+        
+        public void meet(Projection node) {
+            count++;
+            node.getArg().visit(this);
+        }
+        
+        @Override
+        public void meetOther(QueryModelNode node) {
+            if(node instanceof PeriodicQueryNode) {
+                periodicNode = (PeriodicQueryNode) node;
+            }
+        }
+        
+    }
+
+}
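
For reference, the expected values in the tests above encode a simple unit conversion: a call such
as function:periodic(?time, 12.0, 6.0, time:hours) declares a 12 hour window with a 6 hour period,
which PeriodicQueryUtil normalizes to milliseconds before building the PeriodicQueryNode. A quick
check of the arithmetic the assertions rely on:

    long windowMs = (long) (12.0 * 60 * 60 * 1000); // 43,200,000 ms = 12 hours
    long periodMs = (long) (6.0 * 60 * 60 * 1000);  // 21,600,000 ms = 6 hours

The fractional duration test works the same way: a 0.5 hour period becomes 1,800,000 ms, and the
fraction is only truncated after the conversion to milliseconds.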

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/util/QueryReportRenderer.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/util/QueryReportRenderer.java b/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/util/QueryReportRenderer.java
index 99ccc58..7a73b41 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/util/QueryReportRenderer.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.client/src/main/java/org/apache/rya/indexing/pcj/fluo/client/util/QueryReportRenderer.java
@@ -83,8 +83,7 @@ public class QueryReportRenderer {
             builder.appendItem( new ReportItem("FILTER NODE") );
             builder.appendItem( new ReportItem("Node ID", filterMetadata.getNodeId()) );
             builder.appendItem( new ReportItem("Variable Order", filterMetadata.getVariableOrder().toString()) );
-            builder.appendItem( new ReportItem("Original SPARQL", prettyFormatSparql(  filterMetadata.getOriginalSparql()) ) );
-            builder.appendItem( new ReportItem("Filter Index", "" + filterMetadata.getFilterIndexWithinSparql()) );
+            builder.appendItem( new ReportItem("Filter SPARQL", prettyFormatSparql(  filterMetadata.getFilterSparql())));
             builder.appendItem( new ReportItem("Parent Node ID", filterMetadata.getParentNodeId()) );
             builder.appendItem( new ReportItem("Child Node ID", filterMetadata.getChildNodeId()) );
             builder.appendItem( new ReportItem("Count", "" + queryReport.getCount(filterMetadata.getNodeId())) );

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml b/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
index 6467191..9591e55 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/pom.xml
@@ -43,6 +43,11 @@
             <artifactId>rya.indexing</artifactId>
         </dependency>
         <dependency>
+            <groupId>org.apache.rya</groupId>
+            <artifactId>rya.pcj.fluo.test.base</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
             <groupId>org.apache.fluo</groupId>
             <artifactId>fluo-api</artifactId>
         </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/FluoITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/FluoITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/FluoITBase.java
deleted file mode 100644
index b5d9428..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/FluoITBase.java
+++ /dev/null
@@ -1,282 +0,0 @@
-package org.apache.rya.indexing.pcj.fluo;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.net.UnknownHostException;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.rya.accumulo.MiniAccumuloClusterInstance;
-import org.apache.rya.accumulo.MiniAccumuloSingleton;
-import org.apache.rya.accumulo.RyaTestInstanceRule;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
-import org.apache.rya.api.client.accumulo.AccumuloInstall;
-import org.apache.zookeeper.ClientCnxn;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.openrdf.repository.RepositoryConnection;
-import org.openrdf.repository.RepositoryException;
-import org.openrdf.sail.Sail;
-import org.openrdf.sail.SailException;
-
-import org.apache.fluo.api.client.FluoAdmin;
-import org.apache.fluo.api.client.FluoAdmin.AlreadyInitializedException;
-import org.apache.fluo.api.client.FluoClient;
-import org.apache.fluo.api.client.FluoFactory;
-import org.apache.fluo.api.config.FluoConfiguration;
-import org.apache.fluo.api.mini.MiniFluo;
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.api.client.RyaClientException;
-import org.apache.rya.api.client.Install;
-import org.apache.rya.api.client.Install.DuplicateInstanceNameException;
-import org.apache.rya.api.client.Install.InstallConfiguration;
-import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
-import org.apache.rya.api.persist.RyaDAOException;
-import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
-import org.apache.rya.rdftriplestore.RyaSailRepository;
-import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
-import org.apache.rya.sail.config.RyaSailFactory;
-
-/**
- * Integration tests that ensure the Fluo application processes PCJs results
- * correctly.
- * <p>
- * This class is being ignored because it doesn't contain any unit tests.
- */
-public abstract class FluoITBase {
-    private static final Logger log = Logger.getLogger(FluoITBase.class);
-
-    // Mini Accumulo Cluster
-    private static MiniAccumuloClusterInstance clusterInstance = MiniAccumuloSingleton.getInstance();
-    private static MiniAccumuloCluster cluster;
-
-    private static String instanceName = null;
-    private static String zookeepers = null;
-
-    protected static Connector accumuloConn = null;
-
-    // Fluo data store and connections.
-    protected MiniFluo fluo = null;
-    protected FluoConfiguration fluoConfig = null;
-    protected FluoClient fluoClient = null;
-
-    // Rya data store and connections.
-    protected RyaSailRepository ryaRepo = null;
-    protected RepositoryConnection ryaConn = null;
-
-    @Rule
-    public RyaTestInstanceRule testInstance = new RyaTestInstanceRule(false);
-
-    @BeforeClass
-    public static void beforeClass() throws Exception {
-        Logger.getLogger(ClientCnxn.class).setLevel(Level.ERROR);
-
-        // Setup and start the Mini Accumulo.
-        cluster = clusterInstance.getCluster();
-
-        // Store a connector to the Mini Accumulo.
-        instanceName = cluster.getInstanceName();
-        zookeepers = cluster.getZooKeepers();
-
-        final Instance instance = new ZooKeeperInstance(instanceName, zookeepers);
-        accumuloConn = instance.getConnector(clusterInstance.getUsername(), new PasswordToken(clusterInstance.getPassword()));
-    }
-
-    @Before
-    public void setupMiniResources() throws Exception {
-        // Initialize the Mini Fluo that will be used to store created queries.
-        fluoConfig = createFluoConfig();
-        preFluoInitHook();
-        FluoFactory.newAdmin(fluoConfig).initialize(new FluoAdmin.InitializationOptions()
-                .setClearTable(true)
-                .setClearZookeeper(true));
-        postFluoInitHook();
-        fluo = FluoFactory.newMiniFluo(fluoConfig);
-        fluoClient = FluoFactory.newClient(fluo.getClientConfiguration());
-
-        // Initialize the Rya that will be used by the tests.
-        ryaRepo = setupRya();
-        ryaConn = ryaRepo.getConnection();
-    }
-
-    @After
-    public void shutdownMiniResources() {
-        if (ryaConn != null) {
-            try {
-                log.info("Shutting down Rya Connection.");
-                ryaConn.close();
-                log.info("Rya Connection shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Rya Connection.", e);
-            }
-        }
-
-        if (ryaRepo != null) {
-            try {
-                log.info("Shutting down Rya Repo.");
-                ryaRepo.shutDown();
-                log.info("Rya Repo shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Rya Repo.", e);
-            }
-        }
-
-        if (fluoClient != null) {
-            try {
-                log.info("Shutting down Fluo Client.");
-                fluoClient.close();
-                log.info("Fluo Client shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Fluo Client.", e);
-            }
-        }
-
-        if (fluo != null) {
-            try {
-                log.info("Shutting down Mini Fluo.");
-                fluo.close();
-                log.info("Mini Fluo shut down.");
-            } catch (final Exception e) {
-                log.error("Could not shut down the Mini Fluo.", e);
-            }
-        }
-    }
-
-    protected void preFluoInitHook() throws Exception {
-
-    }
-
-    protected void postFluoInitHook() throws Exception {
-
-    }
-
-    protected MiniAccumuloCluster getMiniAccumuloCluster() {
-        return cluster;
-    }
-
-    protected MiniFluo getMiniFluo() {
-        return fluo;
-    }
-
-    public RyaSailRepository getRyaSailRepository() {
-        return ryaRepo;
-    }
-
-    public Connector getAccumuloConnector() {
-        return accumuloConn;
-    }
-
-    public String getRyaInstanceName() {
-        return testInstance.getRyaInstanceName();
-    }
-
-    protected String getUsername() {
-        return clusterInstance.getUsername();
-    }
-
-    protected String getPassword() {
-        return clusterInstance.getPassword();
-    }
-
-    protected FluoConfiguration getFluoConfiguration() {
-        return fluoConfig;
-    }
-
-    public AccumuloConnectionDetails createConnectionDetails() {
-        return new AccumuloConnectionDetails(
-                clusterInstance.getUsername(),
-                clusterInstance.getPassword().toCharArray(),
-                clusterInstance.getInstanceName(),
-                clusterInstance.getZookeepers());
-    }
-
-    private FluoConfiguration createFluoConfig() {
-        // Configure how the mini fluo will run.
-        final FluoConfiguration config = new FluoConfiguration();
-        config.setMiniStartAccumulo(false);
-        config.setAccumuloInstance(instanceName);
-        config.setAccumuloUser(clusterInstance.getUsername());
-        config.setAccumuloPassword(clusterInstance.getPassword());
-        config.setInstanceZookeepers(zookeepers + "/fluo");
-        config.setAccumuloZookeepers(zookeepers);
-
-        config.setApplicationName(getRyaInstanceName());
-        config.setAccumuloTable("fluo" + getRyaInstanceName());
-        return config;
-    }
-
-    /**
-     * Sets up a Rya instance.
-     */
-    protected RyaSailRepository setupRya()
-            throws AccumuloException, AccumuloSecurityException, RepositoryException, RyaDAOException,
-            NumberFormatException, UnknownHostException, InferenceEngineException, AlreadyInitializedException,
-            RyaDetailsRepositoryException, DuplicateInstanceNameException, RyaClientException, SailException {
-        checkNotNull(instanceName);
-        checkNotNull(zookeepers);
-
-        // Setup Rya configuration values.
-        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
-        conf.setTablePrefix(getRyaInstanceName());
-        conf.setDisplayQueryPlan(true);
-        conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, false);
-        conf.set(ConfigUtils.CLOUDBASE_USER, clusterInstance.getUsername());
-        conf.set(ConfigUtils.CLOUDBASE_PASSWORD, clusterInstance.getPassword());
-        conf.set(ConfigUtils.CLOUDBASE_INSTANCE, clusterInstance.getInstanceName());
-        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, clusterInstance.getZookeepers());
-        conf.set(ConfigUtils.USE_PCJ, "true");
-        conf.set(ConfigUtils.FLUO_APP_NAME, getRyaInstanceName());
-        conf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
-        conf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
-        conf.set(ConfigUtils.CLOUDBASE_AUTHS, "");
-
-        // Install the test instance of Rya.
-        final Install install = new AccumuloInstall(createConnectionDetails(), accumuloConn);
-
-        final InstallConfiguration installConfig = InstallConfiguration.builder()
-                .setEnableTableHashPrefix(true)
-                .setEnableEntityCentricIndex(true)
-                .setEnableFreeTextIndex(true)
-                .setEnableTemporalIndex(true)
-                .setEnablePcjIndex(true)
-                .setEnableGeoIndex(true)
-                .setFluoPcjAppName(getRyaInstanceName())
-                .build();
-        install.install(getRyaInstanceName(), installConfig);
-
-        // Connect to the instance of Rya that was just installed.
-        final Sail sail = RyaSailFactory.getInstance(conf);
-        final RyaSailRepository ryaRepo = new RyaSailRepository(sail);
-
-        return ryaRepo;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
deleted file mode 100644
index 452dd27..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/KafkaExportITBase.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo;
-
-import static java.util.Objects.requireNonNull;
-import static org.junit.Assert.assertEquals;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Properties;
-
-import org.I0Itec.zkclient.ZkClient;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.fluo.api.config.ObserverSpecification;
-import org.apache.fluo.recipes.test.AccumuloExportITBase;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.apache.rya.accumulo.AccumuloRdfConfiguration;
-import org.apache.rya.accumulo.AccumuloRyaDAO;
-import org.apache.rya.api.client.Install.InstallConfiguration;
-import org.apache.rya.api.client.RyaClient;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
-import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
-import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
-import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
-import org.apache.rya.indexing.pcj.fluo.app.export.kafka.RyaSubGraphKafkaSerDe;
-import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.ConstructQueryResultObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.apache.rya.rdftriplestore.RyaSailRepository;
-import org.apache.rya.sail.config.RyaSailFactory;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.openrdf.model.Statement;
-import org.openrdf.repository.sail.SailRepositoryConnection;
-import org.openrdf.sail.Sail;
-
-
-import kafka.admin.AdminUtils;
-import kafka.admin.RackAwareMode;
-import kafka.server.KafkaConfig;
-import kafka.server.KafkaServer;
-import kafka.utils.MockTime;
-import kafka.utils.TestUtils;
-import kafka.utils.Time;
-import kafka.utils.ZKStringSerializer$;
-import kafka.utils.ZkUtils;
-import kafka.zk.EmbeddedZookeeper;
-
-/**
- * The base Integration Test class used for Fluo applications that export to a
- * Kafka topic.
- */
-public class KafkaExportITBase extends AccumuloExportITBase {
-
-    protected static final String RYA_INSTANCE_NAME = "test_";
-
-    private static final String ZKHOST = "127.0.0.1";
-    private static final String BROKERHOST = "127.0.0.1";
-    private static final String BROKERPORT = "9092";
-    private ZkUtils zkUtils;
-    private KafkaServer kafkaServer;
-    private EmbeddedZookeeper zkServer;
-    private ZkClient zkClient;
-
-    // The Rya instance that statements are written to, which will be fed into
-    // the Fluo app.
-    private RyaSailRepository ryaSailRepo = null;
-    private AccumuloRyaDAO dao = null;
-
-    /**
-     * Add info about the Kafka queue/topic to receive the export.
-     */
-    @Override
-    protected void preFluoInitHook() throws Exception {
-        // Setup the observers that will be used by the Fluo PCJ Application.
-        final List<ObserverSpecification> observers = new ArrayList<>();
-        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
-        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
-        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
-        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
-        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
-
-        // Configure the export observer to export new PCJ results to the mini
-        // accumulo cluster.
-        final HashMap<String, String> exportParams = new HashMap<>();
-
-        final KafkaExportParameters kafkaParams = new KafkaExportParameters(exportParams);
-        kafkaParams.setExportToKafka(true);
-
-        // Configure the Kafka Producer
-        final Properties producerConfig = new Properties();
-        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
-        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
-        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
-                "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
-        kafkaParams.addAllProducerConfig(producerConfig);
-
-        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
-        observers.add(exportObserverConfig);
-        
-    // Create the construct query observer and configure it to export
-    // construct query results to Kafka.
-        HashMap<String, String> constructParams = new HashMap<>();
-        final KafkaExportParameters kafkaConstructParams = new KafkaExportParameters(constructParams);
-        kafkaConstructParams.setExportToKafka(true);
-        
-        // Configure the Kafka Producer
-        final Properties constructProducerConfig = new Properties();
-        constructProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
-        constructProducerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
-        constructProducerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, RyaSubGraphKafkaSerDe.class.getName());
-        kafkaConstructParams.addAllProducerConfig(constructProducerConfig);
-
-        final ObserverSpecification constructExportObserverConfig = new ObserverSpecification(ConstructQueryResultObserver.class.getName(),
-                constructParams);
-        observers.add(constructExportObserverConfig);
-
-        // Add the observers to the Fluo Configuration.
-        super.getFluoConfiguration().addObservers(observers);
-    }
-
-    /**
-     * Installs a Rya instance and sets up an embedded Kafka broker; mini Fluo is set up by the base class.
-     */
-    @Before
-    public void setupKafka() throws Exception {
-        // Install an instance of Rya on the Accumulo cluster.
-        installRyaInstance();
-
-        // Setup Kafka.
-        zkServer = new EmbeddedZookeeper();
-        final String zkConnect = ZKHOST + ":" + zkServer.port();
-        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
-        zkUtils = ZkUtils.apply(zkClient, false);
-
-        // Set up the Kafka broker.
-        final Properties brokerProps = new Properties();
-        brokerProps.setProperty("zookeeper.connect", zkConnect);
-        brokerProps.setProperty("broker.id", "0");
-        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
-        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
-        final KafkaConfig config = new KafkaConfig(brokerProps);
-        final Time mock = new MockTime();
-        kafkaServer = TestUtils.createServer(config, mock);
-    }
-
-    @After
-    public void teardownRya() {
-        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
-        final String instanceName = cluster.getInstanceName();
-        final String zookeepers = cluster.getZooKeepers();
-
-        // Uninstall the instance of Rya.
-        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
-                new AccumuloConnectionDetails(ACCUMULO_USER, ACCUMULO_PASSWORD.toCharArray(), instanceName, zookeepers),
-                super.getAccumuloConnector());
-
-        try {
-            ryaClient.getUninstall().uninstall(RYA_INSTANCE_NAME);
-            // Shutdown the repo.
-            if(ryaSailRepo != null) {ryaSailRepo.shutDown();}
-            if(dao != null ) {dao.destroy();}
-        } catch (Exception e) {
-            System.out.println("Encountered the following Exception when shutting down Rya: " + e.getMessage());
-        }
-    }
-
-    private void installRyaInstance() throws Exception {
-        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
-        final String instanceName = cluster.getInstanceName();
-        final String zookeepers = cluster.getZooKeepers();
-
-        // Install the Rya instance to the mini accumulo cluster.
-        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
-                new AccumuloConnectionDetails(ACCUMULO_USER, ACCUMULO_PASSWORD.toCharArray(), instanceName, zookeepers),
-                super.getAccumuloConnector());
-
-        ryaClient.getInstall().install(RYA_INSTANCE_NAME,
-                InstallConfiguration.builder().setEnableTableHashPrefix(false).setEnableFreeTextIndex(false)
-                        .setEnableEntityCentricIndex(false).setEnableGeoIndex(false).setEnableTemporalIndex(false).setEnablePcjIndex(true)
-                        .setFluoPcjAppName(super.getFluoConfiguration().getApplicationName()).build());
-
-        // Connect to the Rya instance that was just installed.
-        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
-        final Sail sail = RyaSailFactory.getInstance(conf);
-        dao = RyaSailFactory.getAccumuloDAOWithUpdatedConfig(conf);
-        ryaSailRepo = new RyaSailRepository(sail);
-    }
-
-    protected AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
-        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
-        conf.setTablePrefix(RYA_INSTANCE_NAME);
-
-        // Accumulo connection information.
-        conf.setAccumuloUser(AccumuloExportITBase.ACCUMULO_USER);
-        conf.setAccumuloPassword(AccumuloExportITBase.ACCUMULO_PASSWORD);
-        conf.setAccumuloInstance(super.getAccumuloConnector().getInstance().getInstanceName());
-        conf.setAccumuloZookeepers(super.getAccumuloConnector().getInstance().getZooKeepers());
-        conf.setAuths("");
-
-        // PCJ configuration information.
-        conf.set(ConfigUtils.USE_PCJ, "true");
-        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
-        conf.set(ConfigUtils.FLUO_APP_NAME, super.getFluoConfiguration().getApplicationName());
-        conf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
-        conf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
-
-        conf.setDisplayQueryPlan(true);
-
-        return conf;
-    }
-
-    /**
-     * @return A {@link RyaSailRepository} that is connected to the Rya instance
-     *         that statements are loaded into.
-     */
-    protected RyaSailRepository getRyaSailRepository() throws Exception {
-        return ryaSailRepo;
-    }
-
-    /**
-     * @return An {@link AccumuloRyaDAO} so that RyaStatements with distinct
-     *         visibilities can be added to the Rya instance.
-     */
-    protected AccumuloRyaDAO getRyaDAO() {
-        return dao;
-    }
-
-    /**
-     * Shuts down the embedded Kafka broker and the mini ZooKeeper server.
-     */
-    @After
-    public void teardownKafka() {
-        if(kafkaServer != null) {kafkaServer.shutdown();}
-        if(zkClient != null) {zkClient.close();}
-        if(zkServer != null) {zkServer.shutdown();}
-    }
-
-    /**
-     * Tests Kafka without any Rya code to make sure Kafka works in this environment.
-     * If this test fails, it is a testing-environment issue, not a Rya issue.
-     * Source: https://github.com/asmaier/mini-kafka
-     */
-    @Test
-    public void embeddedKafkaTest() throws Exception {
-        // create topic
-        final String topic = "testTopic";
-        AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
-
-        // Set up the producer.
-        final Properties producerProps = new Properties();
-        producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
-        producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
-        producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-        final KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps);
-
-        // Set up the consumer.
-        final Properties consumerProps = new Properties();
-        consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
-        consumerProps.setProperty("group.id", "group0");
-        consumerProps.setProperty("client.id", "consumer0");
-        consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
-        consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-        // to make sure the consumer starts from the beginning of the topic
-        consumerProps.put("auto.offset.reset", "earliest");
-
-        final KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
-        consumer.subscribe(Arrays.asList(topic));
-
-        // send message
-        final ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, 42, "test-message".getBytes(StandardCharsets.UTF_8));
-        producer.send(data);
-        producer.close();
-
-        // Poll for the message.
-        final ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
-        assertEquals(1, records.count());
-        final Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
-        final ConsumerRecord<Integer, byte[]> record = recordIterator.next();
-        assertEquals(42, (int) record.key());
-        assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
-        consumer.close();
-    }
-
-    protected KafkaConsumer<Integer, VisibilityBindingSet> makeConsumer(final String topicName) {
-        // setup consumer
-        final Properties consumerProps = new Properties();
-        consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
-        consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
-        consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
-        consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
-                "org.apache.kafka.common.serialization.IntegerDeserializer");
-        consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
-                "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
-
-        // to make sure the consumer starts from the beginning of the topic
-        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-
-        final KafkaConsumer<Integer, VisibilityBindingSet> consumer = new KafkaConsumer<>(consumerProps);
-        consumer.subscribe(Arrays.asList(topicName));
-        return consumer;
-    }
-
-    protected String loadData(final String sparql, final Collection<Statement> statements) throws Exception {
-        requireNonNull(sparql);
-        requireNonNull(statements);
-
-        // Register the PCJ with Rya.
-        final Instance accInstance = super.getAccumuloConnector().getInstance();
-        final Connector accumuloConn = super.getAccumuloConnector();
-
-        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(ACCUMULO_USER,
-                ACCUMULO_PASSWORD.toCharArray(), accInstance.getInstanceName(), accInstance.getZooKeepers()), accumuloConn);
-
-        final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
-
-        // Write the data to Rya.
-        final SailRepositoryConnection ryaConn = getRyaSailRepository().getConnection();
-        ryaConn.begin();
-        ryaConn.add(statements);
-        ryaConn.commit();
-        ryaConn.close();
-
-        // Wait for the Fluo application to finish computing the end result.
-        super.getMiniFluo().waitForObservers();
-
-        // The PCJ Id is the topic name the results will be written to.
-        return pcjId;
-    }
-
-}
\ No newline at end of file
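
For reference, a minimal sketch of how tests built on this (now relocated) base class combined its helpers; the method name, sparql, and statements are placeholders, and poll(long) assumes the 0.10-era consumer API used above:

    @Test
    public void exampleExportCheck() throws Exception {
        // Load statements, then read the exported VisibilityBindingSets from
        // the topic named after the PCJ id.
        final String pcjId = loadData(sparql, statements);
        try (KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
            final ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(5000);
            for (final ConsumerRecord<Integer, VisibilityBindingSet> record : records) {
                System.out.println(record.value());
            }
        }
    }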

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
deleted file mode 100644
index 4eab0f6..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/RyaExportITBase.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.fluo.api.config.ObserverSpecification;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
-import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaExportParameters;
-import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.ConstructQueryResultObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
-import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
-import org.junit.BeforeClass;
-
-/**
- * The base Integration Test class used for Fluo applications that export to a Rya PCJ Index.
- */
-public class RyaExportITBase extends FluoITBase {
-
-    @BeforeClass
-    public static void setupLogging() {
-        BasicConfigurator.configure();
-        Logger.getRootLogger().setLevel(Level.ERROR);
-    }
-
-    @Override
-    protected void preFluoInitHook() throws Exception {
-        // Setup the observers that will be used by the Fluo PCJ Application.
-        final List<ObserverSpecification> observers = new ArrayList<>();
-        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
-        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
-        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
-        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
-        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
-
-        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
-        final HashMap<String, String> exportParams = new HashMap<>();
-        final RyaExportParameters ryaParams = new RyaExportParameters(exportParams);
-        ryaParams.setExportToRya(true);
-        ryaParams.setRyaInstanceName(getRyaInstanceName());
-        ryaParams.setAccumuloInstanceName(super.getMiniAccumuloCluster().getInstanceName());
-        ryaParams.setZookeeperServers(super.getMiniAccumuloCluster().getZooKeepers());
-        ryaParams.setExporterUsername(getUsername());
-        ryaParams.setExporterPassword(getPassword());
-
-        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
-        observers.add(exportObserverConfig);
-        
-        final KafkaExportParameters kafkaParams = new KafkaExportParameters(exportParams);
-        kafkaParams.setExportToKafka(false);
-
-        final ObserverSpecification constructExportObserverConfig = new ObserverSpecification(ConstructQueryResultObserver.class.getName(),
-                exportParams);
-        observers.add(constructExportObserverConfig);
-
-        // Add the observers to the Fluo Configuration.
-        super.getFluoConfiguration().addObservers(observers);
-    }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
index 3a42a23..cb34d06 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/CountStatementsIT.java
@@ -29,7 +29,7 @@ import org.apache.fluo.api.client.FluoFactory;
 import org.apache.fluo.api.config.ObserverSpecification;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 
 import com.google.common.base.Optional;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
index 9a1c285..d5c0e5f 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetPcjMetadataIT.java
@@ -30,7 +30,6 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.persist.RyaDAOException;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.GetPcjMetadata.NotInAccumuloException;
 import org.apache.rya.indexing.pcj.fluo.api.GetPcjMetadata.NotInFluoException;
 import org.apache.rya.indexing.pcj.storage.PcjException;
@@ -39,6 +38,7 @@ import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.ShiftVarOrderFactory;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.query.MalformedQueryException;
 import org.openrdf.query.QueryEvaluationException;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
index d19646e..965a7b9 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/GetQueryReportIT.java
@@ -31,13 +31,13 @@ import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.GetQueryReport.QueryReport;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
 import org.apache.rya.indexing.pcj.storage.PcjMetadata;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 
 import com.google.common.base.Optional;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
index ec301ba..e3914bd 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/api/ListQueryIdsIT.java
@@ -29,7 +29,7 @@ import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.fluo.api.client.Transaction;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 
 import com.beust.jcommander.internal.Lists;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
index accabbf..d403404 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAOIT.java
@@ -21,12 +21,12 @@ package org.apache.rya.indexing.pcj.fluo.app.query;
 import static org.junit.Assert.assertEquals;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.fluo.api.client.Snapshot;
 import org.apache.fluo.api.client.Transaction;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.app.ConstructGraph;
 import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
 import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationType;
@@ -34,6 +34,7 @@ import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery.QueryType;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
 import org.apache.rya.indexing.pcj.fluo.app.query.SparqlFluoQueryBuilder.NodeIds;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.query.MalformedQueryException;
 import org.openrdf.query.algebra.StatementPattern;
@@ -42,8 +43,6 @@ import org.openrdf.query.parser.ParsedQuery;
 import org.openrdf.query.parser.sparql.SPARQLParser;
 import org.openrdf.repository.RepositoryException;
 
-import com.google.common.base.Optional;
-
 /**
  * Integration tests the methods of {@link FluoQueryMetadataDAO}.
  */
@@ -87,8 +86,7 @@ public class FluoQueryMetadataDAOIT extends RyaExportITBase {
         builder.setVarOrder(new VariableOrder("e;f"));
         builder.setParentNodeId("parentNodeId");
         builder.setChildNodeId("childNodeId");
-        builder.setOriginalSparql("originalSparql");
-        builder.setFilterIndexWithinSparql(2);
+        builder.setFilterSparql("originalSparql");
         final FilterMetadata originalMetadata = builder.build();
 
         try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
@@ -232,11 +230,10 @@ public class FluoQueryMetadataDAOIT extends RyaExportITBase {
                 storedMetadata = dao.readAggregationMetadata(sx, "nodeId");
             }
 
-            // Ensure the deserialized object is the same as the serialized one.
-            assertEquals(originalMetadata, storedMetadata);
         }
     }
 
+
     @Test
     public void aggregationMetadataTest_noGroupByVarOrders() {
         final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
@@ -267,6 +264,41 @@ public class FluoQueryMetadataDAOIT extends RyaExportITBase {
             assertEquals(originalMetadata, storedMetadata);
         }
     }
+    
+    @Test
+    public void periodicQueryMetadataTest() {
+        final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
+
+        // Create the object that will be serialized.
+        PeriodicQueryMetadata originalMetadata =  PeriodicQueryMetadata.builder()
+            .setNodeId("nodeId")
+            .setParentNodeId("parentNodeId")
+            .setVarOrder(new VariableOrder("a","b","c"))
+            .setChildNodeId("childNodeId")
+            .setPeriod(10)
+            .setWindowSize(20)
+            .setUnit(TimeUnit.DAYS)
+            .setTemporalVariable("a")
+            .build();
+            
+
+        try(FluoClient fluoClient = FluoFactory.newClient(super.getFluoConfiguration())) {
+            // Write it to the Fluo table.
+            try(Transaction tx = fluoClient.newTransaction()) {
+                dao.write(tx, originalMetadata);
+                tx.commit();
+            }
+
+            // Read it from the Fluo table.
+            PeriodicQueryMetadata storedMetadata = null;
+            try(Snapshot sx = fluoClient.newSnapshot()) {
+                storedMetadata = dao.readPeriodicQueryMetadata(sx, "nodeId");
+            }
+
+            // Ensure the deserialized object is the same as the serialized one.
+            assertEquals(originalMetadata, storedMetadata);
+        }
+    }
 
     @Test
     public void fluoQueryTest() throws MalformedQueryException {
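
The periodic metadata above stores its period and window against a TimeUnit, and results are grouped into fixed time bins. A minimal sketch of the bin arithmetic, mirroring the (currentTime/period)*period floor used by the periodic storage integration test later in this patch; binId here is an illustrative helper, not part of the API:

    // Floor a millisecond timestamp to the start of its periodic bin.
    // E.g. with a half-hour period (1,800,000 ms), 10:17 falls in the 10:00 bin.
    static long binId(final long timestampMs, final long periodMs) {
        return (timestampMs / periodMs) * periodMs;
    }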

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/BatchDeleteIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/BatchDeleteIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/BatchDeleteIT.java
new file mode 100644
index 0000000..0cd7cfb
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/BatchDeleteIT.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.integration;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.Transaction;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.Span;
+import org.apache.fluo.core.client.FluoClientImpl;
+import org.apache.log4j.Logger;
+import org.apache.rya.api.domain.RyaStatement;
+import org.apache.rya.api.domain.RyaURI;
+import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
+import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.Side;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformationDAO;
+import org.apache.rya.indexing.pcj.fluo.app.batch.JoinBatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
+import org.junit.Test;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+public class BatchDeleteIT extends RyaExportITBase {
+
+    private static final Logger log = Logger.getLogger(BatchDeleteIT.class);
+    private static final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
+
+    @Test
+    public void simpleScanDelete() throws Exception {
+
+        final String sparql = "SELECT ?subject ?object1 ?object2 WHERE { ?subject <urn:predicate_1> ?object1; "
+                + " <urn:predicate_2> ?object2 } ";
+        try (FluoClient fluoClient = new FluoClientImpl(getFluoConfiguration())) {
+
+            RyaURI subj = new RyaURI("urn:subject_1");
+            RyaStatement statement1 = new RyaStatement(subj, new RyaURI("urn:predicate_1"), null);
+            RyaStatement statement2 = new RyaStatement(subj, new RyaURI("urn:predicate_2"), null);
+            Set<RyaStatement> statements1 = getRyaStatements(statement1, 10);
+            Set<RyaStatement> statements2 = getRyaStatements(statement2, 10);
+
+            // Create the PCJ table.
+            final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(getAccumuloConnector(), getRyaInstanceName());
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Tell the Fluo app to maintain the PCJ.
+            String queryId = new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, getAccumuloConnector(), getRyaInstanceName());
+
+            List<String> ids = getNodeIdStrings(fluoClient, queryId);
+            List<String> prefixes = Arrays.asList("urn:subject_1", "urn:object", "urn:subject_1", "urn:subject_1");
+
+            // Stream the data into Fluo.
+            InsertTriples inserter = new InsertTriples();
+            inserter.insert(fluoClient, statements1, Optional.<String> absent());
+            inserter.insert(fluoClient, statements2, Optional.<String> absent());
+
+            // Verify the end results of the query match the expected results.
+            getMiniFluo().waitForObservers();
+
+            verifyCounts(fluoClient, ids, Arrays.asList(100, 100, 10, 10));
+
+            createSpanBatches(fluoClient, ids, prefixes, 10);
+            getMiniFluo().waitForObservers();
+
+            verifyCounts(fluoClient, ids, Arrays.asList(0, 0, 0, 0));
+        }
+    }
+
+    @Test
+    public void simpleJoinDelete() throws Exception {
+        final String sparql = "SELECT ?subject ?object1 ?object2 WHERE { ?subject <urn:predicate_1> ?object1; "
+                + " <urn:predicate_2> ?object2 } ";
+        try (FluoClient fluoClient = new FluoClientImpl(getFluoConfiguration())) {
+
+            RyaURI subj = new RyaURI("urn:subject_1");
+            RyaStatement statement1 = new RyaStatement(subj, new RyaURI("urn:predicate_1"), null);
+            RyaStatement statement2 = new RyaStatement(subj, new RyaURI("urn:predicate_2"), null);
+            Set<RyaStatement> statements1 = getRyaStatements(statement1, 5);
+            Set<RyaStatement> statements2 = getRyaStatements(statement2, 5);
+
+            // Create the PCJ table.
+            final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(getAccumuloConnector(), getRyaInstanceName());
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Tell the Fluo app to maintain the PCJ.
+            String queryId = new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, getAccumuloConnector(), getRyaInstanceName());
+
+            List<String> ids = getNodeIdStrings(fluoClient, queryId);
+            String joinId = ids.get(1);
+            String rightSp = ids.get(3);
+            QueryBindingSet bs = new QueryBindingSet();
+            bs.addBinding("subject", new URIImpl("urn:subject_1"));
+            bs.addBinding("object1", new URIImpl("urn:object_0"));
+            VisibilityBindingSet vBs = new VisibilityBindingSet(bs);
+            Span span = Span.prefix(Bytes.of(rightSp + IncrementalUpdateConstants.NODEID_BS_DELIM + "urn:subject_1"));
+            VariableOrder varOrder = new VariableOrder(Arrays.asList("subject", "object2"));
+
+            // Stream the data into Fluo.
+            InsertTriples inserter = new InsertTriples();
+            inserter.insert(fluoClient, statements1, Optional.<String> absent());
+            inserter.insert(fluoClient, statements2, Optional.<String> absent());
+
+            getMiniFluo().waitForObservers();
+            verifyCounts(fluoClient, ids, Arrays.asList(25, 25, 5, 5));
+
+            JoinBatchInformation batch = JoinBatchInformation.builder().setBatchSize(1)
+                    .setColumn(FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET).setSpan(span).setTask(Task.Delete)
+                    .setJoinType(JoinType.NATURAL_JOIN).setSide(Side.LEFT).setBs(vBs).setVarOrder(varOrder).build();
+            // Verify the end results of the query match the expected results.
+            createSpanBatch(fluoClient, joinId, batch);
+
+            getMiniFluo().waitForObservers();
+            verifyCounts(fluoClient, ids, Arrays.asList(25, 20, 5, 5));
+        }
+    }
+
+    @Test
+    public void simpleJoinAdd() throws Exception {
+        final String sparql = "SELECT ?subject ?object1 ?object2 WHERE { ?subject <urn:predicate_1> ?object1; "
+                + " <urn:predicate_2> ?object2 } ";
+        try (FluoClient fluoClient = new FluoClientImpl(getFluoConfiguration())) {
+
+            RyaURI subj = new RyaURI("urn:subject_1");
+            RyaStatement statement2 = new RyaStatement(subj, new RyaURI("urn:predicate_2"), null);
+            Set<RyaStatement> statements2 = getRyaStatements(statement2, 5);
+
+            // Create the PCJ table.
+            final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(getAccumuloConnector(), getRyaInstanceName());
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Tell the Fluo app to maintain the PCJ.
+            String queryId = new CreatePcj().withRyaIntegration(pcjId, pcjStorage, fluoClient, getAccumuloConnector(), getRyaInstanceName());
+
+            List<String> ids = getNodeIdStrings(fluoClient, queryId);
+            String joinId = ids.get(1);
+            String rightSp = ids.get(3);
+            QueryBindingSet bs = new QueryBindingSet();
+            bs.addBinding("subject", new URIImpl("urn:subject_1"));
+            bs.addBinding("object1", new URIImpl("urn:object_0"));
+            VisibilityBindingSet vBs = new VisibilityBindingSet(bs);
+            Span span = Span.prefix(Bytes.of(rightSp + IncrementalUpdateConstants.NODEID_BS_DELIM + "urn:subject_1"));
+            VariableOrder varOrder = new VariableOrder(Arrays.asList("subject", "object2"));
+
+            // Stream the data into Fluo.
+            InsertTriples inserter = new InsertTriples();
+            inserter.insert(fluoClient, statements2, Optional.<String> absent());
+
+            getMiniFluo().waitForObservers();
+            verifyCounts(fluoClient, ids, Arrays.asList(0, 0, 0, 5));
+
+            JoinBatchInformation batch = JoinBatchInformation.builder().setBatchSize(1)
+                    .setColumn(FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET).setSpan(span).setTask(Task.Add)
+                    .setJoinType(JoinType.NATURAL_JOIN).setSide(Side.LEFT).setBs(vBs).setVarOrder(varOrder).build();
+            // Verify the end results of the query match the expected results.
+            createSpanBatch(fluoClient, joinId, batch);
+
+            getMiniFluo().waitForObservers();
+            verifyCounts(fluoClient, ids, Arrays.asList(5, 5, 0, 5));
+        }
+    }
+
+    private Set<RyaStatement> getRyaStatements(RyaStatement statement, int numTriples) {
+
+        Set<RyaStatement> statements = new HashSet<>();
+        final String subject = "urn:subject_";
+        final String predicate = "urn:predicate_";
+        final String object = "urn:object_";
+
+        for (int i = 0; i < numTriples; i++) {
+            RyaStatement stmnt = new RyaStatement(statement.getSubject(), statement.getPredicate(), statement.getObject());
+            if (stmnt.getSubject() == null) {
+                stmnt.setSubject(new RyaURI(subject + i));
+            }
+            if (stmnt.getPredicate() == null) {
+                stmnt.setPredicate(new RyaURI(predicate + i));
+            }
+            if (stmnt.getObject() == null) {
+                stmnt.setObject(new RyaURI(object + i));
+            }
+            statements.add(stmnt);
+        }
+        return statements;
+    }
+
+    private List<String> getNodeIdStrings(FluoClient fluoClient, String queryId) {
+        List<String> nodeStrings = new ArrayList<>();
+        try (Snapshot sx = fluoClient.newSnapshot()) {
+            FluoQuery query = dao.readFluoQuery(sx, queryId);
+            nodeStrings.add(queryId);
+            Collection<JoinMetadata> jMeta = query.getJoinMetadata();
+            for (JoinMetadata meta : jMeta) {
+                nodeStrings.add(meta.getNodeId());
+                nodeStrings.add(meta.getLeftChildNodeId());
+                nodeStrings.add(meta.getRightChildNodeId());
+            }
+        }
+        return nodeStrings;
+    }
+
+    private void createSpanBatches(FluoClient fluoClient, List<String> ids, List<String> prefixes, int batchSize) {
+
+        Preconditions.checkArgument(ids.size() == prefixes.size());
+
+        try (Transaction tx = fluoClient.newTransaction()) {
+            for (int i = 0; i < ids.size(); i++) {
+                String id = ids.get(i);
+                String bsPrefix = prefixes.get(i);
+                NodeType type = NodeType.fromNodeId(id).get();
+                Column bsCol = type.getResultColumn();
+                String row = id + IncrementalUpdateConstants.NODEID_BS_DELIM + bsPrefix;
+                Span span = Span.prefix(Bytes.of(row));
+                BatchInformation batch = SpanBatchDeleteInformation.builder().setBatchSize(batchSize).setColumn(bsCol).setSpan(span)
+                        .build();
+                BatchInformationDAO.addBatch(tx, id, batch);
+            }
+            tx.commit();
+        }
+    }
+
+    private void createSpanBatch(FluoClient fluoClient, String nodeId, BatchInformation batch) {
+        try (Transaction tx = fluoClient.newTransaction()) {
+            BatchInformationDAO.addBatch(tx, nodeId, batch);
+            tx.commit();
+        }
+    }
+
+    private int countResults(FluoClient fluoClient, String nodeId, Column bsColumn) {
+        try (Transaction tx = fluoClient.newTransaction()) {
+            int count = 0;
+            RowScanner scanner = tx.scanner().over(Span.prefix(nodeId)).fetch(bsColumn).byRow().build();
+            Iterator<ColumnScanner> colScanners = scanner.iterator();
+            while (colScanners.hasNext()) {
+                ColumnScanner colScanner = colScanners.next();
+                Iterator<ColumnValue> vals = colScanner.iterator();
+                while (vals.hasNext()) {
+                    vals.next();
+                    count++;
+                }
+            }
+            tx.commit();
+            return count;
+        }
+    }
+
+    private void verifyCounts(FluoClient fluoClient, List<String> ids, List<Integer> expectedCounts) {
+        Preconditions.checkArgument(ids.size() == expectedCounts.size());
+        for (int i = 0; i < ids.size(); i++) {
+            String id = ids.get(i);
+            int expected = expectedCounts.get(i);
+            NodeType type = NodeType.fromNodeId(id).get();
+            int count = countResults(fluoClient, id, type.getResultColumn());
+            log.trace("NodeId: " + id + " Count: " + count + " Expected: " + expected);
+            switch (type) {
+            case STATEMENT_PATTERN:
+                assertEquals(expected, count);
+                break;
+            case JOIN:
+                assertEquals(expected, count);
+                break;
+            case QUERY:
+                assertEquals(expected, count);
+                break;
+            default:
+                break;
+            }
+        }
+    }
+
+}
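
Condensed, the batch-delete pattern these helpers exercise looks like the following sketch (same calls as createSpanBatches above; fluoClient, nodeId, and bsPrefix are placeholders):

    try (Transaction tx = fluoClient.newTransaction()) {
        // Enqueue a batched delete of every binding set stored under the prefix.
        final Column col = NodeType.fromNodeId(nodeId).get().getResultColumn();
        final Span span = Span.prefix(Bytes.of(nodeId + IncrementalUpdateConstants.NODEID_BS_DELIM + bsPrefix));
        BatchInformationDAO.addBatch(tx, nodeId, SpanBatchDeleteInformation.builder()
                .setBatchSize(100).setColumn(col).setSpan(span).build());
        tx.commit();
    }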

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
index 414fa70..0f2d892 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/CreateDeleteIT.java
@@ -35,8 +35,8 @@ import org.apache.fluo.api.data.Bytes;
 import org.apache.fluo.api.data.Span;
 import org.apache.rya.api.client.RyaClient;
 import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.DeletePcj;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Statement;
 import org.openrdf.model.ValueFactory;


[8/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPeriodicQueryResultStorageIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPeriodicQueryResultStorageIT.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPeriodicQueryResultStorageIT.java
new file mode 100644
index 0000000..1eafc00
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPeriodicQueryResultStorageIT.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo.integration;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.rya.accumulo.AccumuloITBase;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.PeriodicQueryTableNameFactory;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.query.impl.MapBindingSet;
+
+public class AccumuloPeriodicQueryResultStorageIT extends AccumuloITBase {
+
+    private PeriodicQueryResultStorage periodicStorage;
+    private static final String RYA = "rya_";
+    private static final PeriodicQueryTableNameFactory nameFactory = new PeriodicQueryTableNameFactory();
+    private static final ValueFactory vf = new ValueFactoryImpl();
+    
+    @Before
+    public void init() throws AccumuloException, AccumuloSecurityException {
+        super.getConnector().securityOperations().changeUserAuthorizations("root", new Authorizations("U"));
+        periodicStorage = new AccumuloPeriodicQueryResultStorage(super.getConnector(), RYA);
+    }
+    
+    
+    @Test
+    public void testCreateAndMeta() throws PeriodicQueryStorageException {
+        
+        String sparql = "select ?x where { ?x <urn:pred> ?y.}";
+        VariableOrder varOrder = new VariableOrder("periodicBinId", "x");
+        PeriodicQueryStorageMetadata expectedMeta = new PeriodicQueryStorageMetadata(sparql, varOrder);
+        
+        String id = periodicStorage.createPeriodicQuery(sparql);
+        Assert.assertEquals(expectedMeta, periodicStorage.getPeriodicQueryMetadata(id));
+        Assert.assertEquals(Arrays.asList(nameFactory.makeTableName(RYA, id)), periodicStorage.listPeriodicTables());
+        periodicStorage.deletePeriodicQuery(id);
+    }
+    
+    
+    @Test
+    public void testAddListDelete() throws Exception {
+        
+        String sparql = "select ?x where { ?x <urn:pred> ?y.}";
+        String id = periodicStorage.createPeriodicQuery(sparql);
+        
+        Set<BindingSet> expected = new HashSet<>();
+        Set<VisibilityBindingSet> storageSet = new HashSet<>();
+        
+        // Add a result matching the user's visibility.
+        QueryBindingSet bs = new QueryBindingSet();
+        bs.addBinding("periodicBinId", vf.createLiteral(1L));
+        bs.addBinding("x",vf.createURI("uri:uri123"));
+        expected.add(bs);
+        storageSet.add(new VisibilityBindingSet(bs,"U"));
+        
+        // Add a result with a different visibility that should not be returned.
+        bs = new QueryBindingSet();
+        bs.addBinding("periodicBinId", vf.createLiteral(1L));
+        bs.addBinding("x",vf.createURI("uri:uri456"));
+        storageSet.add(new VisibilityBindingSet(bs,"V"));
+        
+        periodicStorage.addPeriodicQueryResults(id, storageSet);
+        
+        Set<BindingSet> actual = new HashSet<>();
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(id, Optional.of(1L))) {
+            iter.forEachRemaining(x -> actual.add(x));
+        }
+        
+        Assert.assertEquals(expected, actual);
+        
+        periodicStorage.deletePeriodicQueryResults(id, 1L);
+        
+        Set<BindingSet> actual2 = new HashSet<>();
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(id, Optional.of(1L))) {
+            iter.forEachRemaining(x -> actual2.add(x));
+        }
+        
+        Assert.assertEquals(new HashSet<>(), actual2);
+        periodicStorage.deletePeriodicQuery(id);
+        
+    }
+    
+    @Test
+    public void multiBinTest() throws Exception {
+        
+        String sparql = "prefix function: <http://org.apache.rya/function#> " //n
+                + "prefix time: <http://www.w3.org/2006/time#> " //n
+                + "select ?id (count(?obs) as ?total) where {" //n
+                + "Filter(function:periodic(?time, 2, .5, time:hours)) " //n
+                + "?obs <uri:hasTime> ?time. " //n
+                + "?obs <uri:hasId> ?id } group by ?id"; //n
+        
+        long currentTime = System.currentTimeMillis();
+        String queryId = UUID.randomUUID().toString().replace("-", "");
+        
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expected1 = new HashSet<>();
+        final Set<BindingSet> expected2 = new HashSet<>();
+        final Set<BindingSet> expected3 = new HashSet<>();
+        final Set<BindingSet> expected4 = new HashSet<>();
+        final Set<VisibilityBindingSet> storageResults = new HashSet<>();
+
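+        // 1800000 ms = 30 minutes, i.e. the .5 hour period from the query above;
+        // binId is the left endpoint of the bin containing currentTime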
+        long period = 1800000;
+        long binId = (currentTime/period)*period;
+        
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expected1.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expected1.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expected1.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_4", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expected1.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expected2.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expected2.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expected2.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expected3.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expected3.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 3*period));
+        expected4.add(bs);
+        storageResults.add(new VisibilityBindingSet(bs));
+        
+        
+        String id = periodicStorage.createPeriodicQuery(queryId, sparql);
+        periodicStorage.addPeriodicQueryResults(queryId, storageResults);
+        
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId))) {
+            Set<BindingSet> actual1 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual1.add(iter.next());
+            }
+            Assert.assertEquals(expected1, actual1);
+        }
+        
+        periodicStorage.deletePeriodicQueryResults(queryId, binId);
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId))) {
+            Set<BindingSet> actual1 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual1.add(iter.next());
+            }
+            Assert.assertEquals(Collections.emptySet(), actual1);
+        }
+        
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId + period))) {
+            Set<BindingSet> actual2 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual2.add(iter.next());
+            }
+            Assert.assertEquals(expected2, actual2);
+        }
+        
+        periodicStorage.deletePeriodicQueryResults(queryId, binId + period);
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId + period))) {
+            Set<BindingSet> actual2 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual2.add(iter.next());
+            }
+            Assert.assertEquals(Collections.emptySet(), actual2);
+        }
+        
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId + 2*period))) {
+            Set<BindingSet> actual3 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual3.add(iter.next());
+            }
+            Assert.assertEquals(expected3, actual3);
+        }
+        
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(binId + 3*period))) {
+            Set<BindingSet> actual4 = new HashSet<>();
+            while(iter.hasNext()) {
+                actual4.add(iter.next());
+            }
+            Assert.assertEquals(expected4, actual4);
+        }
+        periodicStorage.deletePeriodicQuery(id);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/README.md
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/README.md b/extras/rya.pcj.fluo/README.md
index 70361c1..1207705 100644
--- a/extras/rya.pcj.fluo/README.md
+++ b/extras/rya.pcj.fluo/README.md
@@ -19,7 +19,15 @@ Rya Incrementally Updating Precomputed Joins
 ============================================
 This project is an implementation of the Rya Precomputed Join (PCJ) indexing 
 feature that runs on top of [Fluo][1] so that it may incrementally update the
-results of a query as new semantic triples are added to storage.  
+results of a query as new semantic triples are added to storage.  At a high level, the Rya Fluo application 
+works by registering the individual RDF4J QueryNodes with the Fluo table in the form of metadata.  For example, 
+if a join occurs in a given query, that join is given a unique id when the query is registered with the Rya 
+Fluo application, along with metadata indicating its parent node, its left and right child nodes, and any 
+other information necessary for the application to process the join.  In this way, the entire RDF4J query tree 
+is recreated within Fluo.  For each node type supported by the Rya Fluo application, there is also an associated 
+Fluo Observer that processes BindingSet notifications for that node (a notification occurs when a new result 
+percolates up the query tree and arrives at that node in the form of a BindingSet).  These Observers incrementally 
+evaluate the registered queries by performing the processing required for their associated node as soon as a 
+result for that node is available.  A minimal Observer sketch appears at the end of this README.
 
 This project contains the following modules:
   * **rya.pcj.fluo.app** - A Fluo application that incrementally updates the results
@@ -38,5 +46,20 @@ This project contains the following modules:
   * **integration** - Contains integration tests that use a MiniAccumuloCluster
     and MiniFluo to ensure the Rya PCJ Fluo App work within an emulation of the
     production environment.
+    
+    
+Currently the Rya Fluo application supports RDF4J queries that contain Joins, Filters, Projections, StatementPatterns, and Aggregations.
+To support the evaluation of an additional RDF4J query node in the Fluo application, follow these steps (a sketch follows the list):
+
+  1. Create the appropriate Metadata Object by extending CommonNodeMetadata (e.g. StatementPatternMetadata, JoinMetadata, etc.)
+  2. Add metadata Columns to FluoQueryColumns
+  3. Create NodeType from the metadata Columns
+  4. Add the node prefix to IncrementalUpdateConstants
+  5. Integrate metadata with FluoQueryMetadataDAO
+  6. Create Updater and integrate with BindingSetUpdater
+  7. Create Observer (e.g. StatementPatternObserver, JoinObserver, etc.)
+  8. Integrate with SparqlFluoQueryBuilder
+  
+All of the classes mentioned above can be found in the rya.pcj.fluo.app project.
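+
+As an illustration of step 7, below is a hedged sketch of what a new node type's Observer might
+look like.  The class, column, and node names here are hypothetical; consult the existing Observers
+(e.g. StatementPatternObserver, JoinObserver) in rya.pcj.fluo.app for the actual contracts.
+
+```java
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.observer.AbstractObserver;
+
+// Hypothetical Observer for a new node type.  Fluo notifies it whenever a new
+// BindingSet is written to the observed column for one of the node's results.
+public class MyNodeObserver extends AbstractObserver {
+
+    // Hypothetical binding set column for the new node (added to FluoQueryColumns in step 2).
+    private static final Column MY_NODE_BINDING_SET = new Column("myNodeMetadata", "bindingSet");
+
+    @Override
+    public ObservedColumn getObservedColumn() {
+        return new ObservedColumn(MY_NODE_BINDING_SET, NotificationType.STRONG);
+    }
+
+    @Override
+    public void process(final TransactionBase tx, final Bytes row, final Column col) {
+        // 1. Deserialize the notifying BindingSet from the row.
+        // 2. Read this node's metadata (step 5) to look up its parent node id.
+        // 3. Perform the node's processing and write the resulting BindingSet to the
+        //    parent's binding set column so the parent's Observer is notified in turn.
+    }
+}
+```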
 
 [1]: http://fluo.io/

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
index a17f02f..767d9d2 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/CreatePcj.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 
 import org.apache.accumulo.core.client.AccumuloException;
@@ -44,9 +45,11 @@ import org.apache.rya.api.persist.RyaDAOException;
 import org.apache.rya.api.persist.query.BatchRyaQuery;
 import org.apache.rya.api.resolver.RdfToRyaConversions;
 import org.apache.rya.indexing.pcj.fluo.app.FluoStringConverter;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.SparqlFluoQueryBuilder;
 import org.apache.rya.indexing.pcj.fluo.app.query.SparqlFluoQueryBuilder.NodeIds;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
@@ -62,6 +65,8 @@ import org.openrdf.query.algebra.StatementPattern;
 import org.openrdf.query.parser.ParsedQuery;
 import org.openrdf.query.parser.sparql.SPARQLParser;
 
+import com.google.common.base.Preconditions;
+
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
 import edu.umd.cs.findbugs.annotations.NonNull;
 
@@ -152,13 +157,51 @@ public class CreatePcj {
 
     
 
+
     /**
-     * Tells the Fluo PCJ Updater application to maintain a new PCJ.  This method requires that a
-     * PCJ table already exist for the query corresponding to the pcjId.  Results will be exported
-     * to this table.
+     * Tells the Fluo PCJ Updater application to maintain a new PCJ. This method
+     * creates the FluoQuery (metadata) inside of Fluo so that results can be incrementally generated
+     * inside of Fluo.  This method assumes that the user will export the results to Kafka or
+     * some other external resource.  The export id is equivalent to the queryId that is returned,
+     * which is in contrast to the other createPcj methods in this class which accept an external pcjId
+     * that is used to identify the Accumulo table or Kafka topic for exporting results.
+     *
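+     * <p>
+     * A hypothetical invocation, assuming a {@code FluoClient} for the application is in hand:
+     * <pre>
+     *     String queryId = new CreatePcj().createPcj(sparql, fluoClient);
+     * </pre>
+     *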
+     * @param sparql - sparql query String to be registered with Fluo
+     * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
+     * @return queryId - The id of the root of the query metadata tree in Fluo
+     * @throws MalformedQueryException The provided SPARQL query is malformed.
+     */
+    public String createPcj(String sparql, FluoClient fluo) throws MalformedQueryException {
+        Preconditions.checkNotNull(sparql);
+        Preconditions.checkNotNull(fluo);
+        
+        FluoQuery fluoQuery = makeFluoQuery(sparql);
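+        // Determine the node id of the query's root (standard or construct query) and
+        // strip the node-type prefix to recover the bare id that is returned to the caller.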
+        String queryId = null;
+        if(fluoQuery.getQueryMetadata().isPresent()) {
+            queryId = fluoQuery.getQueryMetadata().get().getNodeId();
+            queryId = queryId.split(IncrementalUpdateConstants.QUERY_PREFIX)[1];
+        } else {
+            queryId = fluoQuery.getConstructQueryMetadata().get().getNodeId();
+            queryId = queryId.split(IncrementalUpdateConstants.CONSTRUCT_PREFIX)[1];
+        }
+        
+        String[] idArray = queryId.split("_");
+        String id = idArray[idArray.length - 1];
+        
+        writeFluoQuery(fluo, fluoQuery, id);
+        return id;
+    }
+    
+    /**
+     * Tells the Fluo PCJ Updater application to maintain a new PCJ.  This method provides
+     * no guarantees that a PCJ with the given pcjId exists outside of Fluo. This method merely
+     * creates the FluoQuery (metadata) inside of Fluo so that results can be incrementally generated
+     * inside of Fluo.  This method assumes that the user will export the results to Kafka or
+     * some other external resource.
      *
      * @param pcjId - Identifies the PCJ that will be updated by the Fluo app. (not null)
-     * @param pcjStorage - Provides access to the PCJ index. (not null)
+     * @param sparql - sparql query String to be registered with Fluo
      * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
      * @return The metadata that was written to the Fluo application for the PCJ.
      * @throws MalformedQueryException The SPARQL query stored for the {@code pcjId} is malformed.
@@ -166,40 +209,113 @@ public class CreatePcj {
      */
     public FluoQuery createPcj(
             final String pcjId,
-            final PrecomputedJoinStorage pcjStorage,
+            final String sparql,
             final FluoClient fluo) throws MalformedQueryException, PcjException {
         requireNonNull(pcjId);
-        requireNonNull(pcjStorage);
+        requireNonNull(sparql);
         requireNonNull(fluo);
 
+        FluoQuery fluoQuery = makeFluoQuery(sparql);
+        writeFluoQuery(fluo, fluoQuery, pcjId);
+
+        return fluoQuery;
+    }
+    
+    private FluoQuery makeFluoQuery(String sparql) throws MalformedQueryException {
+        
         // Keeps track of the IDs that are assigned to each of the query's nodes in Fluo.
         // We use these IDs later when scanning Rya for historic Statement Pattern matches
         // as well as setting up automatic exports.
         final NodeIds nodeIds = new NodeIds();
 
         // Parse the query's structure for the metadata that will be written to fluo.
-        final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
-        final String sparql = pcjMetadata.getSparql();
         final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparql, null);
-        final FluoQuery fluoQuery = new SparqlFluoQueryBuilder().make(parsedQuery, nodeIds);
-
+        return new SparqlFluoQueryBuilder().make(parsedQuery, nodeIds);
+    }
+    
+    private void writeFluoQuery(FluoClient fluo, FluoQuery fluoQuery, String pcjId) {
         try (Transaction tx = fluo.newTransaction()) {
             // Write the query's structure to Fluo.
             new FluoQueryMetadataDAO().write(tx, fluoQuery);
-            
-            if (fluoQuery.getQueryMetadata().isPresent()) {
-                // If the query is not a construct query, 
-                // the results of the query are eventually exported to an instance of Rya, so store the Rya ID for the PCJ.
-                final String queryId = fluoQuery.getQueryMetadata().get().getNodeId();
+
+            // If the query has QueryMetadata (i.e. it is not a construct query), its
+            // results are eventually exported to an instance of Rya, so store the Rya ID for the PCJ.
+            QueryMetadata metadata = fluoQuery.getQueryMetadata().orNull();
+            if (metadata != null) {
+                String queryId = metadata.getNodeId();
                 tx.set(queryId, FluoQueryColumns.RYA_PCJ_ID, pcjId);
                 tx.set(pcjId, FluoQueryColumns.PCJ_ID_QUERY_ID, queryId);
-            } 
+            }
+
             // Flush the changes to Fluo.
             tx.commit();
         }
+    }
 
-        return fluoQuery;
+    
+    /**
+     * Tells the Fluo PCJ Updater application to maintain a new PCJ.  The method takes in an
+     * instance of {@link PrecomputedJoinStorage} to verify that a PCJ with the given pcjId exists.
+     *
+     * @param pcjId - Identifies the PCJ that will be updated by the Fluo app. (not null)
+     * @param pcjStorage - Provides access to the PCJ index. (not null)
+     * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
+     * @return The metadata that was written to the Fluo application for the PCJ.
+     * @throws MalformedQueryException The SPARQL query stored for the {@code pcjId} is malformed.
+     * @throws PcjException The PCJ Metadata for {@code pcjId} could not be read from {@code pcjStorage}.
+     */
+    public FluoQuery createPcj(
+            final String pcjId,
+            final PrecomputedJoinStorage pcjStorage,
+            final FluoClient fluo) throws MalformedQueryException, PcjException {
+        requireNonNull(pcjId);
+        requireNonNull(pcjStorage);
+        requireNonNull(fluo);
+
+        // Parse the query's structure for the metadata that will be written to fluo.
+        final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
+        final String sparql = pcjMetadata.getSparql();
+        return createPcj(pcjId, sparql, fluo);
+    }
+    
+    /**
+     * Tells the Fluo PCJ Updater application to maintain a new PCJ.
+     * <p>
+     * This call scans Rya for Statement Pattern matches and inserts them into
+     * the Fluo application. This method does not verify that a PcjTable with
+     * the given pcjId actually exists. It is assumed that results for any query registered
+     * using this method will be exported to Kafka or some other external service.
+     *
+     * @param pcjId - Identifies the PCJ that will be updated by the Fluo app. (not null)
+     * @param sparql - sparql query that will be registered with Fluo. (not null)
+     * @param fluo - A connection to the Fluo application that updates the PCJ index. (not null)
+     * @param accumulo - Connector to the Accumulo instance that backs the Rya instance. (not null)
+     * @param ryaInstance - The name of the Rya instance whose historic statements will be scanned. (not null)
+     * @return The Fluo application's Query ID of the query that was created.
+     * @throws MalformedQueryException The provided SPARQL query is malformed.
+     * @throws PcjException The PCJ metadata could not be written to the Fluo application.
+     * @throws RyaDAOException Historic PCJ results could not be loaded because of a problem with {@code rya}.
+     */
+    public String withRyaIntegration(
+            final String pcjId,
+            final String sparql,
+            final FluoClient fluo,
+            final Connector accumulo,
+            final String ryaInstance ) throws MalformedQueryException, PcjException, RyaDAOException {
+        requireNonNull(pcjId);
+        requireNonNull(sparql);
+        requireNonNull(fluo);
+        requireNonNull(accumulo);
+        requireNonNull(ryaInstance);
+
+        
+        // Write the SPARQL query's structure to the Fluo Application.
+        final FluoQuery fluoQuery = createPcj(pcjId, sparql, fluo);
+        // Import the statements already ingested into Rya that match the query's statement patterns.
+        importHistoricResultsIntoFluo(fluo, fluoQuery, accumulo, ryaInstance);
+        // return queryId to the caller for later monitoring from the export.
+        return fluoQuery.getQueryMetadata().get().getNodeId();
     }
+    
 
     /**
      * Tells the Fluo PCJ Updater application to maintain a new PCJ.
@@ -231,31 +347,39 @@ public class CreatePcj {
         requireNonNull(fluo);
         requireNonNull(accumulo);
         requireNonNull(ryaInstance);
-
-        // Write the SPARQL query's structure to the Fluo Application.
-        final FluoQuery fluoQuery = createPcj(pcjId, pcjStorage, fluo);
-
+        
+        // Parse the query's structure for the metadata that will be written to fluo.
+        final PcjMetadata pcjMetadata = pcjStorage.getPcjMetadata(pcjId);
+        final String sparql = pcjMetadata.getSparql();
+        
+        return withRyaIntegration(pcjId, sparql, fluo, accumulo, ryaInstance);
+    }
+    
+    private void importHistoricResultsIntoFluo(FluoClient fluo, FluoQuery fluoQuery, Connector accumulo, String ryaInstance)
+            throws RyaDAOException {
         // Reuse the same set object while performing batch inserts.
         final Set<RyaStatement> queryBatch = new HashSet<>();
 
-        // Iterate through each of the statement patterns and insert their historic matches into Fluo.
+        // Iterate through each of the statement patterns and insert their
+        // historic matches into Fluo.
         for (final StatementPatternMetadata patternMetadata : fluoQuery.getStatementPatternMetadata()) {
-            // Get an iterator over all of the binding sets that match the statement pattern.
+            // Get an iterator over all of the binding sets that match the
+            // statement pattern.
             final StatementPattern pattern = FluoStringConverter.toStatementPattern(patternMetadata.getStatementPattern());
             queryBatch.add(spToRyaStatement(pattern));
         }
 
-        //Create AccumuloRyaQueryEngine to query for historic results
+        // Create AccumuloRyaQueryEngine to query for historic results
         final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
         conf.setTablePrefix(ryaInstance);
         conf.setAuths(getAuths(accumulo));
 
-        try(final AccumuloRyaQueryEngine queryEngine = new AccumuloRyaQueryEngine(accumulo, conf);
+        try (final AccumuloRyaQueryEngine queryEngine = new AccumuloRyaQueryEngine(accumulo, conf);
                 CloseableIterable<RyaStatement> queryIterable = queryEngine.query(new BatchRyaQuery(queryBatch))) {
             final Set<RyaStatement> triplesBatch = new HashSet<>();
 
             // Insert batches of the binding sets into Fluo.
-            for(final RyaStatement ryaStatement : queryIterable) {
+            for (final RyaStatement ryaStatement : queryIterable) {
                 if (triplesBatch.size() == spInsertBatchSize) {
                     writeBatch(fluo, triplesBatch);
                     triplesBatch.clear();
@@ -271,14 +395,6 @@ public class CreatePcj {
         } catch (final IOException e) {
             log.warn("Ignoring IOException thrown while closing the AccumuloRyaQueryEngine used by CreatePCJ.", e);
         }
-        
-        //return queryId to the caller for later monitoring from the export
-        if(fluoQuery.getConstructQueryMetadata().isPresent()) {
-            return fluoQuery.getConstructQueryMetadata().get().getNodeId();
-        } 
-        
-        return fluoQuery.getQueryMetadata().get().getNodeId();
-        
     }
 
     private static void writeBatch(final FluoClient fluo, final Set<RyaStatement> batch) {

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
index 87eb9cc..3052c1d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.api/src/main/java/org/apache/rya/indexing/pcj/fluo/api/DeletePcj.java
@@ -39,6 +39,7 @@ import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
 import org.openrdf.query.BindingSet;
 
@@ -50,12 +51,12 @@ import edu.umd.cs.findbugs.annotations.NonNull;
  * <p>
  * This is a two phase process.
  * <ol>
- *   <li>Delete metadata about each node of the query using a single Fluo
- *       transaction. This prevents new {@link BindingSet}s from being created when
- *       new triples are inserted.</li>
- *   <li>Delete BindingSets associated with each node of the query. This is done
- *       in a batch fashion to guard against large delete transactions that don't fit
- *       into memory.</li>
+ * <li>Delete metadata about each node of the query using a single Fluo
+ * transaction. This prevents new {@link BindingSet}s from being created when
+ * new triples are inserted.</li>
+ * <li>Delete BindingSets associated with each node of the query. This is done
+ * in a batch fashion to guard against large delete transactions that don't fit
+ * into memory.</li>
  * </ol>
  */
 @DefaultAnnotation(NonNull.class)
@@ -79,8 +80,10 @@ public class DeletePcj {
      * Precomputed Join Index from the Fluo application that is incrementally
      * updating it.
      *
-     * @param client - Connects to the Fluo application that is updating the PCJ Index. (not null)
-     * @param pcjId - The PCJ ID for the query that will removed from the Fluo application. (not null)
+     * @param client - Connects to the Fluo application that is updating the PCJ
+     *            Index. (not null)
+     * @param pcjId - The PCJ ID for the query that will be removed from the Fluo
+     *            application. (not null)
      */
     public void deletePcj(final FluoClient client, final String pcjId) {
         requireNonNull(client);
@@ -167,6 +170,12 @@ public class DeletePcj {
                 nodeIds.add(aggChild);
                 getChildNodeIds(tx, aggChild, nodeIds);
                 break;
+            case PERIODIC_QUERY:
+                final PeriodicQueryMetadata periodicMeta = dao.readPeriodicQueryMetadata(tx, nodeId);
+                final String periodicChild = periodicMeta.getChildNodeId();
+                nodeIds.add(periodicChild);
+                getChildNodeIds(tx, periodicChild, nodeIds);
+                break;
             case STATEMENT_PATTERN:
                 break;
         }
@@ -215,10 +224,9 @@ public class DeletePcj {
         }
     }
 
-
     /**
-     * Deletes high level query meta for converting from queryId to pcjId and vice
-     * versa, as well as converting from sparql to queryId.
+     * Deletes the high level query metadata used for converting from queryId to pcjId
+     * and vice versa, as well as for converting from sparql to queryId.
      *
      * @param tx - Transaction the deletes will be performed with. (not null)
      * @param pcjId - The PCJ whose metadata will be deleted. (not null)
@@ -234,7 +242,6 @@ public class DeletePcj {
         tx.delete(pcjId, FluoQueryColumns.PCJ_ID_QUERY_ID);
     }
 
-
     /**
      * Deletes all results (BindingSets or Statements) associated with the specified nodeId.
      *
@@ -265,18 +272,18 @@ public class DeletePcj {
         requireNonNull(scanner);
         requireNonNull(column);
 
-        try(Transaction ntx = tx) {
-          int count = 0;
-          final Iterator<RowColumnValue> iter = scanner.iterator();
-          while (iter.hasNext() && count < batchSize) {
-            final Bytes row = iter.next().getRow();
-            count++;
-            tx.delete(row, column);
-          }
+        try (Transaction ntx = tx) {
+            int count = 0;
+            final Iterator<RowColumnValue> iter = scanner.iterator();
+            while (iter.hasNext() && count < batchSize) {
+                final Bytes row = iter.next().getRow();
+                count++;
+                tx.delete(row, column);
+            }
 
-          final boolean hasNext = iter.hasNext();
-          tx.commit();
-          return hasNext;
+            final boolean hasNext = iter.hasNext();
+            tx.commit();
+            return hasNext;
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml b/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
index 38fff95..b151c0e 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/pom.xml
@@ -1,35 +1,28 @@
 <?xml version="1.0" encoding="utf-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor 
+	license agreements. See the NOTICE file distributed with this work for additional 
+	information regarding copyright ownership. The ASF licenses this file to 
+	you under the Apache License, Version 2.0 (the "License"); you may not use 
+	this file except in compliance with the License. You may obtain a copy of 
+	the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required 
+	by applicable law or agreed to in writing, software distributed under the 
+	License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS 
+	OF ANY KIND, either express or implied. See the License for the specific 
+	language governing permissions and limitations under the License. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
-  http://www.apache.org/licenses/LICENSE-2.0
+	<parent>
+		<groupId>org.apache.rya</groupId>
+		<artifactId>rya.pcj.fluo.parent</artifactId>
+		<version>3.2.11-incubating-SNAPSHOT</version>
+	</parent>
 
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<artifactId>rya.pcj.fluo.app</artifactId>
 
-    <parent>
-        <groupId>org.apache.rya</groupId>
-        <artifactId>rya.pcj.fluo.parent</artifactId>
-        <version>3.2.11-incubating-SNAPSHOT</version>
-    </parent>
-    
-    <modelVersion>4.0.0</modelVersion>
-    <artifactId>rya.pcj.fluo.app</artifactId>
-    
-    <name>Apache Rya PCJ Fluo App</name>
-    <description>
+	<name>Apache Rya PCJ Fluo App</name>
+	<description>
         A Fluo implementation of Rya Precomputed Join Indexing. This module produces
         a jar that may be executed by the 'fluo' command line tool as a YARN job.
     </description>
@@ -72,6 +65,10 @@ under the License.
             <groupId>org.apache.fluo</groupId>
             <artifactId>fluo-recipes-accumulo</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-queryrender</artifactId>
+        </dependency>
         
         <dependency>
           <groupId>org.apache.kafka</groupId>
@@ -123,27 +120,29 @@ under the License.
         </dependency>
     </dependencies>
 
-    <build>
-        <plugins>
-            <!-- Use the pre-build 'jar-with-dependencies' assembly to package the dependent class files into the final jar. 
-                 This creates a jar file that can be deployed to Fluo without having to include any dependent jars. -->
-            <plugin>
-                <artifactId>maven-assembly-plugin</artifactId>
-                <configuration>
-                    <descriptorRefs>
-                        <descriptorRef>jar-with-dependencies</descriptorRef>
-                    </descriptorRefs>
-                </configuration>
-                <executions>
-                    <execution>
-                        <id>make-assembly</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>single</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
+
+	<build>
+		<plugins>
+			<!-- Use the pre-build 'jar-with-dependencies' assembly to package the 
+				dependent class files into the final jar. This creates a jar file that can 
+				be deployed to Fluo without having to include any dependent jars. -->
+			<plugin>
+				<artifactId>maven-assembly-plugin</artifactId>
+				<configuration>
+					<descriptorRefs>
+						<descriptorRef>jar-with-dependencies</descriptorRef>
+					</descriptorRefs>
+				</configuration>
+				<executions>
+					<execution>
+						<id>make-assembly</id>
+						<phase>package</phase>
+						<goals>
+							<goal>single</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinder.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinder.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinder.java
deleted file mode 100644
index ae976ee..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterFinder.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo.app;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
-import org.openrdf.query.algebra.Filter;
-import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
-import org.openrdf.query.parser.ParsedQuery;
-import org.openrdf.query.parser.sparql.SPARQLParser;
-
-import com.google.common.base.Optional;
-
-/**
- * Searches a SPARQL query for {@link Filter}s.
- */
-@DefaultAnnotation(NonNull.class)
-class FilterFinder {
-
-    /**
-     * Search a SPARQL query for the {@link Filter} that appears at the
-     * {@code indexWithinQuery}'th time within the query.
-     * <p>
-     * The top most filter within the query will be at index 0, the next filter
-     * encountered will be at index 1, ... and the last index that is encountered
-     * will be at index <i>n</i>.
-     *
-     * @param sparql - The SPARQL query that to parse. (not null)
-     * @param indexWithinQuery - The index of the filter to fetch. (not null)
-     * @return The filter that was found within the query at the specified index;
-     *   otherwise absent.
-     * @throws Exception Thrown when the query could not be parsed or iterated over.
-     */
-    public Optional<Filter> findFilter(final String sparql, final int indexWithinQuery) throws Exception {
-        checkNotNull(sparql);
-        checkArgument(indexWithinQuery >= 0);
-
-        // When a filter is encountered for the requested index, store it in atomic reference and quit searching.
-        final AtomicReference<Filter> filterRef = new AtomicReference<>();
-        final QueryModelVisitorBase<RuntimeException> filterFinder = new QueryModelVisitorBase<RuntimeException>() {
-            private int i = 0;
-            @Override
-            public void meet(final Filter filter) {
-                // Store and stop searching.
-                if(i == indexWithinQuery) {
-                    filterRef.set(filter);
-                    return;
-                }
-
-                // Continue to the next filter.
-                i++;
-                super.meet(filter);
-            }
-        };
-
-        // Parse the query and find the filter.
-        final SPARQLParser parser = new SPARQLParser();
-        final ParsedQuery parsedQuery = parser.parseQuery(sparql, null);
-        parsedQuery.getTupleExpr().visit(filterFinder);
-        return Optional.fromNullable(filterRef.get());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
index 42ec686..1c99051 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/FilterResultUpdater.java
@@ -26,9 +26,11 @@ import org.apache.log4j.Logger;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.util.BindingSetUtil;
+import org.apache.rya.indexing.pcj.fluo.app.util.FilterSerializer;
 import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.openrdf.model.Resource;
 import org.openrdf.model.Statement;
 import org.openrdf.model.URI;
@@ -62,11 +64,6 @@ public class FilterResultUpdater {
     private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
 
     /**
-     * A utility class used to search SPARQL queries for Filters.
-     */
-    private static final FilterFinder filterFinder = new FilterFinder();
-
-    /**
      * Is used to evaluate the conditions of a {@link Filter}.
      */
     private static final EvaluationStrategyImpl evaluator = new EvaluationStrategyImpl(
@@ -111,12 +108,11 @@ public class FilterResultUpdater {
                 "Binding Set:\n" + childBindingSet + "\n");
 
         // Parse the original query and find the Filter that represents filterId.
-        final String sparql = filterMetadata.getOriginalSparql();
-        final int indexWithinQuery = filterMetadata.getFilterIndexWithinSparql();
-        final Optional<Filter> filter = filterFinder.findFilter(sparql, indexWithinQuery);
+        final String sparql = filterMetadata.getFilterSparql();
+        Filter filter = FilterSerializer.deserialize(sparql);
 
         // Evaluate whether the child BindingSet satisfies the filter's condition.
-        final ValueExpr condition = filter.get().getCondition();
+        final ValueExpr condition = filter.getCondition();
         if (isTrue(condition, childBindingSet)) {
             // Create the Filter's binding set from the child's.
             final VariableOrder filterVarOrder = filterMetadata.getVariableOrder();

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
index f9d14b5..2084907 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/IncrementalUpdateConstants.java
@@ -18,6 +18,8 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app;
 
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+
 public class IncrementalUpdateConstants {
 
     // String constants used to create more easily parsed patterns.
@@ -34,6 +36,9 @@ public class IncrementalUpdateConstants {
     public static final String AGGREGATION_PREFIX = "AGGREGATION";
     public static final String QUERY_PREFIX = "QUERY";
     public static final String CONSTRUCT_PREFIX = "CONSTRUCT";
+    public static final String PERIODIC_QUERY_PREFIX = "PERIODIC_QUERY";
+    
+    public static final String PERIODIC_BIN_ID = PeriodicQueryResultStorage.PeriodicBinId;
 
     public static final String URI_TYPE = "http://www.w3.org/2001/XMLSchema#anyURI";
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
index 2cb5a54..9b65b34 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/JoinResultUpdater.java
@@ -43,6 +43,7 @@ import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 import org.openrdf.query.Binding;
 import org.openrdf.query.BindingSet;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
index b829b7e..b8fc2d9 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/NodeType.java
@@ -25,6 +25,7 @@ import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.FI
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.JOIN_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.QUERY_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.SP_PREFIX;
+import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.PERIODIC_QUERY_PREFIX;
 
 import java.util.List;
 
@@ -38,6 +39,7 @@ import com.google.common.base.Optional;
  * Represents the different types of nodes that a Query may have.
  */
 public enum NodeType {
+    PERIODIC_QUERY(QueryNodeMetadataColumns.PERIODIC_QUERY_COLUMNS, FluoQueryColumns.PERIODIC_QUERY_BINDING_SET),
     FILTER (QueryNodeMetadataColumns.FILTER_COLUMNS, FluoQueryColumns.FILTER_BINDING_SET),
     JOIN(QueryNodeMetadataColumns.JOIN_COLUMNS, FluoQueryColumns.JOIN_BINDING_SET),
     STATEMENT_PATTERN(QueryNodeMetadataColumns.STATEMENTPATTERN_COLUMNS, FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET),
@@ -101,6 +103,8 @@ public enum NodeType {
             type = AGGREGATION;
         } else if(nodeId.startsWith(CONSTRUCT_PREFIX)) {
             type = CONSTRUCT;
+        } else if(nodeId.startsWith(PERIODIC_QUERY_PREFIX)) {
+            type = PERIODIC_QUERY;
         }
 
         return Optional.fromNullable(type);

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/PeriodicQueryUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/PeriodicQueryUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/PeriodicQueryUpdater.java
new file mode 100644
index 0000000..ae4912b
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/PeriodicQueryUpdater.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
+import org.openrdf.model.Literal;
+import org.openrdf.model.Value;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.Binding;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+/**
+ * This class adds the appropriate BinId Binding to each BindingSet that it processes.  The BinIds
+ * are used to determine which period a BindingSet (with a temporal Binding) falls into so that
+ * a user can receive periodic updates for a registered query. 
+ *
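+ * For example, with a period of 30 minutes and a window of 2 hours, a result's timestamp
+ * lies within 4 consecutive windows, so the result is written once per window with 4
+ * distinct periodicBinId values.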
+ */
+public class PeriodicQueryUpdater {
+
+    private static final Logger log = Logger.getLogger(PeriodicQueryUpdater.class);
+    private static final ValueFactory vf = new ValueFactoryImpl();
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+
+    /**
+     * Uses the {@link PeriodicQueryMetadata} to create a collection of binned BindingSets
+     * that are added to Fluo.  Each binned BindingSet is the original BindingSet with an additional
+     * Binding that contains the periodic bin id of the BindingSet.
+     * @param tx - Fluo Transaction
+     * @param bs - VisibilityBindingSet that will be binned
+     * @param metadata - PeriodicQueryMetadata used to bin BindingSets
+     * @throws Exception
+     */
+    public void updatePeriodicBinResults(TransactionBase tx, VisibilityBindingSet bs, PeriodicQueryMetadata metadata) throws Exception {
+        Set<Long> binIds = getBinEndTimes(metadata, bs);
+        for(Long id: binIds) {
+            //create binding set value bytes
+            QueryBindingSet binnedBs = new QueryBindingSet(bs);
+            binnedBs.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(id));
+            VisibilityBindingSet visibilityBindingSet = new VisibilityBindingSet(binnedBs, bs.getVisibility());
+            Bytes periodicBsBytes = BS_SERDE.serialize(visibilityBindingSet);
+            
+            //create row 
+            final Bytes resultRow = RowKeyUtil.makeRowKey(metadata.getNodeId(), metadata.getVariableOrder(), binnedBs);
+            Column col = FluoQueryColumns.PERIODIC_QUERY_BINDING_SET;
+            tx.set(resultRow, col, periodicBsBytes);
+        }
+    }
+
+    /**
+     * This method returns the end times of all period windows containing the time contained in
+     * the BindingSet.  
+     * 
+     * @param metadata - metadata providing the temporal variable, window size, and period
+     * @param bs - BindingSet whose temporal Binding determines the bins
+     * @return Set of period bin end times
+     */
+    private Set<Long> getBinEndTimes(PeriodicQueryMetadata metadata, VisibilityBindingSet bs) {
+        Set<Long> binIds = new HashSet<>();
+        try {
+            String timeVar = metadata.getTemporalVariable();
+            Value value = bs.getBinding(timeVar).getValue();
+            Literal temporalLiteral = (Literal) value;
+            long eventDateTime = temporalLiteral.calendarValue().toGregorianCalendar().getTimeInMillis();
+            return getEndTimes(eventDateTime, metadata.getWindowSize(), metadata.getPeriod());
+        } catch (Exception e) {
+            log.trace("Unable to extract the entity time from BindingSet: " + bs);
+        }
+        return binIds;
+    }
+
+    private long getRightBinEndPoint(long eventDateTime, long periodDuration) {
+        return (eventDateTime / periodDuration + 1) * periodDuration;
+    }
+    
+    private long getLeftBinEndPoint(long eventTime, long periodDuration) {
+        return (eventTime / periodDuration) * periodDuration;
+    }
+
+    /**
+     * Starting from the smallest period end time that follows the eventDateTime, this method
+     * creates all other period end times that occur within one windowDuration of the eventDateTime.
+     * @param eventDateTime - time of the event being binned, in ms
+     * @param windowDuration - length of the query's window, in ms
+     * @param periodDuration - length of the query's period, in ms
+     * @return Set of period bin end times
+     */
+    private Set<Long> getEndTimes(long eventDateTime, long windowDuration, long periodDuration) {
+        Set<Long> binIds = new HashSet<>();
+        long rightEventBin = getRightBinEndPoint(eventDateTime, periodDuration);
+        //get the bin left of the current moment for comparison
+        long currentBin = getLeftBinEndPoint(System.currentTimeMillis(), periodDuration);
+        
+        if(currentBin >= rightEventBin) {
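+            //the event occurred in the past; only emit bins whose windows still contain the event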
+            long numBins = (windowDuration -(currentBin - rightEventBin))/periodDuration;
+            for(int i = 0; i < numBins; i++) {
+                binIds.add(currentBin + i*periodDuration);
+            }
+        } else {
+            //this corresponds to a future event that is inserted into the system
+            long numBins = windowDuration/periodDuration;
+            for(int i = 0; i < numBins; i++) {
+                binIds.add(rightEventBin + i*periodDuration);
+            }
+        }
+
+        return binIds;
+    }
+    
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
index ba82726..44fc9bd 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/QueryResultUpdater.java
@@ -31,6 +31,7 @@ import org.apache.rya.indexing.pcj.fluo.app.util.BindingSetUtil;
 import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.openrdf.query.BindingSet;
 
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
@@ -88,7 +89,7 @@ public class QueryResultUpdater {
         }
 
         // Create the Binding Set that goes in the Node Value. It does contain visibilities.
-        final Bytes nodeValueBytes = BS_SERDE.serialize(childBindingSet);
+        final Bytes nodeValueBytes = BS_SERDE.serialize(new VisibilityBindingSet(queryBindingSet,childBindingSet.getVisibility()));
 
         log.trace(
                 "Transaction ID: " + tx.getStartTimestamp() + "\n" +

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
deleted file mode 100644
index 34439e4..0000000
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/VisibilityBindingSetSerDe.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.fluo.app;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-
-import org.apache.fluo.api.data.Bytes;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
-/**
- * Serializes and deserializes a {@link VisibilityBindingSet} to and from {@link Bytes} objects.
- */
-@DefaultAnnotation(NonNull.class)
-public class VisibilityBindingSetSerDe {
-
-    /**
-     * Serializes a {@link VisibilityBindingSet} into a {@link Bytes} object.
-     *
-     * @param bindingSet - The binding set that will be serialized. (not null)
-     * @return The serialized object.
-     * @throws Exception A problem was encountered while serializing the object.
-     */
-    public Bytes serialize(final VisibilityBindingSet bindingSet) throws Exception {
-        requireNonNull(bindingSet);
-
-        final ByteArrayOutputStream boas = new ByteArrayOutputStream();
-        try(final ObjectOutputStream oos = new ObjectOutputStream(boas)) {
-            oos.writeObject(bindingSet);
-        }
-
-        return Bytes.of(boas.toByteArray());
-    }
-
-    /**
-     * Deserializes a {@link VisibilityBindingSet} from a {@link Bytes} object.
-     *
-     * @param bytes - The bytes that will be deserialized. (not null)
-     * @return The deserialized object.
-     * @throws Exception A problem was encountered while deserializing the object.
-     */
-    public VisibilityBindingSet deserialize(final Bytes bytes) throws Exception {
-        requireNonNull(bytes);
-
-        try(final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toArray()))) {
-            final Object o = ois.readObject();
-            if(o instanceof VisibilityBindingSet) {
-                return (VisibilityBindingSet) o;
-            } else {
-                throw new Exception("Deserialized Object is not a VisibilityBindingSet. Was: " + o.getClass());
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractBatchBindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractBatchBindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractBatchBindingSetUpdater.java
new file mode 100644
index 0000000..db33d3b
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractBatchBindingSetUpdater.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.RowColumn;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+
+/**
+ * This class provides common functionality for implementations of {@link BatchBindingSetUpdater}.
+ *
+ */
+public abstract class AbstractBatchBindingSetUpdater implements BatchBindingSetUpdater {
+
+    /**
+     * Updates the Span to create a new {@link BatchInformation} object to be fed to the
+     * {@link BatchObserver}.  This method is called in the event that the BatchBindingSetUpdater
+     * reaches the batch size before processing all entries relevant to its Span.
+     * @param newStart - new start to the Span
+     * @param oldSpan - old Span to be updated with newStart
+     * @return - updated Span used with an updated BatchInformation object to complete the batch task
+     */
+    public Span getNewSpan(RowColumn newStart, Span oldSpan) {
+        return new Span(newStart, oldSpan.isStartInclusive(), oldSpan.getEnd(), oldSpan.isEndInclusive());
+    }
+    
+    /**
+     * Cleans up the old batch job.  This method is meant to be called by any overriding method
+     * to clean up old batch tasks.
+     */
+    @Override
+    public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception {
+        tx.delete(row, FluoQueryColumns.BATCH_COLUMN);
+    }
+
+
+}
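
To illustrate how processBatch and getNewSpan are meant to compose, here is a minimal sketch of a concrete subclass; ExampleBatchBindingSetUpdater, processUpToBatchSize, and nodeIdFromRow are illustrative names that are not part of this patch:

    // Sketch only: a subclass that requeues the unfinished remainder of its Span.
    public class ExampleBatchBindingSetUpdater extends AbstractBatchBindingSetUpdater {
        @Override
        public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception {
            super.processBatch(tx, row, batch); // delete the old entry from the batch column
            AbstractSpanBatchInformation spanBatch = (AbstractSpanBatchInformation) batch;
            // Assumed helper: processes at most getBatchSize() entries and returns the
            // last RowColumn touched, or null if the whole Span was processed.
            RowColumn lastProcessed = processUpToBatchSize(tx, spanBatch);
            if (lastProcessed != null) {
                // Batch size was reached first: shrink the Span and requeue the task.
                spanBatch.setSpan(getNewSpan(lastProcessed, spanBatch.getSpan()));
                BatchInformationDAO.addBatch(tx, nodeIdFromRow(row), spanBatch); // assumed helper
            }
        }
        private RowColumn processUpToBatchSize(TransactionBase tx, AbstractSpanBatchInformation batch) {
            return null; // sketch: real implementations scan tx over batch.getSpan()
        }
        private String nodeIdFromRow(Bytes row) {
            return row.toString(); // sketch: real implementations parse the node id from the row key
        }
    }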

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractSpanBatchInformation.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractSpanBatchInformation.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractSpanBatchInformation.java
new file mode 100644
index 0000000..498dd85
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/AbstractSpanBatchInformation.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+
+import java.util.Objects;
+
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.Span;
+
+import jline.internal.Preconditions;
+
+/**
+ * Abstract class for generating span-based notifications.  A span-based notification
+ * uses a {@link Span} to begin processing a Fluo Column at the position designated by the Span.
+ *
+ */
+public abstract class AbstractSpanBatchInformation extends BasicBatchInformation {
+
+    private Span span;
+
+    /**
+     * Creates an AbstractSpanBatchInformation object.
+     * @param batchSize - size of batch to be processed
+     * @param task - type of task processed (Add, Delete, Update)
+     * @param column - Column that the Span notification is applied to
+     * @param span - span used to indicate where processing should begin
+     */
+    public AbstractSpanBatchInformation(int batchSize, Task task, Column column, Span span) {
+        super(batchSize, task, column);
+        this.span = Preconditions.checkNotNull(span);
+    }
+
+    public AbstractSpanBatchInformation(Task task, Column column, Span span) {
+        this(DEFAULT_BATCH_SIZE, task, column, span);
+    }
+
+    /**
+     * @return Span that batch Task will be applied to
+     */
+    public Span getSpan() {
+        return span;
+    }
+
+    /**
+     * Sets span to which batch Task will be applied
+     * @param span
+     */
+    public void setSpan(Span span) {
+        this.span = span;
+    }
+    
+    @Override
+    public String toString() {
+        return new StringBuilder()
+                .append("Span Batch Information {\n")
+                .append("    Span: " + span + "\n")
+                .append("    Batch Size: " + super.getBatchSize() + "\n")
+                .append("    Task: " + super.getTask() + "\n")
+                .append("    Column: " + super.getColumn() + "\n")
+                .append("}")
+                .toString();
+    }
+    
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (!(other instanceof AbstractSpanBatchInformation)) {
+            return false;
+        }
+
+        AbstractSpanBatchInformation batch = (AbstractSpanBatchInformation) other;
+        return (super.getBatchSize() == batch.getBatchSize()) && Objects.equals(super.getColumn(), batch.getColumn()) && Objects.equals(this.span, batch.span)
+                && Objects.equals(super.getTask(), batch.getTask());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.getBatchSize(), span, super.getColumn(), super.getTask());
+    }
+    
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BasicBatchInformation.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BasicBatchInformation.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BasicBatchInformation.java
new file mode 100644
index 0000000..288ed6e
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BasicBatchInformation.java
@@ -0,0 +1,81 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import org.apache.fluo.api.data.Column;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * This class contains all of the common info contained in other implementations
+ * of BatchInformation.
+ *
+ */
+public abstract class BasicBatchInformation implements BatchInformation {
+    
+    private int batchSize;
+    private Task task;
+    private Column column;
+    
+    /**
+     * Creates a BasicBatchInformation object.
+     * @param batchSize - size of batch to be processed
+     * @param task - task to be processed
+     * @param column - Column in which data is processed
+     */
+    public BasicBatchInformation(int batchSize, Task task, Column column ) {
+        this.task = Preconditions.checkNotNull(task);
+        this.column = Preconditions.checkNotNull(column);
+        Preconditions.checkArgument(batchSize > 0);
+        this.batchSize = batchSize;
+    }
+    
+    /**
+     * Creates a BasicBatchInformation object with the default batch size.
+     * @param task - task to be processed
+     * @param column - Column in which data is processed
+     */
+    public BasicBatchInformation(Task task, Column column) {
+        Preconditions.checkNotNull(task);
+        Preconditions.checkNotNull(column);
+        this.task = task;
+        this.column = column;
+        this.batchSize = DEFAULT_BATCH_SIZE;
+    }
+
+    /**
+     * @return - size of batch that tasks are performed in
+     */
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    /**
+     * @return - type of Task performed (Add, Delete, Update)
+     */
+    public Task getTask() {
+        return task;
+    }
+    
+    /**
+     * @return - Column in which Task will be performed
+     */
+    public Column getColumn() {
+        return column;
+    }
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchBindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchBindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchBindingSetUpdater.java
new file mode 100644
index 0000000..2076d2d
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchBindingSetUpdater.java
@@ -0,0 +1,43 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+
+/**
+ * Interface for applying batch updates to the Fluo table based on the provided {@link BatchInformation}.
+ * This updater is used by the {@link BatchObserver} to apply batch updates to overcome the restriction
+ * that all transactions are processed in memory.  This allows Observers to process potentially large
+ * tasks that cannot fit into memory in a piece-wise, batch fashion.
+ */
+public interface BatchBindingSetUpdater {
+
+    /**
+     * Processes the {@link BatchInformation} object.  The BatchInformation will
+     * typically include a Task (either Add, Update, or Delete), along with information
+     * about the starting point to begin processing data.
+     * @param tx - Fluo Transaction
+     * @param row - contains the ID of the Fluo node to be processed
+     * @param batch - contains info about which cells of the Fluo query result node
+     * should be processed
+     * @throws Exception
+     */
+    public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception;
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformation.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformation.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformation.java
new file mode 100644
index 0000000..7b23ee7
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformation.java
@@ -0,0 +1,57 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import org.apache.fluo.api.data.Column;
+
+/**
+ * Interface for submitting batch Fluo tasks to be processed by the
+ * {@link BatchObserver}. The BatchObserver applies batch updates to overcome
+ * the restriction that all Fluo transactions are processed in memory. This
+ * allows the Rya Fluo application to process large tasks that cannot fit into
+ * memory in a piece-wise, batch fashion.
+ */
+public interface BatchInformation {
+
+    public static enum Task {Add, Delete, Update}
+    public static int DEFAULT_BATCH_SIZE = 5000;
+    
+    /**
+     * @return batch size of the task
+     */
+    public int getBatchSize();
+    
+    /**
+     *
+     * @return Task to be performed
+     */
+    public Task getTask();
+    
+    /**
+     * 
+     * @return Column that task will be performed on
+     */
+    public Column getColumn();
+    
+    /**
+     * 
+     * @return BatchBindingSetUpdater used to process this Batch Task
+     */
+    public BatchBindingSetUpdater getBatchUpdater();
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformationDAO.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformationDAO.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformationDAO.java
new file mode 100644
index 0000000..f9ed658
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchInformationDAO.java
@@ -0,0 +1,59 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Optional;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.batch.serializer.BatchInformationSerializer;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+
+/**
+ * Class used for reading and writing {@link BatchInformation}.
+ *
+ */
+public class BatchInformationDAO {
+    
+    /**
+     * Adds BatchInformation to the {@link FluoQueryColumns#BATCH_COLUMN}.
+     * @param tx - Fluo Transaction
+     * @param nodeId - query node that batch task will be performed on
+     * @param batch - BatchInformation to be processed
+     */
+    public static void addBatch(TransactionBase tx, String nodeId, BatchInformation batch) {
+        Bytes row = BatchRowKeyUtil.getRow(nodeId);
+        tx.set(row, FluoQueryColumns.BATCH_COLUMN, Bytes.of(BatchInformationSerializer.toBytes(batch)));
+    }
+    
+    /**
+     * Retrieves BatchInformation.
+     * @param tx - Fluo transaction
+     * @param row - row that contains the batch information - this is the query id that the batch task will be performed on
+     * @return Optional containing the BatchInformation if it exists
+     */
+    public static Optional<BatchInformation> getBatchInformation(TransactionBase tx, Bytes row) {
+        Bytes val = tx.get(row, FluoQueryColumns.BATCH_COLUMN);
+        if(val != null) {
+            return BatchInformationSerializer.fromBytes(val.toArray());
+        } else {
+            return Optional.empty();
+        }
+    }
+    
+}
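
A short usage sketch; tx, nodeId, and the concrete BatchInformation instance batch are assumed to be in scope, and BatchRowKeyUtil is the same utility addBatch uses above:

    // Write the batch task so the BatchObserver will pick it up:
    BatchInformationDAO.addBatch(tx, nodeId, batch);

    // Read it back, as the BatchObserver does, and dispatch it:
    Optional<BatchInformation> stored =
            BatchInformationDAO.getBatchInformation(tx, BatchRowKeyUtil.getRow(nodeId));
    if (stored.isPresent()) {
        stored.get().getBatchUpdater().processBatch(tx, BatchRowKeyUtil.getRow(nodeId), stored.get());
    }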

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchObserver.java
new file mode 100644
index 0000000..6194236
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchObserver.java
@@ -0,0 +1,63 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Optional;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.observer.AbstractObserver;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+
+/**
+ * BatchObserver processes tasks that need to be broken into batches. Entries
+ * stored in this {@link FluoQueryColumns#BATCH_COLUMN} are of the form
+ * Row: nodeId, Value: BatchInformation. The nodeId indicates the node that the
+ * batch operation will be performed on. All batch operations are performed on
+ * the bindingSet column for the {@link NodeType} corresponding to the given
+ * nodeId. For example, if the nodeId indicated that the NodeType was
+ * StatementPattern, then the batch operation would be performed on
+ * {@link FluoQueryColumns#STATEMENT_PATTERN_BINDING_SET}. This Observer applies
+ * batch updates to overcome the restriction that all Fluo transactions are processed
+ * in memory. This allows the Rya Fluo application to process large tasks that cannot
+ * fit into memory in a piece-wise, batch fashion.
+ */
+public class BatchObserver extends AbstractObserver {
+
+    /**
+     * Processes the BatchInformation objects when they're written to the Batch column
+     * @param tx - Fluo transaction 
+     * @param row - row that contains {@link BatchInformation}
+     * @param col - column that contains BatchInformation
+     */
+    @Override
+    public void process(TransactionBase tx, Bytes row, Column col) throws Exception {
+        Optional<BatchInformation> batchInfo = BatchInformationDAO.getBatchInformation(tx, row);
+        if(batchInfo.isPresent()) {
+            batchInfo.get().getBatchUpdater().processBatch(tx, row, batchInfo.get());
+        }
+    }
+
+    @Override
+    public ObservedColumn getObservedColumn() {
+        return new ObservedColumn(FluoQueryColumns.BATCH_COLUMN, NotificationType.STRONG);
+    }
+
+}



[6/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
index a701052..8d218af 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQuery.java
@@ -18,6 +18,7 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import java.util.Collection;
@@ -29,7 +30,6 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
@@ -46,6 +46,7 @@ public class FluoQuery {
 
     private final Optional<QueryMetadata> queryMetadata;
     private final Optional<ConstructQueryMetadata> constructMetadata;
+    private final Optional<PeriodicQueryMetadata> periodicQueryMetadata;
     private final ImmutableMap<String, StatementPatternMetadata> statementPatternMetadata;
     private final ImmutableMap<String, FilterMetadata> filterMetadata;
     private final ImmutableMap<String, JoinMetadata> joinMetadata;
@@ -58,6 +59,7 @@ public class FluoQuery {
      * must use {@link Builder} instead.
      *
      * @param queryMetadata - The root node of a query that is updated in Fluo. (not null)
+     * @param periodicQueryMetadata - The periodic query node that is updated in Fluo.
      * @param statementPatternMetadata - A map from Node ID to Statement Pattern metadata as
      *   it is represented within the Fluo app. (not null)
      * @param filterMetadata - A map from Node ID to Filter metadata as it is represented
@@ -69,6 +71,7 @@ public class FluoQuery {
      */
     private FluoQuery(
             final QueryMetadata queryMetadata,
+            final Optional<PeriodicQueryMetadata> periodicQueryMetadata,
             final ImmutableMap<String, StatementPatternMetadata> statementPatternMetadata,
             final ImmutableMap<String, FilterMetadata> filterMetadata,
             final ImmutableMap<String, JoinMetadata> joinMetadata, 
@@ -76,6 +79,7 @@ public class FluoQuery {
                 this.aggregationMetadata = requireNonNull(aggregationMetadata);
         this.queryMetadata = Optional.of(requireNonNull(queryMetadata));
         this.constructMetadata = Optional.absent();
+        this.periodicQueryMetadata = periodicQueryMetadata;
         this.statementPatternMetadata = requireNonNull(statementPatternMetadata);
         this.filterMetadata = requireNonNull(filterMetadata);
         this.joinMetadata = requireNonNull(joinMetadata);
@@ -88,23 +92,26 @@ public class FluoQuery {
      * must use {@link Builder} instead.
      *
      * @param constructMetadata - The root node of a query that is updated in Fluo. (not null)
+     * @param periodicQueryMetadata - The periodic query node that is updated in Fluo.
      * @param statementPatternMetadata - A map from Node ID to Statement Pattern metadata as
      *   it is represented within the Fluo app. (not null)
      * @param filterMetadata A map from Node ID to Filter metadata as it is represented
      *   within the Fluo app. (not null)
-     * @param joinMetadata A map from Node ID to Join metadata as it is represented
+     * @param joinMetadata - A map from Node ID to Join metadata as it is represented
      *   within the Fluo app. (not null)
      * @param aggregationMetadata - A map from Node ID to Aggregation metadata as it is
      *   represented within the Fluo app. (not null)
      */
     private FluoQuery(
             final ConstructQueryMetadata constructMetadata,
+            final Optional<PeriodicQueryMetadata> periodicQueryMetadata,
             final ImmutableMap<String, StatementPatternMetadata> statementPatternMetadata,
             final ImmutableMap<String, FilterMetadata> filterMetadata,
             final ImmutableMap<String, JoinMetadata> joinMetadata,
             final ImmutableMap<String, AggregationMetadata> aggregationMetadata) {
         this.constructMetadata = Optional.of(requireNonNull(constructMetadata));
         this.queryMetadata = Optional.absent();
+        this.periodicQueryMetadata = periodicQueryMetadata;
         this.statementPatternMetadata = requireNonNull(statementPatternMetadata);
         this.filterMetadata = requireNonNull(filterMetadata);
         this.joinMetadata = requireNonNull(joinMetadata);
@@ -130,6 +137,13 @@ public class FluoQuery {
     public Optional<ConstructQueryMetadata> getConstructQueryMetadata() {
         return constructMetadata;
     }
+    
+    /**
+     * @return All of the Periodic Query metadata that is stored for the query.
+     */
+    public Optional<PeriodicQueryMetadata> getPeriodicQueryMetadata() {
+        return periodicQueryMetadata;
+    }
 
     /**
      * Get a Statement Pattern node's metadata.
@@ -207,6 +221,7 @@ public class FluoQuery {
     public int hashCode() {
         return Objects.hashCode(
                 queryMetadata,
+                periodicQueryMetadata,
                 statementPatternMetadata,
                 filterMetadata,
                 joinMetadata,
@@ -224,6 +239,7 @@ public class FluoQuery {
             return new EqualsBuilder()
                     .append(queryMetadata, fluoQuery.queryMetadata)
                     .append(constructMetadata,  fluoQuery.constructMetadata)
+                    .append(periodicQueryMetadata, fluoQuery.periodicQueryMetadata)
                     .append(statementPatternMetadata, fluoQuery.statementPatternMetadata)
                     .append(filterMetadata, fluoQuery.filterMetadata)
                     .append(joinMetadata, fluoQuery.joinMetadata)
@@ -247,6 +263,11 @@ public class FluoQuery {
             builder.append( constructMetadata.get().toString() );
             builder.append("\n");
         }
+        
+        if(periodicQueryMetadata.isPresent()) {
+            builder.append(periodicQueryMetadata.get());
+            builder.append("\n");
+        }
 
         for(final FilterMetadata metadata : filterMetadata.values()) {
             builder.append(metadata);
@@ -286,6 +307,7 @@ public class FluoQuery {
 
         private QueryMetadata.Builder queryBuilder = null;
         private ConstructQueryMetadata.Builder constructBuilder = null;
+        private PeriodicQueryMetadata.Builder periodicQueryBuilder = null;
         private final Map<String, StatementPatternMetadata.Builder> spBuilders = new HashMap<>();
         private final Map<String, FilterMetadata.Builder> filterBuilders = new HashMap<>();
         private final Map<String, JoinMetadata.Builder> joinBuilders = new HashMap<>();
@@ -388,6 +410,17 @@ public class FluoQuery {
         }
 
         /**
+         * Get a Join builder from this builder.
+         *
+         * @param nodeId - The Node ID the Join builder was stored at. (not null)
+         * @return The builder that was stored at the node id if one was found.
+         */
+        public Optional<JoinMetadata.Builder> getJoinBuilder(final String nodeId) {
+            requireNonNull(nodeId);
+            return Optional.fromNullable( joinBuilders.get(nodeId) );
+        }
+        
+        /**
          * Get an Aggregate builder from this builder.
          *
          * @param nodeId - The Node ID the Aggregate builder was stored at. (not null)
@@ -410,15 +443,28 @@ public class FluoQuery {
             return this;
         }
 
+        
+        
         /**
-         * Get a Join builder from this builder.
+         * Adds a new {@link PeriodicQueryMetadata.Builder} to this builder.
          *
-         * @param nodeId - The Node ID the Join builder was stored at. (not null)
-         * @return The builder that was stored at the node id if one was found.
+         * @param periodicQueryBuilder - A builder representing a specific Periodic Query within the query. (not null)
+         * @return This builder so that method invocation may be chained.
          */
-        public Optional<JoinMetadata.Builder> getJoinBuilder(final String nodeId) {
-            requireNonNull(nodeId);
-            return Optional.fromNullable( joinBuilders.get(nodeId) );
+        public Builder addPeriodicQueryMetadata(final PeriodicQueryMetadata.Builder periodicQueryBuilder) {
+            requireNonNull(periodicQueryBuilder);
+            this.periodicQueryBuilder = periodicQueryBuilder;
+            return this;
+        }
+
+        
+        /**
+         * Get a PeriodicQuery builder from this builder.
+         *
+         * @return The PeriodicQuery builder if one has been set.
+         */
+        public Optional<PeriodicQueryMetadata.Builder> getPeriodicQueryBuilder() {
+            return Optional.fromNullable( periodicQueryBuilder);
         }
         
 
@@ -426,8 +472,19 @@ public class FluoQuery {
          * @return Creates a {@link FluoQuery} using the values that have been supplied to this builder.
          */
         public FluoQuery build() {
-            Preconditions.checkArgument(
-                    (queryBuilder != null && constructBuilder == null) || (queryBuilder == null && constructBuilder != null));
+            checkArgument((queryBuilder != null && constructBuilder == null) || (queryBuilder == null && constructBuilder != null));
+            
+            Optional<QueryMetadata.Builder> optionalQueryBuilder = getQueryBuilder();
+            QueryMetadata queryMetadata = null;
+            if(optionalQueryBuilder.isPresent()) {
+                queryMetadata = optionalQueryBuilder.get().build();
+            }
+            
+            Optional<PeriodicQueryMetadata.Builder> optionalPeriodicQueryBuilder = getPeriodicQueryBuilder();
+            PeriodicQueryMetadata periodicQueryMetadata = null;
+            if(optionalPeriodicQueryBuilder.isPresent()) {
+                periodicQueryMetadata = optionalPeriodicQueryBuilder.get().build();
+            }
 
             final ImmutableMap.Builder<String, StatementPatternMetadata> spMetadata = ImmutableMap.builder();
             for(final Entry<String, StatementPatternMetadata.Builder> entry : spBuilders.entrySet()) {
@@ -450,11 +507,11 @@ public class FluoQuery {
             }
 
             if(queryBuilder != null) {
-                return new FluoQuery(queryBuilder.build(), spMetadata.build(), filterMetadata.build(), joinMetadata.build(), aggregateMetadata.build());
+                return new FluoQuery(queryBuilder.build(), Optional.fromNullable(periodicQueryMetadata), spMetadata.build(), filterMetadata.build(), joinMetadata.build(), aggregateMetadata.build());
             }
             //constructBuilder non-null in this case, but no need to check
             else {
-                return new FluoQuery(constructBuilder.build(), spMetadata.build(), filterMetadata.build(), joinMetadata.build(), aggregateMetadata.build());
+                return new FluoQuery(constructBuilder.build(), Optional.fromNullable(periodicQueryMetadata), spMetadata.build(), filterMetadata.build(), joinMetadata.build(), aggregateMetadata.build());
             }
             
         }
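
A sketch of how the new periodic hooks on the builder are used; the builder and periodicQueryBuilder instances are assumed to be in scope:

    // Register the periodic metadata builder and later retrieve the built metadata.
    builder.addPeriodicQueryMetadata(periodicQueryBuilder);
    Optional<PeriodicQueryMetadata.Builder> maybePeriodic = builder.getPeriodicQueryBuilder();

    FluoQuery fluoQuery = builder.build();
    Optional<PeriodicQueryMetadata> periodic = fluoQuery.getPeriodicQueryMetadata();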

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
index 3396114..ed18d49 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryColumns.java
@@ -63,14 +63,28 @@ import edu.umd.cs.findbugs.annotations.NonNull;
  *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:nodeId</td> <td>The Node ID of the Filter.</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:veriableOrder</td> <td>The Variable Order binding sets are emitted with.</td> </tr>
- *     <tr> <td>Node ID</td> <td>filterMetadata:originalSparql</td> <td>The original SPRAQL query this filter was derived from.</td> </tr>
- *     <tr> <td>Node ID</td> <td>filterMetadata:filterIndexWithinSparql</td> <td>Indicates which filter within the original SPARQL query this represents.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>filterMetadata:filterSparql</td> <td>A SPARQL query representing this filter.</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:parentNodeId</td> <td>The Node ID this filter emits Binding Sets to.</td> </tr>
  *     <tr> <td>Node ID</td> <td>filterMetadata:childNodeId</td> <td>The Node ID of the node that feeds this node Binding Sets.</td> </tr>
  *     <tr> <td>Node ID + DELIM + Binding Set String</td> <td>filterMetadata:bindingSet</td> <td>A {@link VisibilityBindingSet} object.</td> </tr>
  *   </table>
  * </p>
  * <p>
+ *   <b>Periodic Bin Metadata</b>
+ *   <table border="1" style="width:100%">
+ *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:nodeId</td> <td>The Node ID of the Periodic Query.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:variableOrder</td> <td>The Variable Order binding sets are emitted with.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:period</td> <td>The period size used to form BindingSet bins.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:windowSize</td> <td>The window size used to form BindingSet bins.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:timeUnit</td> <td>The unit of time corresponding to period and window size.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:temporalVariable</td> <td>The BindingSet variable corresponding to event time.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:parentNodeId</td> <td>The parent node for this node.</td> </tr>
+ *     <tr> <td>Node ID</td> <td>periodicQueryMetadata:childNodeId</td> <td>The child node for this node.</td> </tr>
+ *     <tr> <td>Node ID + DELIM + Binding set String</td> <td>periodicQueryMetadata:bindingSet</td> <td>A binned BindingSet.</td> </tr>
+ *   </table>
+ * </p>
+ * <p>
  *   <b>Join Metadata</b>
  *   <table border="1" style="width:100%">
  *     <tr> <th>Fluo Row</td> <th>Fluo Column</td> <th>Fluo Value</td> </tr>
@@ -117,6 +131,7 @@ public class FluoQueryColumns {
     public static final String STATEMENT_PATTERN_METADATA_CF = "statementPatternMetadata";
     public static final String AGGREGATION_METADATA_CF = "aggregationMetadata";
     public static final String CONSTRUCT_METADATA_CF = "constructMetadata";
+    public static final String PERIODIC_QUERY_METADATA_CF = "periodicQueryMetadata";
 
     /**
      * New triples that have been added to Rya are written as a row in this
@@ -174,13 +189,23 @@ public class FluoQueryColumns {
 
     // Filter Metadata columns.
     public static final Column FILTER_NODE_ID = new Column(FILTER_METADATA_CF, "nodeId");
-    public static final Column FILTER_VARIABLE_ORDER = new Column(FILTER_METADATA_CF, "veriableOrder");
-    public static final Column FILTER_ORIGINAL_SPARQL = new Column(FILTER_METADATA_CF, "originalSparql");
-    public static final Column FILTER_INDEX_WITHIN_SPARQL = new Column(FILTER_METADATA_CF, "filterIndexWithinSparql");
+    public static final Column FILTER_VARIABLE_ORDER = new Column(FILTER_METADATA_CF, "variableOrder");
+    public static final Column FILTER_SPARQL = new Column(FILTER_METADATA_CF, "filterSparql");
     public static final Column FILTER_PARENT_NODE_ID = new Column(FILTER_METADATA_CF, "parentNodeId");
     public static final Column FILTER_CHILD_NODE_ID = new Column(FILTER_METADATA_CF, "childNodeId");
     public static final Column FILTER_BINDING_SET = new Column(FILTER_METADATA_CF, "bindingSet");
-
+    
+    // Periodic Bin Metadata columns.
+    public static final Column PERIODIC_QUERY_NODE_ID = new Column(PERIODIC_QUERY_METADATA_CF, "nodeId");
+    public static final Column PERIODIC_QUERY_VARIABLE_ORDER = new Column(PERIODIC_QUERY_METADATA_CF, "variableOrder");
+    public static final Column PERIODIC_QUERY_PARENT_NODE_ID = new Column(PERIODIC_QUERY_METADATA_CF, "parentNodeId");
+    public static final Column PERIODIC_QUERY_CHILD_NODE_ID = new Column(PERIODIC_QUERY_METADATA_CF, "childNodeId");
+    public static final Column PERIODIC_QUERY_BINDING_SET = new Column(PERIODIC_QUERY_METADATA_CF, "bindingSet");
+    public static final Column PERIODIC_QUERY_PERIOD = new Column(PERIODIC_QUERY_METADATA_CF, "period");
+    public static final Column PERIODIC_QUERY_WINDOWSIZE = new Column(PERIODIC_QUERY_METADATA_CF, "windowSize");
+    public static final Column PERIODIC_QUERY_TIMEUNIT = new Column(PERIODIC_QUERY_METADATA_CF, "timeUnit");
+    public static final Column PERIODIC_QUERY_TEMPORAL_VARIABLE = new Column(PERIODIC_QUERY_METADATA_CF, "temporalVariable");
+    
     // Join Metadata columns.
     public static final Column JOIN_NODE_ID = new Column(JOIN_METADATA_CF, "nodeId");
     public static final Column JOIN_VARIABLE_ORDER = new Column(JOIN_METADATA_CF, "variableOrder");
@@ -207,6 +232,18 @@ public class FluoQueryColumns {
     public static final Column AGGREGATION_BINDING_SET = new Column(AGGREGATION_METADATA_CF, "bindingSet");
 
     /**
+     * BatchObserver column for processing tasks that need to be broken into
+     * batches. Entries stored in this column are of the form Row:
+     * nodeId, Value: BatchInformation. The nodeId indicates the node that the
+     * batch operation will be performed on. All batch operations are performed
+     * on the bindingSet column for the NodeType indicated by the given nodeId.
+     * For example, if the nodeId indicated that the NodeType was
+     * StatementPattern, then the batch operation would be performed on
+     * {@link FluoQueryColumns#STATEMENT_PATTERN_BINDING_SET}.
+     */
+    public static final Column BATCH_COLUMN = new Column("batch","information");
+
+    /**
      * Enumerates the {@link Column}s that hold all of the fields for each type
      * of node that can compose a query.
      */
@@ -220,6 +257,20 @@ public class FluoQueryColumns {
                         QUERY_VARIABLE_ORDER,
                         QUERY_SPARQL,
                         QUERY_CHILD_NODE_ID)),
+        
+        
+        /**
+         * The columns a {@link PeriodicQueryMetadata} object's fields are stored within.
+         */
+        PERIODIC_QUERY_COLUMNS(
+                Arrays.asList(PERIODIC_QUERY_NODE_ID,
+                        PERIODIC_QUERY_VARIABLE_ORDER,
+                        PERIODIC_QUERY_PERIOD,
+                        PERIODIC_QUERY_WINDOWSIZE,
+                        PERIODIC_QUERY_TIMEUNIT,
+                        PERIODIC_QUERY_TEMPORAL_VARIABLE,
+                        PERIODIC_QUERY_PARENT_NODE_ID,
+                        PERIODIC_QUERY_CHILD_NODE_ID)),
 
         /**
          * The columns a {@link ConstructQueryMetadata} object's fields are stored within.
@@ -239,8 +290,7 @@ public class FluoQueryColumns {
         FILTER_COLUMNS(
                 Arrays.asList(FILTER_NODE_ID,
                         FILTER_VARIABLE_ORDER,
-                        FILTER_ORIGINAL_SPARQL,
-                        FILTER_INDEX_WITHIN_SPARQL,
+                        FILTER_SPARQL,
                         FILTER_PARENT_NODE_ID,
                         FILTER_CHILD_NODE_ID)),
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
index 5e9d654..8675b80 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FluoQueryMetadataDAO.java
@@ -26,6 +26,7 @@ import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.util.Collection;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.fluo.api.client.SnapshotBase;
 import org.apache.fluo.api.client.TransactionBase;
@@ -40,6 +41,7 @@ import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 
 import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
@@ -170,8 +172,7 @@ public class FluoQueryMetadataDAO {
         final String rowId = metadata.getNodeId();
         tx.set(rowId, FluoQueryColumns.FILTER_NODE_ID, rowId);
         tx.set(rowId, FluoQueryColumns.FILTER_VARIABLE_ORDER, metadata.getVariableOrder().toString());
-        tx.set(rowId, FluoQueryColumns.FILTER_ORIGINAL_SPARQL, metadata.getOriginalSparql() );
-        tx.set(rowId, FluoQueryColumns.FILTER_INDEX_WITHIN_SPARQL, metadata.getFilterIndexWithinSparql()+"" );
+        tx.set(rowId, FluoQueryColumns.FILTER_SPARQL, metadata.getFilterSparql() );
         tx.set(rowId, FluoQueryColumns.FILTER_PARENT_NODE_ID, metadata.getParentNodeId() );
         tx.set(rowId, FluoQueryColumns.FILTER_CHILD_NODE_ID, metadata.getChildNodeId() );
     }
@@ -195,8 +196,7 @@ public class FluoQueryMetadataDAO {
         final String rowId = nodeId;
         final Map<Column, String> values = sx.gets(rowId,
                 FluoQueryColumns.FILTER_VARIABLE_ORDER,
-                FluoQueryColumns.FILTER_ORIGINAL_SPARQL,
-                FluoQueryColumns.FILTER_INDEX_WITHIN_SPARQL,
+                FluoQueryColumns.FILTER_SPARQL,
                 FluoQueryColumns.FILTER_PARENT_NODE_ID,
                 FluoQueryColumns.FILTER_CHILD_NODE_ID);
 
@@ -204,18 +204,88 @@ public class FluoQueryMetadataDAO {
         final String varOrderString = values.get(FluoQueryColumns.FILTER_VARIABLE_ORDER);
         final VariableOrder varOrder = new VariableOrder(varOrderString);
 
-        final String originalSparql = values.get(FluoQueryColumns.FILTER_ORIGINAL_SPARQL);
-        final int filterIndexWithinSparql = Integer.parseInt(values.get(FluoQueryColumns.FILTER_INDEX_WITHIN_SPARQL));
+        final String originalSparql = values.get(FluoQueryColumns.FILTER_SPARQL);
         final String parentNodeId = values.get(FluoQueryColumns.FILTER_PARENT_NODE_ID);
         final String childNodeId = values.get(FluoQueryColumns.FILTER_CHILD_NODE_ID);
 
-        return FilterMetadata.builder(nodeId)
+        return FilterMetadata.builder(nodeId).setVarOrder(varOrder).setFilterSparql(originalSparql)
+                .setParentNodeId(parentNodeId).setChildNodeId(childNodeId);
+    }
+
+    /**
+     * Write an instance of {@link PeriodicQueryMetadata} to the Fluo table.
+     *
+     * @param tx
+     *            - The transaction that will be used to commit the metadata.
+     *            (not null)
+     * @param metadata
+     *            - The PeriodicQuery node metadata that will be written to the
+     *            table. (not null)
+     */
+    public void write(final TransactionBase tx, final PeriodicQueryMetadata metadata) {
+        requireNonNull(tx);
+        requireNonNull(metadata);
+
+        final String rowId = metadata.getNodeId();
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_NODE_ID, rowId);
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_VARIABLE_ORDER, metadata.getVariableOrder().toString());
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_PARENT_NODE_ID, metadata.getParentNodeId());
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_CHILD_NODE_ID, metadata.getChildNodeId());
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_PERIOD, Long.toString(metadata.getPeriod()));
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_WINDOWSIZE, Long.toString(metadata.getWindowSize()));
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_TIMEUNIT, metadata.getUnit().name());
+        tx.set(rowId, FluoQueryColumns.PERIODIC_QUERY_TEMPORAL_VARIABLE, metadata.getTemporalVariable());
+    }
+
+    /**
+     * Read an instance of {@link PeriodicQueryMetadata} from the Fluo table.
+     *
+     * @param sx
+     *            - The snapshot that will be used to read the metadata. (not
+     *            null)
+     * @param nodeId
+     *            - The nodeId of the PeriodicQuery node that will be read. (not
+     *            null)
+     * @return The {@link PeriodicQueryMetadata} that was read from table.
+     */
+    public PeriodicQueryMetadata readPeriodicQueryMetadata(final SnapshotBase sx, final String nodeId) {
+        return readPeriodicQueryMetadataBuilder(sx, nodeId).build();
+    }
+
+    private PeriodicQueryMetadata.Builder readPeriodicQueryMetadataBuilder(final SnapshotBase sx, final String nodeId) {
+        requireNonNull(sx);
+        requireNonNull(nodeId);
+
+        // Fetch the values from the Fluo table.
+        final String rowId = nodeId;
+        final Map<Column, String> values = sx.gets(rowId, FluoQueryColumns.PERIODIC_QUERY_VARIABLE_ORDER,
+                FluoQueryColumns.PERIODIC_QUERY_PARENT_NODE_ID, FluoQueryColumns.PERIODIC_QUERY_CHILD_NODE_ID,
+                FluoQueryColumns.PERIODIC_QUERY_PERIOD, FluoQueryColumns.PERIODIC_QUERY_WINDOWSIZE,
+                FluoQueryColumns.PERIODIC_QUERY_TIMEUNIT, FluoQueryColumns.PERIODIC_QUERY_TEMPORAL_VARIABLE);
+
+        // Return an object holding them.
+        final String varOrderString = values.get(FluoQueryColumns.PERIODIC_QUERY_VARIABLE_ORDER);
+        final VariableOrder varOrder = new VariableOrder(varOrderString);
+        final String parentNodeId = values.get(FluoQueryColumns.PERIODIC_QUERY_PARENT_NODE_ID);
+        final String childNodeId = values.get(FluoQueryColumns.PERIODIC_QUERY_CHILD_NODE_ID);
+        final String temporalVariable = values.get(FluoQueryColumns.PERIODIC_QUERY_TEMPORAL_VARIABLE);
+        final String period = values.get(FluoQueryColumns.PERIODIC_QUERY_PERIOD);
+        final String window = values.get(FluoQueryColumns.PERIODIC_QUERY_WINDOWSIZE);
+        final String timeUnit = values.get(FluoQueryColumns.PERIODIC_QUERY_TIMEUNIT);
+
+        return PeriodicQueryMetadata.builder()
+                .setNodeId(nodeId)
                 .setVarOrder(varOrder)
-                .setOriginalSparql(originalSparql)
-                .setFilterIndexWithinSparql(filterIndexWithinSparql)
                 .setParentNodeId(parentNodeId)
-                .setChildNodeId(childNodeId);
+                .setChildNodeId(childNodeId)
+                .setWindowSize(Long.parseLong(window))
+                .setPeriod(Long.parseLong(period))
+                .setTemporalVariable(temporalVariable)
+                .setUnit(TimeUnit.valueOf(timeUnit));
+
     }
+    
+    
 
     /**
      * Write an instance of {@link JoinMetadata} to the Fluo table.
@@ -325,12 +395,10 @@ public class FluoQueryMetadataDAO {
         final String pattern = values.get(FluoQueryColumns.STATEMENT_PATTERN_PATTERN);
         final String parentNodeId = values.get(FluoQueryColumns.STATEMENT_PATTERN_PARENT_NODE_ID);
 
-        return StatementPatternMetadata.builder(nodeId)
-                .setVarOrder(varOrder)
-                .setStatementPattern(pattern)
-                .setParentNodeId(parentNodeId);
+        return StatementPatternMetadata.builder(nodeId).setVarOrder(varOrder).setStatementPattern(pattern).setParentNodeId(parentNodeId);
     }
 
+
     /**
      * Write an instance of {@link AggregationMetadata} to the Fluo table.
      *
@@ -432,10 +500,11 @@ public class FluoQueryMetadataDAO {
         requireNonNull(query);
 
         // Write the rest of the metadata objects.
-        switch(query.getQueryType()) {
+        switch (query.getQueryType()) {
         case Construct:
             ConstructQueryMetadata constructMetadata = query.getConstructQueryMetadata().get();
-            // Store the Query ID so that it may be looked up from the original SPARQL string.
+            // Store the Query ID so that it may be looked up from the original
+            // SPARQL string.
             final String constructSparql = constructMetadata.getSparql();
             final String constructQueryId = constructMetadata.getNodeId();
             tx.set(Bytes.of(constructSparql), FluoQueryColumns.QUERY_ID, Bytes.of(constructQueryId));
@@ -443,13 +512,19 @@ public class FluoQueryMetadataDAO {
             break;
         case Projection:
             QueryMetadata queryMetadata = query.getQueryMetadata().get();
-            // Store the Query ID so that it may be looked up from the original SPARQL string.
+            // Store the Query ID so that it may be looked up from the original
+            // SPARQL string.
             final String sparql = queryMetadata.getSparql();
             final String queryId = queryMetadata.getNodeId();
             tx.set(Bytes.of(sparql), FluoQueryColumns.QUERY_ID, Bytes.of(queryId));
             write(tx, queryMetadata);
             break;
         }
+        
+        Optional<PeriodicQueryMetadata> periodicMetadata = query.getPeriodicQueryMetadata();
+        if(periodicMetadata.isPresent()) {
+            write(tx, periodicMetadata.get());
+        }
 
         for(final FilterMetadata filter : query.getFilterMetadata()) {
             write(tx, filter);
@@ -510,6 +585,15 @@ public class FluoQueryMetadataDAO {
             addChildMetadata(sx, builder, constructBuilder.build().getChildNodeId());
             break;
 
+        case PERIODIC_QUERY:
+            // Add this node's metadata.
+            final PeriodicQueryMetadata.Builder periodicQueryBuilder = readPeriodicQueryMetadataBuilder(sx, childNodeId);
+            builder.addPeriodicQueryMetadata(periodicQueryBuilder);
+
+            // Add it's child's metadata.
+            addChildMetadata(sx, builder, periodicQueryBuilder.build().getChildNodeId());
+            break;
+            
         case AGGREGATION:
             // Add this node's metadata.
             final AggregationMetadata.Builder aggregationBuilder = readAggregationMetadataBuilder(sx, childNodeId);
@@ -546,6 +630,7 @@ public class FluoQueryMetadataDAO {
             break;
         default:
             break;
+        
         }
     }
 }
\ No newline at end of file
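
For reference, a round-trip sketch of the new PeriodicQueryMetadata DAO methods; the setters mirror those used in readPeriodicQueryMetadataBuilder above, while the node ids, variable order, and the tx/sx handles are illustrative assumptions:

    PeriodicQueryMetadata metadata = PeriodicQueryMetadata.builder()
            .setNodeId("PERIODIC_QUERY_example")           // illustrative id
            .setVarOrder(new VariableOrder("periodicBinId"))
            .setParentNodeId(parentNodeId)                 // assumed in scope
            .setChildNodeId(childNodeId)                   // assumed in scope
            .setWindowSize(15)
            .setPeriod(5)
            .setUnit(TimeUnit.SECONDS)
            .setTemporalVariable("time")
            .build();

    FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
    dao.write(tx, metadata);   // within a Fluo transaction
    PeriodicQueryMetadata roundTrip = dao.readPeriodicQueryMetadata(sx, metadata.getNodeId());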

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryMetadata.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryMetadata.java
new file mode 100644
index 0000000..33253f2
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryMetadata.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.query;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+
+/**
+ * Metadata required for periodic queries in the Rya Fluo application.
+ * When a periodic query is registered with the Rya Fluo application, BindingSets
+ * are placed into temporal bins based on whether they fall within the window
+ * preceding a period's ending time.  This metadata is used to create a Bin Id,
+ * equal to the period's ending time, that is inserted into each BindingSet
+ * belonging to that bin.  This allows the AggregationUpdater to aggregate the
+ * bins by grouping on the Bin Id.
+ * 
+ */
+public class PeriodicQueryMetadata extends CommonNodeMetadata {
+
+    private String parentNodeId;
+    private String childNodeId;
+    private long windowSize;
+    private long period;
+    private TimeUnit unit;
+    private String temporalVariable;
+
+    /**
+     * Constructs an instance of PeriodicQueryMetadata
+     * @param nodeId - id of periodic query node
+     * @param varOrder - variable order indicating the order the BindingSet results are written in
+     * @param parentNodeId - id of parent node
+     * @param childNodeId - id of child node
+     * @param windowSize - size of window used for filtering
+     * @param period - period size that indicates frequency of notifications
+     * @param unit - TimeUnit corresponding to window and period
+     * @param temporalVariable - temporal variable that periodic conditions are applied to
+     */
+    public PeriodicQueryMetadata(String nodeId, VariableOrder varOrder, String parentNodeId, String childNodeId, long windowSize, long period,
+            TimeUnit unit, String temporalVariable) {
+        super(nodeId, varOrder);
+        this.parentNodeId = Preconditions.checkNotNull(parentNodeId);
+        this.childNodeId = Preconditions.checkNotNull(childNodeId);
+        this.temporalVariable = Preconditions.checkNotNull(temporalVariable);
+        this.unit = Preconditions.checkNotNull(unit);
+        Preconditions.checkArgument(period > 0);
+        Preconditions.checkArgument(windowSize >= period);
+
+        this.windowSize = windowSize;
+        this.period = period;
+    }
+
+    /**
+     * @return id of parent for navigating query
+     */
+    public String getParentNodeId() {
+        return parentNodeId;
+    }
+
+    /**
+     * 
+     * @return id of child for navigating query
+     */
+    public String getChildNodeId() {
+        return childNodeId;
+    }
+    
+    /**
+     * 
+     * @return temporal variable used for filtering events
+     */
+    public String getTemporalVariable() {
+        return temporalVariable;
+    }
+
+    /**
+     * @return window duration in millis
+     */
+    public long getWindowSize() {
+        return windowSize;
+    }
+
+    /**
+     * @return period duration in millis
+     */
+    public long getPeriod() {
+        return period;
+    }
+
+    /**
+     * @return {@link TimeUnit} for window duration and period duration
+     */
+    public TimeUnit getUnit() {
+        return unit;
+    }
+
+
+    /**
+     * @return {@link Builder} for chaining method calls to construct an instance of PeriodicQueryMetadata.
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(super.getNodeId(), super.getVariableOrder(), childNodeId, parentNodeId, temporalVariable, period, windowSize, unit);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (o == this) {
+            return true;
+        }
+
+        if (o instanceof PeriodicQueryMetadata) {
+            if (super.equals(o)) {
+                PeriodicQueryMetadata metadata = (PeriodicQueryMetadata) o;
+                return new EqualsBuilder().append(childNodeId, metadata.childNodeId).append(parentNodeId, metadata.parentNodeId)
+                        .append(windowSize, metadata.windowSize).append(period, metadata.period)
+                        .append(unit, metadata.unit).append(temporalVariable, metadata.temporalVariable).isEquals();
+            }
+            return false;
+        }
+
+        return false;
+    }
+    
+    @Override
+    public String toString() {
+        return new StringBuilder()
+                .append("PeriodicQueryMetadata {\n")
+                .append("    Node ID: " + super.getNodeId() + "\n")
+                .append("    Variable Order: " + super.getVariableOrder() + "\n")
+                .append("    Parent Node ID: " + parentNodeId + "\n")
+                .append("    Child Node ID: " + childNodeId + "\n")
+                .append("    Period: " + period + "\n")
+                .append("    Window Size: " + windowSize + "\n")
+                .append("    Time Unit: " + unit + "\n")
+                .append("    Temporal Variable: " + temporalVariable + "\n")
+                .append("}")
+                .toString();
+    }
+
+
+    /**
+     * Builder for chaining method calls to construct an instance of PeriodicQueryMetadata.
+     */
+    public static class Builder {
+
+        private String nodeId;
+        private VariableOrder varOrder;
+        private String parentNodeId;
+        private String childNodeId;
+        private long windowSize;
+        private long period;
+        private TimeUnit unit;
+        private String temporalVariable;
+
+        public Builder setNodeId(String nodeId) {
+            this.nodeId = nodeId;
+            return this;
+        }
+        
+        /**
+         * 
+         * @return id of this node
+         */
+        public String getNodeId() {
+            return nodeId;
+        }
+        
+        /**
+         * Set the {@link VariableOrder}
+         * @param varOrder - indicates the order in which results will be written
+         * @return Builder for chaining method calls
+         */
+        public Builder setVarOrder(VariableOrder varOrder) {
+            this.varOrder = varOrder;
+            return this;
+        }
+        
+        /**
+         * Returns the {@link VariableOrder}.
+         * @return VariableOrder indicating the order in which results are written
+         */
+        public VariableOrder getVarOrder() {
+            return varOrder;
+        }
+        
+        /**
+         * Sets id of parent node
+         * @param parentNodeId
+         * @return Builder for chaining method calls
+         */
+        public Builder setParentNodeId(String parentNodeId) {
+            this.parentNodeId = parentNodeId;
+            return this;
+        }
+      
+        /**
+         * @return id of parent node
+         */
+        public String getParentNodeId() {
+            return parentNodeId;
+        }
+
+        /**
+         * Set id of child node
+         * @param childNodeId
+         * @return Builder for chaining method calls
+         */
+        public Builder setChildNodeId(String childNodeId) {
+            this.childNodeId = childNodeId;
+            return this;
+        }
+        
+        /**
+         * Sets window size for periodic query
+         * @param windowSize
+         * @return Builder for chaining method calls
+         */
+        public Builder setWindowSize(long windowSize) {
+            this.windowSize = windowSize;
+            return this;
+        }
+
+        /**
+         * Sets period for periodic query
+         * @param period
+         * @return Builder for chaining method calls
+         */
+        public Builder setPeriod(long period) {
+            this.period = period;
+            return this;
+        }
+
+        /**
+         * Sets time unit of window and period for periodic query
+         * @param unit
+         * @return Builder for chaining method calls
+         */
+        public Builder setUnit(TimeUnit unit) {
+            this.unit = unit;
+            return this;
+        }
+        
+        /**
+         * Indicates which variable in the BindingSet results is the temporal variable that
+         * periodic conditions should be applied to
+         * @param temporalVariable
+         * @return Builder for chaining method calls
+         */
+        public Builder setTemporalVariable(String temporalVariable) {
+            this.temporalVariable = temporalVariable;
+            return this;
+        }
+
+        /**
+         * @return PeriodicQueryMetadata constructed from parameters passed to this Builder
+         */
+        public PeriodicQueryMetadata build() {
+            return new PeriodicQueryMetadata(nodeId, varOrder, parentNodeId, childNodeId, windowSize, period, unit, temporalVariable);
+        }
+    }
+
+}
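
For illustration, a minimal sketch of building this metadata with the Builder.
The node ids and variable order below are made-up example values, and the
durations follow the convention used by the Fluo app, which stores them in
millis with TimeUnit.MILLISECONDS:

    import java.util.concurrent.TimeUnit;

    import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
    import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;

    public class PeriodicQueryMetadataExample {
        public static void main(String[] args) {
            PeriodicQueryMetadata metadata = PeriodicQueryMetadata.builder()
                    .setNodeId("PERIODIC_QUERY_example")        // hypothetical id
                    .setVarOrder(new VariableOrder("periodicBinId", "obs", "time"))
                    .setParentNodeId("QUERY_example")           // hypothetical id
                    .setChildNodeId("SP_example")               // hypothetical id
                    .setWindowSize(TimeUnit.HOURS.toMillis(12)) // 12 hour window
                    .setPeriod(TimeUnit.HOURS.toMillis(6))      // 6 hour period
                    .setUnit(TimeUnit.MILLISECONDS)             // durations are in millis
                    .setTemporalVariable("time")
                    .build();
            System.out.println(metadata);
        }
    }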

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryNode.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryNode.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryNode.java
new file mode 100644
index 0000000..f1ade59
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/PeriodicQueryNode.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.query;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.openrdf.query.algebra.QueryModelVisitor;
+import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.UnaryTupleOperator;
+import org.openrdf.query.algebra.evaluation.function.Function;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * This is a {@link UnaryTupleOperator} that is placed in the parsed query
+ * {@link TupleExpr} when a Filter containing the Periodic {@link Function}
+ * {@link PeriodicQueryUtil#PeriodicQueryURI} is encountered in the SPARQL String.
+ * The PeriodicQueryNode is created from the arguments passed to the Periodic Function,
+ * which consist of a time unit (a value from the Time ontology:
+ * http://www.w3.org/2006/time), a temporal period, a temporal window of time, and the
+ * temporal variable in the query. The purpose of the PeriodicQueryNode
+ * is to filter out all events that did not occur within the specified window of time
+ * before this instant and to generate notifications at a regular interval indicated by the period.
+ *
+ */
+public class PeriodicQueryNode extends UnaryTupleOperator {
+
+    private TimeUnit unit;
+    private long windowDuration;
+    private long periodDuration;
+    private String temporalVar;
+    
+    /**
+     * Creates a PeriodicQueryNode from the specified values.
+     * @param window - window of time that an event must occur within, measured from this instant
+     * @param period - regular interval at which notifications are generated (must be less than or equal to the window).
+     * @param unit - time unit of the period and window
+     * @param temporalVar - temporal variable in query used for filtering
+     * @param arg - child of PeriodicQueryNode in parsed query
+     */
+    public PeriodicQueryNode(long window, long period, TimeUnit unit, String temporalVar, TupleExpr arg) {
+        super(checkNotNull(arg));
+        checkArgument(0 < period && period <= window);
+        this.temporalVar = checkNotNull(temporalVar);
+        this.unit = checkNotNull(unit);
+        this.windowDuration = window;
+        this.periodDuration = period;
+    }
+    
+    /**
+     * @return - temporal variable used to filter events
+     */
+    public String getTemporalVariable() {
+        return temporalVar;
+    }
+
+    /**
+     * @return window duration in millis
+     */
+    public long getWindowSize() {
+        return windowDuration;
+    }
+
+    /**
+     * @return period duration in millis
+     */
+    public long getPeriod() {
+        return periodDuration;
+    }
+
+    /**
+     * @return {@link TimeUnit} for window duration and period duration
+     */
+    public TimeUnit getUnit() {
+        return unit;
+    }
+    
+    @Override
+    public <X extends Exception> void visit(QueryModelVisitor<X> visitor) throws X {
+        visitor.meetOther(this);
+    }
+    
+    @Override
+    public boolean equals(Object other) {
+        if(this == other) {
+            return true;
+        }
+        
+        if (other instanceof PeriodicQueryNode) {
+            if (super.equals(other)) {
+                PeriodicQueryNode metadata = (PeriodicQueryNode) other;
+                return new EqualsBuilder().append(windowDuration, metadata.windowDuration).append(periodDuration, metadata.periodDuration)
+                        .append(unit, metadata.unit).append(temporalVar, metadata.temporalVar).isEquals();
+            }
+            return false;
+        }
+        
+        return false;
+    }
+    
+    @Override
+    public int hashCode() {
+        return Objects.hash(arg, unit, windowDuration, periodDuration, temporalVar);
+    }
+    
+    /**
+     * @return String representation of this node that is printed when the query tree is printed.
+     */
+    @Override
+    public String getSignature() {
+        StringBuilder sb = new StringBuilder();
+
+        sb.append("PeriodicQueryNode(");
+        sb.append("Var = " + temporalVar + ", ");
+        sb.append("Window = " + windowDuration + " ms, ");
+        sb.append("Period = " + periodDuration + " ms, ");
+        sb.append("Time Unit = " + unit  + ")");
+       
+
+        return sb.toString();
+    }
+    
+    @Override
+    public PeriodicQueryNode clone() {
+        PeriodicQueryNode clone = (PeriodicQueryNode)super.clone();
+        clone.setArg(getArg().clone());
+        clone.periodDuration = periodDuration;
+        clone.windowDuration = windowDuration;
+        clone.unit = unit;
+        clone.temporalVar = temporalVar;
+        return clone;
+    }
+
+}
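
For illustration, a minimal sketch of constructing a PeriodicQueryNode directly.
The SingletonSet stands in for the real child TupleExpr, and the durations are
in millis, matching what PeriodicQueryUtil produces when it parses the Periodic
Function:

    import java.util.concurrent.TimeUnit;

    import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryNode;
    import org.openrdf.query.algebra.SingletonSet;

    public class PeriodicQueryNodeExample {
        public static void main(String[] args) {
            // window = 12 hours, period = 6 hours; the period must be
            // positive and no larger than the window.
            PeriodicQueryNode node = new PeriodicQueryNode(
                    TimeUnit.HOURS.toMillis(12),
                    TimeUnit.HOURS.toMillis(6),
                    TimeUnit.MILLISECONDS,
                    "time",               // temporal variable in the query
                    new SingletonSet());  // placeholder child TupleExpr
            System.out.println(node.getSignature());
        }
    }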

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/QueryMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/QueryMetadata.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/QueryMetadata.java
index 23ac286..d017724 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/QueryMetadata.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/QueryMetadata.java
@@ -142,6 +142,10 @@ public class QueryMetadata extends CommonNodeMetadata {
         public Builder(final String nodeId) {
             this.nodeId = checkNotNull(nodeId);
         }
+        
+        public String getNodeId() {
+            return nodeId;
+        }
 
         
         /**
@@ -154,6 +158,13 @@ public class QueryMetadata extends CommonNodeMetadata {
             this.varOrder = varOrder;
             return this;
         }
+        
+        /**
+         * @return the variable order of binding sets that are emitted by this node
+         */
+        public VariableOrder getVariableOrder() {
+            return varOrder;
+        }
 
         /**
          * Set the SPARQL query whose results are being updated by the Fluo app.
@@ -176,6 +187,10 @@ public class QueryMetadata extends CommonNodeMetadata {
             this.childNodeId = childNodeId;
             return this;
         }
+        
+        public String getChildNodeId() {
+            return childNodeId;
+        }
 
         /**
          * @return An instance of {@link QueryMetadata} build using this builder's values.

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
index 631ce60..8e348f2 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/SparqlFluoQueryBuilder.java
@@ -18,12 +18,13 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
-import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.AGGREGATION_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.CONSTRUCT_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.FILTER_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.JOIN_PREFIX;
+import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.PERIODIC_QUERY_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.QUERY_PREFIX;
 import static org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants.SP_PREFIX;
 
@@ -40,12 +41,14 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.rya.indexing.pcj.fluo.app.ConstructGraph;
 import org.apache.rya.indexing.pcj.fluo.app.ConstructProjection;
-import org.apache.rya.indexing.pcj.fluo.app.FilterResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.FluoStringConverter;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
 import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationElement;
 import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata.AggregationType;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
+import org.apache.rya.indexing.pcj.fluo.app.util.FilterSerializer;
+import org.apache.rya.indexing.pcj.fluo.app.util.FilterSerializer.FilterParseException;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.openrdf.model.Value;
 import org.openrdf.model.impl.BNodeImpl;
@@ -105,7 +108,9 @@ public class SparqlFluoQueryBuilder {
         final FluoQuery.Builder fluoQueryBuilder = FluoQuery.builder();
 
         final NewQueryVisitor visitor = new NewQueryVisitor(sparql, fluoQueryBuilder, nodeIds);
-        parsedQuery.getTupleExpr().visit( visitor );
+        TupleExpr te = parsedQuery.getTupleExpr();
+        PeriodicQueryUtil.placePeriodicQueryNode(te);
+        te.visit( visitor );
 
         final FluoQuery fluoQuery = fluoQueryBuilder.build();
         return fluoQuery;
@@ -187,16 +192,17 @@ public class SparqlFluoQueryBuilder {
                 prefix = AGGREGATION_PREFIX;
             }  else if (node instanceof Reduced) {
                 prefix = CONSTRUCT_PREFIX;
+            } else if(node instanceof PeriodicQueryNode) {
+                prefix = PERIODIC_QUERY_PREFIX;
             } else {
                 throw new IllegalArgumentException("Node must be of type {StatementPattern, Join, Filter, Extension, Projection} but was " + node.getClass());
             }
 
-            // Create the unique portion of the id.
             final String unique = UUID.randomUUID().toString().replaceAll("-", "");
-
             // Put them together to create the Node ID.
             return prefix + "_" + unique;
         }
+        
     }
 
     /**
@@ -204,19 +210,13 @@ public class SparqlFluoQueryBuilder {
      * the node to a {@link FluoQuery.Builder}. This information is used by the
      * application's observers to incrementally update a PCJ.
      */
-    private static class NewQueryVisitor extends QueryModelVisitorBase<RuntimeException> {
+    public static class NewQueryVisitor extends QueryModelVisitorBase<RuntimeException> {
 
         private final NodeIds nodeIds;
         private final FluoQuery.Builder fluoQueryBuilder;
         private final String sparql;
 
         /**
-         * Stored with each Filter node so that we can figure out how to evaluate it within
-         * {@link FilterResultUpdater}. Incremented each time a filter has been stored.
-         */
-        private int filterIndexWithinQuery = 0;
-
-        /**
          * Constructs an instance of {@link NewQueryVisitor}.
          *
          * @param sparql - The SPARQL query whose structure will be represented
@@ -378,6 +378,7 @@ public class SparqlFluoQueryBuilder {
 
         @Override
         public void meet(final Filter node) {
+            
             // Get or create a builder for this node populated with the known metadata.
             final String filterId = nodeIds.getOrMakeId(node);
 
@@ -387,8 +388,13 @@ public class SparqlFluoQueryBuilder {
                 fluoQueryBuilder.addFilterMetadata(filterBuilder);
             }
 
-            filterBuilder.setOriginalSparql(sparql);
-            filterBuilder.setFilterIndexWithinSparql(filterIndexWithinQuery++);
+            String filterString;
+            try {
+                filterString = FilterSerializer.serialize(node);
+            } catch (FilterParseException e) {
+                throw new RuntimeException(e);
+            }
+            filterBuilder.setFilterSparql(filterString);
 
             final QueryModelNode child = node.getArg();
             if(child == null) {
@@ -406,6 +412,47 @@ public class SparqlFluoQueryBuilder {
             // Walk to the next node.
             super.meet(node);
         }
+        
+        public void meetOther(final QueryModelNode qNode) {
+            if (qNode instanceof PeriodicQueryNode) {
+                PeriodicQueryNode node = (PeriodicQueryNode) qNode;
+                // Get or create a builder for this node populated with the
+                // known metadata.
+                final String periodicId = nodeIds.getOrMakeId(node);
+
+                PeriodicQueryMetadata.Builder periodicBuilder = fluoQueryBuilder.getPeriodicQueryBuilder().orNull();
+                if (periodicBuilder == null) {
+                    periodicBuilder = PeriodicQueryMetadata.builder();
+                    periodicBuilder.setNodeId(periodicId);
+                    fluoQueryBuilder.addPeriodicQueryMetadata(periodicBuilder);
+                }
+                periodicBuilder.setWindowSize(node.getWindowSize());
+                periodicBuilder.setPeriod(node.getPeriod());
+                periodicBuilder.setTemporalVariable(node.getTemporalVariable());
+                periodicBuilder.setUnit(node.getUnit());
+
+                final QueryModelNode child = node.getArg();
+                if (child == null) {
+                    throw new IllegalArgumentException("PeriodicQueryNode child arg connot be null.");
+                }
+
+                final String childNodeId = nodeIds.getOrMakeId(child);
+                periodicBuilder.setChildNodeId(childNodeId);
+
+                // Update the child node's metadata.
+                final Set<String> childVars = getVars((TupleExpr) child);
+                final VariableOrder childVarOrder = new VariableOrder(childVars);
+                setChildMetadata(childNodeId, childVarOrder, periodicId);
+
+                // update variable order of this node and all ancestors to
+                // include BIN_ID binding as
+                // first variable in the ordering
+                PeriodicQueryUtil.updateVarOrdersToIncludeBin(fluoQueryBuilder, periodicId);
+                // Walk to the next node.
+                node.getArg().visit(this);
+            } 
+        }
+        
 
         @Override
         public void meet(final Projection node) {
@@ -553,10 +600,24 @@ public class SparqlFluoQueryBuilder {
 
             case QUERY:
                 throw new IllegalArgumentException("A QUERY node cannot be the child of another node.");
+            
             case CONSTRUCT:
                 throw new IllegalArgumentException("A CONSTRUCT node cannot be the child of another node.");
+            
+            case PERIODIC_QUERY:
+                PeriodicQueryMetadata.Builder periodicQueryBuilder = fluoQueryBuilder.getPeriodicQueryBuilder().orNull();
+                if (periodicQueryBuilder == null) {
+                    periodicQueryBuilder = PeriodicQueryMetadata.builder();
+                    periodicQueryBuilder.setNodeId(childNodeId);
+                    fluoQueryBuilder.addPeriodicQueryMetadata(periodicQueryBuilder);
+                }
+                periodicQueryBuilder.setVarOrder(childVarOrder);
+                periodicQueryBuilder.setParentNodeId(parentNodeId);
+                break;
+                
             default:
                 throw new IllegalArgumentException("Unsupported NodeType: " + childType);
+
             }
         }
         

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FilterSerializer.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FilterSerializer.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FilterSerializer.java
new file mode 100644
index 0000000..73f3447
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FilterSerializer.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.util;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.openrdf.query.algebra.Filter;
+import org.openrdf.query.algebra.SingletonSet;
+import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
+import org.openrdf.query.parser.ParsedQuery;
+import org.openrdf.query.parser.ParsedTupleQuery;
+import org.openrdf.query.parser.sparql.SPARQLParser;
+import org.openrdf.queryrender.sparql.SPARQLQueryRenderer;
+
+/**
+ * Class for creating a String representation of a given Filter, and for
+ * converting the String representation of the Filter back to the Filter.
+ *
+ */
+public class FilterSerializer {
+
+    private static final SPARQLQueryRenderer renderer = new SPARQLQueryRenderer();
+    private static final SPARQLParser parser = new SPARQLParser();
+    
+    /**
+     * Converts a {@link Filter} to a SPARQL query containing only the SPARQL representation
+ * of the Filter along with a Select clause that returns all variables.  The argument of the
+     * Filter is replaced by a {@link SingletonSet} so that the body of the SPARQL query consists of only a
+     * single Filter clause.  
+     * @param filter - Filter to be serialized
+     * @return - SPARQL String containing a single Filter clause that represents the serialized Filter
+     * @throws FilterParseException
+     */
+    public static String serialize(Filter filter) throws FilterParseException {
+        Filter clone = filter.clone();
+        clone.setArg(new SingletonSet());
+        try {
+            return renderer.render(new ParsedTupleQuery(clone));
+        } catch (Exception e) {
+            throw new FilterParseException("Unable to parse Filter.", e);
+        }
+    }
+    
+    /**
+     * Converts a SPARQL query consisting of a single Filter clause back to a Filter.
+     * @param sparql - SPARQL query representing a Filter
+     * @return - parsed Filter included in the SPARQL query
+     * @throws FilterParseException
+     */
+    public static Filter deserialize(String sparql) throws FilterParseException {
+        
+        try {
+            ParsedQuery pq = parser.parseQuery(sparql, null);
+            FilterVisitor visitor = new FilterVisitor();
+            pq.getTupleExpr().visit(visitor);
+            Set<Filter> filters = visitor.getFilters();
+            
+            if(filters.size() != 1) {
+                throw new FilterParseException("Filter String must contain only one Filter.");
+            }
+            
+            return filters.iterator().next();
+            
+        } catch (Exception e) {
+            throw new FilterParseException("Unable to parse Filter.", e);
+        }
+    }
+    
+    public static class FilterVisitor extends QueryModelVisitorBase<RuntimeException> {
+
+        private Set<Filter> filters;
+        
+        public FilterVisitor() {
+            filters = new HashSet<>();
+        }
+
+        public Set<Filter> getFilters() {
+            return filters;
+        }
+
+        public void meet(Filter node) {
+            filters.add(node);
+        }
+    }
+    
+    public static class FilterParseException extends Exception {
+
+        private static final long serialVersionUID = 1L;
+        
+        /**
+         * Constructs an instance of {@link FilterParseException}.
+         *
+         * @param message - Explains why this exception is being thrown.
+         */
+        public FilterParseException(final String message) {
+            super(message);
+        }
+
+        /**
+         * Constructs an instance of {@link FilterParseException}.
+         *
+         * @param message - Explains why this exception is being thrown.
+         * @param cause - The exception that caused this one to be thrown.
+         */
+        public FilterParseException(final String message, final Throwable cause) {
+            super(message, cause);
+        }
+    }
+    
+}
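
A minimal round-trip sketch, assuming a SPARQL query whose body is a single
Filter clause:

    import org.apache.rya.indexing.pcj.fluo.app.util.FilterSerializer;
    import org.openrdf.query.algebra.Filter;

    public class FilterSerializerExample {
        public static void main(String[] args) throws Exception {
            // A query whose body is a single Filter clause.
            String sparql = "select * { filter(?x > 5) }";

            // Recover the Filter node from the SPARQL string ...
            Filter filter = FilterSerializer.deserialize(sparql);

            // ... and render it back; the Filter's argument is swapped for a
            // SingletonSet so only the filter clause is rendered.
            String rendered = FilterSerializer.serialize(filter);
            System.out.println(rendered);
        }
    }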

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FluoClientFactory.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FluoClientFactory.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FluoClientFactory.java
new file mode 100644
index 0000000..9446c87
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/FluoClientFactory.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.util;
+
+import java.util.Optional;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.config.FluoConfiguration;
+import org.apache.fluo.core.client.FluoClientImpl;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+
+/**
+ * Factory for creating {@link FluoClient}s.
+ *
+ */
+public class FluoClientFactory {
+
+    /**
+     * Creates a FluoClient
+     * @param appName - name of Fluo application
+     * @param tableName - name of Fluo table
+     * @param conf - AccumuloConfiguration (must contain Accumulo User, Accumulo Instance, Accumulo Password, and Accumulo Zookeepers)
+     * @return FluoClient for connecting to Fluo
+     */
+    public static FluoClient getFluoClient(String appName, Optional<String> tableName, AccumuloRdfConfiguration conf) {
+        FluoConfiguration fluoConfig = new FluoConfiguration();
+        fluoConfig.setAccumuloInstance(conf.getAccumuloInstance());
+        fluoConfig.setAccumuloUser(conf.getAccumuloUser());
+        fluoConfig.setAccumuloPassword(conf.getAccumuloPassword());
+        fluoConfig.setInstanceZookeepers(conf.getAccumuloZookeepers() + "/fluo");
+        fluoConfig.setAccumuloZookeepers(conf.getAccumuloZookeepers());
+        fluoConfig.setApplicationName(appName);
+        if (tableName.isPresent()) {
+            fluoConfig.setAccumuloTable(tableName.get());
+        } else {
+            fluoConfig.setAccumuloTable(appName);
+        }
+        return new FluoClientImpl(fluoConfig);
+    }
+}
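
A usage sketch; the application name is a made-up example, and the
configuration is assumed to already carry the Accumulo user, password,
instance, and zookeepers:

    import java.util.Optional;

    import org.apache.fluo.api.client.FluoClient;
    import org.apache.rya.accumulo.AccumuloRdfConfiguration;
    import org.apache.rya.indexing.pcj.fluo.app.util.FluoClientFactory;

    public class FluoClientFactoryExample {
        public static void main(String[] args) {
            AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
            // ... populate conf with Accumulo connection details here ...

            // With an empty table name, the Fluo table defaults to the app name.
            try (FluoClient client = FluoClientFactory.getFluoClient(
                    "rya_pcj_app", Optional.empty(), conf)) {
                // client.newTransaction(), client.newSnapshot(), ...
            }
        }
    }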

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/PeriodicQueryUtil.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/PeriodicQueryUtil.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/PeriodicQueryUtil.java
new file mode 100644
index 0000000..fd24af2
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/util/PeriodicQueryUtil.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.util;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.fluo.api.client.SnapshotBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryNode;
+import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+import org.openrdf.model.Value;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.algebra.Filter;
+import org.openrdf.query.algebra.FunctionCall;
+import org.openrdf.query.algebra.Group;
+import org.openrdf.query.algebra.Projection;
+import org.openrdf.query.algebra.QueryModelNode;
+import org.openrdf.query.algebra.Reduced;
+import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.UnaryTupleOperator;
+import org.openrdf.query.algebra.ValueConstant;
+import org.openrdf.query.algebra.ValueExpr;
+import org.openrdf.query.algebra.Var;
+import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
+import org.openrdf.query.parser.sparql.SPARQLParser;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Utility class for creating and executing Periodic Queries.
+ *
+ */
+public class PeriodicQueryUtil {
+
+    private static final ValueFactory vf = new ValueFactoryImpl();
+    public static final String PeriodicQueryURI = "http://org.apache.rya/function#periodic";
+    public static final String temporalNameSpace = "http://www.w3.org/2006/time#";
+    public static final URI DAYS = vf.createURI("http://www.w3.org/2006/time#days");
+    public static final URI HOURS = vf.createURI("http://www.w3.org/2006/time#hours");
+    public static final URI MINUTES = vf.createURI("http://www.w3.org/2006/time#minutes");
+
+    /**
+     * Returns a PeriodicQueryNode if the given {@link FunctionCall} represents a
+     * PeriodicQueryNode; otherwise an empty Optional is returned.
+     * @param functionCall - FunctionCall taken from a {@link TupleExpr}
+     * @param arg - TupleExpr that will be the argument of the PeriodicQueryNode if it is created
+     * @return - Optional containing a PeriodicQueryNode if the FunctionCall represents one, and an empty Optional otherwise
+     * @throws Exception
+     */
+    public static Optional<PeriodicQueryNode> getPeriodicQueryNode(FunctionCall functionCall, TupleExpr arg) throws Exception {
+
+        if (functionCall.getURI().equals(PeriodicQueryURI)) {
+            return Optional.of(parseAndSetValues(functionCall.getArgs(), arg));
+        }
+
+        return Optional.empty();
+    }
+
+    /**
+     * Finds and places a PeriodicQueryNode if the TupleExpr contains a FunctionCall
+     * that represents a PeriodicQueryNode.
+     * @param query - TupleExpr in which the PeriodicQueryNode will be placed and relocated toward the top of the query
+     */
+    public static void placePeriodicQueryNode(TupleExpr query) {
+        query.visit(new PeriodicQueryNodeVisitor());
+        query.visit(new PeriodicQueryNodeRelocator());
+    }
+    
+    public static Optional<PeriodicQueryNode> getPeriodicNode(String sparql) throws MalformedQueryException {
+        TupleExpr te = new SPARQLParser().parseQuery(sparql, null).getTupleExpr();
+        PeriodicQueryNodeVisitor periodicVisitor = new PeriodicQueryNodeVisitor();
+        te.visit(periodicVisitor);
+        return periodicVisitor.getPeriodicNode();
+    }
+
+    /**
+     * Locates Filter containing FunctionCall with PeriodicQuery info and
+     * replaces that Filter with a PeriodicQueryNode.
+     */
+    public static class PeriodicQueryNodeVisitor extends QueryModelVisitorBase<RuntimeException> {
+
+        private int count = 0;
+        private PeriodicQueryNode periodicNode;
+        
+        public Optional<PeriodicQueryNode> getPeriodicNode() {
+            return Optional.ofNullable(periodicNode);
+        }
+
+        public void meet(Filter node) {
+            if (node.getCondition() instanceof FunctionCall) {
+                try {
+                    Optional<PeriodicQueryNode> optNode = getPeriodicQueryNode((FunctionCall) node.getCondition(), node.getArg());
+                    if (optNode.isPresent()) {
+                        if (count > 0) {
+                            throw new IllegalArgumentException("Query cannot contain more than one PeriodicQueryNode");
+                        }
+                        periodicNode = optNode.get();
+                        node.replaceWith(periodicNode);
+                        count++;
+                        periodicNode.visit(this);
+                    } else {
+                        super.meet(node);
+                    }
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            } else {
+                super.meet(node);
+            }
+        }
+    }
+
+    /**
+     * Relocates PeriodicQueryNode so that it occurs below either the Construct
+     * Query Node, the Projection Query Node if no Aggregation exists, or the
+     * Group Node if an Aggregation exists. This limits the number of nodes
+     * whose variable order needs to be changed when the PeriodicQueryMetadata
+     * is added.
+     */
+    public static class PeriodicQueryNodeRelocator extends QueryModelVisitorBase<RuntimeException> {
+
+        private UnaryTupleOperator relocationParent;
+
+        public void meet(Projection node) {
+            relocationParent = node;
+            node.getArg().visit(this);
+        }
+
+        public void meet(Group node) {
+            relocationParent = node;
+            super.meet(node);
+        }
+
+        public void meet(Reduced node) {
+            relocationParent = node;
+            super.meet(node);
+        }
+
+        public void meet(Filter node) {
+            super.meet(node);
+        }
+
+        @Override
+        public void meetOther(QueryModelNode node) {
+
+            if (node instanceof PeriodicQueryNode) {
+                PeriodicQueryNode pNode = (PeriodicQueryNode) node;
+                // do nothing if PeriodicQueryNode already positioned correctly
+                if (pNode.equals(relocationParent.getArg())) {
+                    return;
+                }
+                // remove node from query
+                pNode.replaceWith(pNode.getArg());
+                // set node's child to be relocationParent's child
+                pNode.setArg(relocationParent.getArg());
+                // add node back into query below relocationParent
+                relocationParent.replaceChildNode(relocationParent.getArg(), pNode);
+            }
+        }
+    }
+
+    /**
+     * Adds the variable "periodicBinId" to the beginning of all {@link VariableOrder}s for the 
+     * Metadata nodes that appear above the PeriodicQueryNode.  This ensures that the binId is
+     * written first in the Row so that bins can be easily scanned and deleted.
+     * @param builder
+     * @param nodeId
+     */
+    public static void updateVarOrdersToIncludeBin(FluoQuery.Builder builder, String nodeId) {
+        NodeType type = NodeType.fromNodeId(nodeId).orNull();
+        if (type == null) {
+            throw new IllegalArgumentException("NodeId must be associated with an existing MetadataBuilder.");
+        }
+        switch (type) {
+        case AGGREGATION:
+            AggregationMetadata.Builder aggBuilder = builder.getAggregateBuilder(nodeId).orNull();
+            if (aggBuilder != null) {
+                VariableOrder varOrder = aggBuilder.getVariableOrder();
+                VariableOrder groupOrder = aggBuilder.getGroupByVariableOrder();
+                // update varOrder with BIN_ID
+                List<String> orderList = new ArrayList<>(varOrder.getVariableOrders());
+                orderList.add(0, IncrementalUpdateConstants.PERIODIC_BIN_ID);
+                aggBuilder.setVariableOrder(new VariableOrder(orderList));
+                // update groupVarOrder with BIN_ID
+                List<String> groupOrderList = new ArrayList<>(groupOrder.getVariableOrders());
+                groupOrderList.add(0, IncrementalUpdateConstants.PERIODIC_BIN_ID);
+                aggBuilder.setGroupByVariableOrder(new VariableOrder(groupOrderList));
+                // recursive call to update the VariableOrders of all ancestors
+                // of this node
+                updateVarOrdersToIncludeBin(builder, aggBuilder.getParentNodeId());
+            } else {
+                throw new IllegalArgumentException("There is no AggregationMetadata.Builder for the indicated Id.");
+            }
+            break;
+        case PERIODIC_QUERY:
+            PeriodicQueryMetadata.Builder periodicBuilder = builder.getPeriodicQueryBuilder().orNull();
+            if (periodicBuilder != null && periodicBuilder.getNodeId().equals(nodeId)) {
+                VariableOrder varOrder = periodicBuilder.getVarOrder();
+                List<String> orderList = new ArrayList<>(varOrder.getVariableOrders());
+                orderList.add(0, IncrementalUpdateConstants.PERIODIC_BIN_ID);
+                periodicBuilder.setVarOrder(new VariableOrder(orderList));
+                // recursive call to update the VariableOrders of all ancestors
+                // of this node
+                updateVarOrdersToIncludeBin(builder, periodicBuilder.getParentNodeId());
+            } else {
+                throw new IllegalArgumentException(
+                        "PeriodicQueryMetadata.Builder id does not match the indicated id.  A query cannot have more than one PeriodicQueryMetadata Node.");
+            }
+            break;
+        case QUERY:
+            QueryMetadata.Builder queryBuilder = builder.getQueryBuilder().orNull();
+            if (queryBuilder != null && queryBuilder.getNodeId().equals(nodeId)) {
+                VariableOrder varOrder = queryBuilder.getVariableOrder();
+                List<String> orderList = new ArrayList<>(varOrder.getVariableOrders());
+                orderList.add(0, IncrementalUpdateConstants.PERIODIC_BIN_ID);
+                queryBuilder.setVariableOrder(new VariableOrder(orderList));
+            } else {
+                throw new IllegalArgumentException(
+                        "QueryMetadata.Builder id does not match the indicated id.  A query cannot have more than one QueryMetadata Node.");
+            }
+            break;
+        default:
+            throw new IllegalArgumentException(
+                    "Incorrectly positioned PeriodicQueryNode.  The PeriodicQueryNode can only be positioned below Projections, Extensions, and ConstructQueryNodes.");
+        }
+    }
+
+    /**
+     * Collects all Metadata node Ids that are ancestors of the PeriodicQueryNode and contain the variable 
+     * {@link IncrementalUpdateConstants#PERIODIC_BIN_ID}.
+     * @param sx - Fluo Snapshot for scanning Fluo
+     * @param nodeId - root node of the PeriodicQuery
+     * @param ids - query ids of all metadata nodes appearing between root and PeriodicQueryMetadata node
+     */
+    public static void getPeriodicQueryNodeAncestorIds(SnapshotBase sx, String nodeId, Set<String> ids) {
+        NodeType nodeType = NodeType.fromNodeId(nodeId).orNull();
+        checkArgument(nodeType != null, "Invalid nodeId: " + nodeId + ". NodeId does not correspond to a valid NodeType.");
+        switch (nodeType) {
+        case FILTER:
+            ids.add(nodeId);
+            getPeriodicQueryNodeAncestorIds(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.FILTER_CHILD_NODE_ID).toString(), ids);
+            break;
+        case PERIODIC_QUERY:
+            ids.add(nodeId);
+            break;
+        case QUERY:
+            ids.add(nodeId);
+            getPeriodicQueryNodeAncestorIds(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.QUERY_CHILD_NODE_ID).toString(), ids);
+            break;
+        case AGGREGATION: 
+            ids.add(nodeId);
+            getPeriodicQueryNodeAncestorIds(sx, sx.get(Bytes.of(nodeId), FluoQueryColumns.AGGREGATION_CHILD_NODE_ID).toString(), ids);
+            break;
+        default:
+            throw new RuntimeException("Invalid NodeType.");
+        }
+    }
+
+    
+    
+    /**
+     * 
+     * @param values - Values extracted from FunctionCall representing the PeriodicQuery Filter
+     * @param arg - Argument of the PeriodicQueryNode that will be created (PeriodicQueryNode is a UnaryTupleOperator)
+     * @return - PeriodicQueryNode to be inserted in place of the original FunctionCall
+     * @throws Exception
+     */
+    private static PeriodicQueryNode parseAndSetValues(List<ValueExpr> values, TupleExpr arg) throws Exception {
+        // general validation of input
+        Preconditions.checkArgument(values.size() == 4);
+        Preconditions.checkArgument(values.get(0) instanceof Var);
+        Preconditions.checkArgument(values.get(1) instanceof ValueConstant);
+        Preconditions.checkArgument(values.get(2) instanceof ValueConstant);
+        Preconditions.checkArgument(values.get(3) instanceof ValueConstant);
+
+        // get temporal variable
+        Var var = (Var) values.get(0);
+        Preconditions.checkArgument(var.getValue() == null);
+        String tempVar = var.getName();
+
+        // get TimeUnit
+        TimeUnit unit = getTimeUnit((ValueConstant) values.get(3));
+
+        // get window and period durations
+        double windowDuration = parseTemporalDuration((ValueConstant) values.get(1));
+        double periodDuration = parseTemporalDuration((ValueConstant) values.get(2));
+        long windowMillis = convertToMillis(windowDuration, unit);
+        long periodMillis = convertToMillis(periodDuration, unit);
+        // the period must be strictly smaller than the window and must evenly divide it
+        Preconditions.checkArgument(windowMillis > periodMillis);
+        Preconditions.checkArgument(windowMillis % periodMillis == 0, "Period duration does not evenly divide window duration.");
+
+        // create PeriodicMetadata.Builder
+        return new PeriodicQueryNode(windowMillis, periodMillis, TimeUnit.MILLISECONDS, tempVar, arg);
+    }
+
+    private static TimeUnit getTimeUnit(ValueConstant val) {
+        Preconditions.checkArgument(val.getValue() instanceof URI);
+        URI uri = (URI) val.getValue();
+        Preconditions.checkArgument(uri.getNamespace().equals(temporalNameSpace));
+
+        switch (uri.getLocalName()) {
+        case "days":
+            return TimeUnit.DAYS;
+        case "hours":
+            return TimeUnit.HOURS;
+        case "minutes":
+            return TimeUnit.MINUTES;
+        default:
+            throw new IllegalArgumentException("Invalid time unit for Periodic Function.");
+        }
+    }
+
+    private static double parseTemporalDuration(ValueConstant valConst) {
+        Value val = valConst.getValue();
+        Preconditions.checkArgument(val instanceof Literal);
+        Literal literal = (Literal) val;
+        String stringVal = literal.getLabel();
+        URI dataType = literal.getDatatype();
+        Preconditions.checkArgument(dataType.equals(XMLSchema.DECIMAL) || dataType.equals(XMLSchema.DOUBLE)
+                || dataType.equals(XMLSchema.FLOAT) || dataType.equals(XMLSchema.INTEGER) || dataType.equals(XMLSchema.INT));
+        return Double.parseDouble(stringVal);
+    }
+
+    private static long convertToMillis(double duration, TimeUnit unit) {
+        Preconditions.checkArgument(duration > 0);
+
+        double convertedDuration = 0;
+        switch (unit) {
+        case DAYS:
+            convertedDuration = duration * 24 * 60 * 60 * 1000;
+            break;
+        case HOURS:
+            convertedDuration = duration * 60 * 60 * 1000;
+            break;
+        case MINUTES:
+            convertedDuration = duration * 60 * 1000;
+            break;
+        default:
+            throw new IllegalArgumentException("TimeUnit must be of type DAYS, HOURS, or MINUTES.");
+        }
+        // check that double representation has exact millis representation
+        Preconditions.checkArgument(convertedDuration == (long) convertedDuration);
+        return (long) convertedDuration;
+    }
+
+}
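
To tie the pieces together, a sketch of parsing a query that uses the Periodic
Function; the prefix declarations match the URIs defined above, while the
uri:hasTime predicate is a made-up example:

    import java.util.Optional;

    import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryNode;
    import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;

    public class PeriodicQueryUtilExample {
        public static void main(String[] args) throws Exception {
            // periodic(?temporalVar, window, period, unit): here a 12 hour
            // window with notifications every 6 hours.
            String sparql =
                    "prefix function: <http://org.apache.rya/function#> "
                  + "prefix time: <http://www.w3.org/2006/time#> "
                  + "select ?obs ?time where { "
                  + "  filter(function:periodic(?time, 12, 6, time:hours)) "
                  + "  ?obs <uri:hasTime> ?time . "
                  + "}";

            // The Filter holding the Function is replaced by a PeriodicQueryNode
            // whose window and period have been converted to millis.
            Optional<PeriodicQueryNode> node = PeriodicQueryUtil.getPeriodicNode(sparql);
            node.ifPresent(n -> System.out.println(n.getSignature()));
        }
    }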


[2/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/Notification.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/Notification.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/Notification.java
new file mode 100644
index 0000000..3e9e0d1
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/Notification.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+/**
+ * Notification Object used by the Periodic Query Service
+ * to inform workers to process results for a given Periodic
+ * Query with the indicated id.
+ *
+ */
+public interface Notification {
+
+    /**
+     * @return id of a Periodic Query
+     */
+    public String getId();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationCoordinatorExecutor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationCoordinatorExecutor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationCoordinatorExecutor.java
new file mode 100644
index 0000000..d53dc17
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationCoordinatorExecutor.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import java.util.concurrent.ScheduledExecutorService;
+
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+
+/**
+ * Object that manages the periodic notifications for the Periodic Query Service.
+ * This Object processes new requests for periodic updates by registering them with
+ * some sort of service that generates periodic updates (such as a {@link ScheduledExecutorService}).
+ *
+ */
+public interface NotificationCoordinatorExecutor extends LifeCycle {
+
+    /**
+     * Registers or deletes {@link CommandNotification}s with the periodic service to
+     * generate notifications at a regular interval indicated by the CommandNotification.
+     * @param notification - CommandNotification to be registered or deleted from the periodic update
+     * service.
+     */
+    public void processNextCommandNotification(CommandNotification notification);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationProcessor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationProcessor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationProcessor.java
new file mode 100644
index 0000000..4ac9089
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NotificationProcessor.java
@@ -0,0 +1,41 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+
+/**
+ * Object that processes new {@link TimestampedNotification}s generated by {@link NotificationCoordinatorExecutor}.
+ * It is expected that the NotificationCoordinatorExecutor will provide this Object with notifications to perform work
+ * via some sort of queuing service such as a BlockingQueue or Kafka.  This Object processes the notifications by retrieving
+ * query results associated with the Periodic Query id given by {@link TimestampedNotification#getId()}, parsing them
+ * and then providing them to another service to be exported.
+ *
+ */
+public interface NotificationProcessor {
+
+    /**
+     * Processes {@link TimestampedNotification}s by retrieving the Periodic Query results
+     * associated with the query id given by {@link TimestampedNotification#getId()}.
+     * @param notification - contains information about which query results to retrieve
+     */
+    public void processNotification(TimestampedNotification notification);
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/PeriodicNotificationClient.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/PeriodicNotificationClient.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/PeriodicNotificationClient.java
new file mode 100644
index 0000000..ff08733
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/PeriodicNotificationClient.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.rya.periodic.notification.notification.BasicNotification;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+
+/**
+ * Object to register {@link PeriodicNotification}s with an external queuing
+ * service to be handled by a {@link NotificationCoordinatorExecutor} service.
+ * The service will generate notifications to process Periodic Query results at regular
+ * intervals corresponding to the period of the PeriodicNotification.
+ *
+ */
+public interface PeriodicNotificationClient extends AutoCloseable {
+
+    /**
+     * Adds a new notification to be registered with the {@link NotificationCoordinatorExecutor}
+     * @param notification - notification to be added
+     */
+    public void addNotification(PeriodicNotification notification);
+    
+    /**
+     * Deletes a notification from the {@link NotificationCoordinatorExecutor}.
+     * @param notification - notification to be deleted
+     */
+    public void deleteNotification(BasicNotification notification);
+    
+    /**
+     * Deletes a notification from the {@link NotificationCoordinatorExecutor}.
+     * @param notification - id corresponding to the notification to be deleted
+     */
+    public void deleteNotification(String notificationId);
+    
+    /**
+     * Adds a new notification with the indicated id and period to the {@link NotificationCoordinatorExecutor}
+     * @param id - Periodic Query id
+     * @param period - period indicating frequency at which notifications will be generated
+     * @param delay - initial delay for starting periodic notifications
+     * @param unit - time unit of delay and period
+     */
+    public void addNotification(String id, long period, long delay, TimeUnit unit);
+    
+    /**
+     * Closes this client and releases any resources used to communicate with the queuing service.
+     */
+    public void close();
+    
+}

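A minimal usage sketch for the interface above, assuming an implementation of PeriodicNotificationClient obtained elsewhere (this hunk does not show one); the query id is a placeholder.

import java.util.concurrent.TimeUnit;

import org.apache.rya.periodic.notification.api.PeriodicNotificationClient;

public class PeriodicNotificationClientExample {
    public static void registerAndRemove(PeriodicNotificationClient client) {
        // Register a notification for the Periodic Query with the given id
        // (a placeholder here), firing every 15 seconds after a 5 second delay.
        client.addNotification("periodic-query-id", 15, 5, TimeUnit.SECONDS);

        // ... later, stop generating notifications for that query.
        client.deleteNotification("periodic-query-id");

        client.close();
    }
}
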
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicApplicationException.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicApplicationException.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicApplicationException.java
new file mode 100644
index 0000000..b2c3709
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicApplicationException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+/**
+ * Exception thrown when attempting to create a {@link PeriodicNotificationApplication}.
+ * Indicates that a factory was unable to create some component of the application 
+ * because something was configured incorrectly.
+ *
+ */
+public class PeriodicApplicationException extends Exception {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Creates a PeriodicApplicationException.
+     * @param message - message contained in Exception
+     */
+    public PeriodicApplicationException(String message) {
+        super(message);
+    }
+    
+    /**
+     * Creates a PeriodicApplicationException.
+     * @param message - message contained in Exception
+     * @param t - Exception that spawned this PeriodicApplicationException
+     */
+    public PeriodicApplicationException(String message, Throwable t) {
+        super(message, t);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplication.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplication.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplication.java
new file mode 100644
index 0000000..6dd7126
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplication.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.apache.rya.periodic.notification.api.BinPruner;
+import org.apache.rya.periodic.notification.api.LifeCycle;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+import org.apache.rya.periodic.notification.exporter.KafkaExporterExecutor;
+import org.apache.rya.periodic.notification.processor.NotificationProcessorExecutor;
+import org.apache.rya.periodic.notification.pruner.PeriodicQueryPrunerExecutor;
+import org.apache.rya.periodic.notification.registration.kafka.KafkaNotificationProvider;
+import org.openrdf.query.algebra.evaluation.function.Function;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * The PeriodicNotificationApplication runs the key components of the Periodic
+ * Query Service. It consists of a {@link KafkaNotificationProvider}, a
+ * {@link NotificationCoordinatorExecutor}, a
+ * {@link NotificationProcessorExecutor}, a {@link KafkaExporterExecutor}, and a
+ * {@link PeriodicQueryPrunerExecutor}. These services run in coordination with
+ * one another to perform the following tasks in the indicated order: <br>
+ * <li>Retrieve new requests to generate periodic notifications from Kafka
+ * <li>Register them with the {@link NotificationCoordinatorExecutor} to
+ * generate the periodic notifications
+ * <li>As notifications are generated, they are added to a work queue that is
+ * monitored by the {@link NotificationProcessorExecutor}.
+ * <li>The processor processes the notifications by reading all of the query
+ * results corresponding to the bin and query id indicated by the notification.
+ * <li>After reading the results, the processor adds a {@link BindingSetRecord}
+ * to a work queue monitored by the {@link KafkaExporterExecutor}.
+ * <li>The processor then adds a {@link NodeBin} to a work queue monitored by the
+ * {@link BinPruner}
+ * <li>The exporter processes the BindingSetRecord by exporting the result to
+ * Kafka
+ * <li>The BinPruner processes the NodeBin by cleaning up the results for the
+ * indicated bin and query in Accumulo and Fluo. <br>
+ * <br>
+ * The purpose of this Periodic Query Service is to make it possible to
+ * answer Periodic Queries using the Rya Fluo application, where a Periodic
+ * Query is any query requesting periodic updates about events that occurred
+ * within a given window of time preceding the current moment. This is also
+ * known as a rolling window query. Periodic Queries can be expressed in SPARQL by
+ * including the {@link Function} indicated by the URI
+ * {@link PeriodicQueryUtil#PeriodicQueryURI}. The user must provide this
+ * Function with the following arguments: the temporal variable in the query
+ * that will be filtered on, the window of time that events must occur within,
+ * the period at which the user wants to receive updates, and the time unit. The
+ * following query requests all observations that occurred within the last
+ * minute and requests updates every 15 seconds. It also performs a count on
+ * those observations. <br>
+ * <br>
+ * <li>prefix function: http://org.apache.rya/function#
+ * <li>prefix time: http://www.w3.org/2006/time#
+ * <li>select (count(?obs) as ?total) where {
+ * <li>Filter(function:periodic(?time, 1, .25, time:minutes))
+ * <li>?obs uri:hasTime ?time.
+ * <li>?obs uri:hasId ?id }
+ */
+public class PeriodicNotificationApplication implements LifeCycle {
+
+    private static final Logger log = Logger.getLogger(PeriodicNotificationApplication.class);
+    private NotificationCoordinatorExecutor coordinator;
+    private KafkaNotificationProvider provider;
+    private PeriodicQueryPrunerExecutor pruner;
+    private NotificationProcessorExecutor processor;
+    private KafkaExporterExecutor exporter;
+    private boolean running = false;
+
+    /**
+     * Creates a PeriodicNotificationApplication
+     * @param provider - {@link KafkaNotificationProvider} that retrieves new Notification requests from Kafka
+     * @param coordinator - {@link NotificationCoordinatorExecutor} that manages PeriodicNotifications.
+     * @param processor - {@link NotificationProcessorExecutor} that processes PeriodicNotifications
+     * @param exporter - {@link KafkaExporterExecutor} that exports periodic results
+     * @param pruner - {@link PeriodicQueryPrunerExecutor} that cleans up old periodic bins
+     */
+    public PeriodicNotificationApplication(KafkaNotificationProvider provider, NotificationCoordinatorExecutor coordinator,
+            NotificationProcessorExecutor processor, KafkaExporterExecutor exporter, PeriodicQueryPrunerExecutor pruner) {
+        this.provider = Preconditions.checkNotNull(provider);
+        this.coordinator = Preconditions.checkNotNull(coordinator);
+        this.processor = Preconditions.checkNotNull(processor);
+        this.exporter = Preconditions.checkNotNull(exporter);
+        this.pruner = Preconditions.checkNotNull(pruner);
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            log.info("Starting PeriodicNotificationApplication.");
+            coordinator.start();
+            provider.start();
+            processor.start();
+            pruner.start();
+            exporter.start();
+            running = true;
+        }
+    }
+
+    @Override
+    public void stop() {
+        log.info("Stopping PeriodicNotificationApplication.");
+        provider.stop();
+        coordinator.stop();
+        processor.stop();
+        pruner.stop();
+        exporter.stop();
+        running = false;
+    }
+
+    /**
+     * @return boolean indicating whether the application is running
+     */
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+
+        private PeriodicQueryPrunerExecutor pruner;
+        private KafkaNotificationProvider provider;
+        private NotificationProcessorExecutor processor;
+        private KafkaExporterExecutor exporter;
+        private NotificationCoordinatorExecutor coordinator;
+
+        /**
+         * Sets the PeriodicQueryPrunerExecutor.
+         * @param pruner - PeriodicQueryPrunerExecutor for cleaning up old periodic bins
+         * @return this Builder for chaining method calls
+         */
+        public Builder setPruner(PeriodicQueryPrunerExecutor pruner) {
+            this.pruner = pruner;
+            return this;
+        }
+
+        /**
+         * Sets the KafkaNotificationProvider
+         * @param provider - KafkaNotificationProvider for retrieving new periodic notification requests from Kafka
+         * @return this Builder for chaining method calls
+         */
+        public Builder setProvider(KafkaNotificationProvider provider) {
+            this.provider = provider;
+            return this;
+        }
+
+        /**
+         * Sets the NotificationProcessorExecutor.
+         * @param processor - NotificationProcessorExecutor for processing periodic notifications
+         * @return this Builder for chaining method calls
+         */
+        public Builder setProcessor(NotificationProcessorExecutor processor) {
+            this.processor = processor;
+            return this;
+        }
+
+        /**
+         * Sets KafkaExporterExecutor
+         * @param exporter for exporting periodic query results to Kafka
+         * @return this Builder for chaining method calls
+         */
+        public Builder setExporter(KafkaExporterExecutor exporter) {
+            this.exporter = exporter;
+            return this;
+        }
+
+        /**
+         * Sets NotificationCoordinatorExecutor
+         * @param coordinator for managing and generating periodic notifications
+         * @return this Builder for chaining method calls
+         */
+        public Builder setCoordinator(NotificationCoordinatorExecutor coordinator) {
+            this.coordinator = coordinator;
+            return this;
+        }
+
+        /**
+         * Creates a PeriodicNotificationApplication
+         * @return PeriodicNotificationApplication for periodically polling Rya Fluo Application
+         */
+        public PeriodicNotificationApplication build() {
+            return new PeriodicNotificationApplication(provider, coordinator, processor, exporter, pruner);
+        }
+
+    }
+
+}

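A minimal sketch of wiring the application together with the Builder above, assuming the five component executors have already been constructed (for example, by the factory that appears later in this commit).

import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
import org.apache.rya.periodic.notification.application.PeriodicNotificationApplication;
import org.apache.rya.periodic.notification.exporter.KafkaExporterExecutor;
import org.apache.rya.periodic.notification.processor.NotificationProcessorExecutor;
import org.apache.rya.periodic.notification.pruner.PeriodicQueryPrunerExecutor;
import org.apache.rya.periodic.notification.registration.kafka.KafkaNotificationProvider;

public class PeriodicApplicationLifecycleExample {
    public static PeriodicNotificationApplication startApplication(KafkaNotificationProvider provider,
            NotificationCoordinatorExecutor coordinator, NotificationProcessorExecutor processor,
            KafkaExporterExecutor exporter, PeriodicQueryPrunerExecutor pruner) {
        PeriodicNotificationApplication app = PeriodicNotificationApplication.builder()
                .setProvider(provider)       // consumes new notification requests from Kafka
                .setCoordinator(coordinator) // schedules the periodic notifications
                .setProcessor(processor)     // reads query results for each notification
                .setExporter(exporter)       // publishes results to Kafka
                .setPruner(pruner)           // cleans up old bins in Accumulo and Fluo
                .build();
        app.start();
        return app;
    }
}
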
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationConfiguration.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationConfiguration.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationConfiguration.java
new file mode 100644
index 0000000..d69efe5
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationConfiguration.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+import java.util.Properties;
+
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+
+import jline.internal.Preconditions;
+
+/**
+ * Configuration object for creating a {@link PeriodicNotificationApplication}.
+ */
+public class PeriodicNotificationApplicationConfiguration extends AccumuloRdfConfiguration {
+
+    public static String FLUO_APP_NAME = "fluo.app.name";
+    public static String FLUO_TABLE_NAME = "fluo.table.name";
+    public static String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+    public static String NOTIFICATION_TOPIC = "kafka.notification.topic";
+    public static String NOTIFICATION_GROUP_ID = "kafka.notification.group.id";
+    public static String NOTIFICATION_CLIENT_ID = "kafka.notification.client.id";
+    public static String COORDINATOR_THREADS = "cep.coordinator.threads";
+    public static String PRODUCER_THREADS = "cep.producer.threads";
+    public static String EXPORTER_THREADS = "cep.exporter.threads";
+    public static String PROCESSOR_THREADS = "cep.processor.threads";
+    public static String PRUNER_THREADS = "cep.pruner.threads";
+    
+    public PeriodicNotificationApplicationConfiguration() {}
+    
+    /**
+     * Creates a PeriodicNotificationApplicationConfiguration object from a Properties object.  This method assumes
+     * that all values in the Properties file are Strings and that the Properties file uses the keys below.
+     * See rya.cep/cep.integration.tests/src/test/resources/properties/notification.properties for an example.
+     * <br>
+     * <ul>
+     * <li>"accumulo.auths" - String of Accumulo authorizations. Default is empty String.
+     * <li>"accumulo.instance" - Accumulo instance name (required)
+     * <li>"accumulo.user" - Accumulo user (required)
+     * <li>"accumulo.password" - Accumulo password (required)
+     * <li>"accumulo.rya.prefix" - Prefix for Accumulo backed Rya instance.  Default is "rya_"
+     * <li>"accumulo.zookeepers" - Zookeepers for underlying Accumulo instance (required)
+     * <li>"fluo.app.name" - Name of Fluo Application (required)
+     * <li>"fluo.table.name" - Name of Fluo Table (required)
+     * <li>"kafka.bootstrap.servers" - Kafka Bootstrap servers for Producers and Consumers (required)
+     * <li>"kafka.notification.topic" - Topic to which new Periodic Notifications are published. Default is "notifications".
+     * <li>"kafka.notification.client.id" - Client Id for notification topic.  Default is "consumer0"
+     * <li>"kafka.notification.group.id" - Group Id for notification topic.  Default is "group0"
+     * <li>"cep.coordinator.threads" - Number of threads used by coordinator. Default is 1.
+     * <li>"cep.producer.threads" - Number of threads used by producer.  Default is 1.
+     * <li>"cep.exporter.threads" - Number of threads used by exporter.  Default is 1.
+     * <li>"cep.processor.threads" - Number of threads used by processor.  Default is 1.
+     * <li>"cep.pruner.threads" - Number of threads used by pruner.  Default is 1.
+     * </ul>
+     * <br>
+     * @param props - Properties object containing the configuration parameters listed above
+     */
+    public PeriodicNotificationApplicationConfiguration(Properties props) {
+       super(fromProperties(props));
+       setFluoAppName(props.getProperty(FLUO_APP_NAME));
+       setFluoTableName(props.getProperty(FLUO_TABLE_NAME));
+       setBootStrapServers(props.getProperty(KAFKA_BOOTSTRAP_SERVERS));
+       setNotificationClientId(props.getProperty(NOTIFICATION_CLIENT_ID, "consumer0"));
+       setNotificationTopic(props.getProperty(NOTIFICATION_TOPIC, "notifications"));
+       setNotificationGroupId(props.getProperty(NOTIFICATION_GROUP_ID, "group0"));
+       setProducerThreads(Integer.parseInt(props.getProperty(PRODUCER_THREADS, "1")));
+       setProcessorThreads(Integer.parseInt(props.getProperty(PROCESSOR_THREADS, "1")));
+       setExporterThreads(Integer.parseInt(props.getProperty(EXPORTER_THREADS, "1")));
+       setPrunerThreads(Integer.parseInt(props.getProperty(PRUNER_THREADS, "1")));
+       setCoordinatorThreads(Integer.parseInt(props.getProperty(COORDINATOR_THREADS, "1")));
+    }
+    
+    /**
+     * Sets the name of the Fluo Application
+     * @param fluoAppName 
+     */
+    public void setFluoAppName(String fluoAppName) {
+        set(FLUO_APP_NAME, Preconditions.checkNotNull(fluoAppName));
+    }
+    
+    /**
+     * Sets the name of the Fluo table
+     * @param fluoTableName
+     */
+    public void setFluoTableName(String fluoTableName) {
+       set(FLUO_TABLE_NAME, Preconditions.checkNotNull(fluoTableName)); 
+    }
+    
+    /**
+     * Sets the Kafka bootstrap servers
+     * @param bootStrapServers
+     */
+    public void setBootStrapServers(String bootStrapServers) {
+        set(KAFKA_BOOTSTRAP_SERVERS, Preconditions.checkNotNull(bootStrapServers)); 
+    }
+    
+    /**
+     * Sets the Kafka topic name for new notification requests
+     * @param notificationTopic
+     */
+    public void setNotificationTopic(String notificationTopic) {
+        set(NOTIFICATION_TOPIC, Preconditions.checkNotNull(notificationTopic));
+    }
+    
+    /**
+     * Sets the GroupId for new notification request topic
+     * @param notificationGroupId
+     */
+    public void setNotificationGroupId(String notificationGroupId) {
+        set(NOTIFICATION_GROUP_ID, Preconditions.checkNotNull(notificationGroupId));
+    }
+    
+    /**
+     * Sets the ClientId for the Kafka notification topic
+     * @param notificationClientId
+     */
+    public void setNotificationClientId(String notificationClientId) {
+        set(NOTIFICATION_CLIENT_ID, Preconditions.checkNotNull(notificationClientId));
+    }
+    
+    /**
+     * Sets the number of threads for the coordinator
+     * @param threads
+     */
+    public void setCoordinatorThreads(int threads) {
+        setInt(COORDINATOR_THREADS, threads);
+    }
+    
+    /**
+     * Sets the number of threads for the exporter
+     * @param threads
+     */
+    public void setExporterThreads(int threads) {
+        setInt(EXPORTER_THREADS, threads);
+    }
+    
+    /**
+     * Sets the number of threads for the producer for reading new periodic notifications
+     * @param threads
+     */
+    public void setProducerThreads(int threads) {
+        setInt(PRODUCER_THREADS, threads);
+    }
+    
+    /**
+     * Sets the number of threads for the bin pruner
+     * @param threads
+     */
+    public void setPrunerThreads(int threads) {
+        setInt(PRUNER_THREADS, threads);
+    }
+    
+    /**
+     * Sets the number of threads for the Notification processor
+     * @param threads
+     */
+    public void setProcessorThreads(int threads) {
+        setInt(PROCESSOR_THREADS, threads);
+    }
+    
+    /**
+     * @return name of the Fluo application
+     */
+    public String getFluoAppName() {
+        return get(FLUO_APP_NAME);
+    }
+    
+    /**
+     * @return name of the Fluo table
+     */
+    public String getFluoTableName() {
+       return get(FLUO_TABLE_NAME); 
+    }
+    
+    /**
+     * @return Kafka bootstrap servers
+     */
+    public String getBootStrapServers() {
+        return get(KAFKA_BOOTSTRAP_SERVERS); 
+    }
+    
+    /**
+     * @return notification topic
+     */
+    public String getNotificationTopic() {
+        return get(NOTIFICATION_TOPIC, "notifications");
+    }
+    
+    /**
+     * @return Kafka GroupId for the notification topic
+     */
+    public String getNotificationGroupId() {
+        return get(NOTIFICATION_GROUP_ID, "group0");
+    }
+    
+    /**
+     * @return Kafka ClientId for the notification topic
+     */
+    public String getNotificationClientId() {
+        return get(NOTIFICATION_CLIENT_ID, "consumer0");
+    }
+    
+    /**
+     * @return the number of threads for the coordinator
+     */
+    public int getCoordinatorThreads() {
+        return getInt(COORDINATOR_THREADS, 1);
+    }
+    
+    /**
+     * @return the number of threads for the exporter
+     */
+    public int getExporterThreads() {
+        return getInt(EXPORTER_THREADS, 1);
+    }
+    
+    /**
+     * @return the number of threads for the notification producer
+     */
+    public int getProducerThreads() {
+        return getInt(PRODUCER_THREADS, 1);
+    }
+    
+    /**
+     * @return the number of threads for the bin pruner
+     */
+    public int getPrunerThreads() {
+        return getInt(PRUNER_THREADS, 1);
+    }
+    
+    /**
+     * @return number of threads for the processor
+     */
+    public int getProcessorThreads() {
+        return getInt(PROCESSOR_THREADS, 1);
+    }
+    
+}

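A minimal sketch of building this configuration from a Properties object using the keys documented above; all values are placeholders for a local deployment.

import java.util.Properties;

import org.apache.rya.periodic.notification.application.PeriodicNotificationApplicationConfiguration;

public class ConfigurationExample {
    public static PeriodicNotificationApplicationConfiguration buildConfiguration() {
        Properties props = new Properties();
        // Required connection parameters (values here are placeholders).
        props.setProperty("accumulo.instance", "myInstance");
        props.setProperty("accumulo.user", "root");
        props.setProperty("accumulo.password", "secret");
        props.setProperty("accumulo.zookeepers", "localhost:2181");
        props.setProperty("fluo.app.name", "periodic.app");
        props.setProperty("fluo.table.name", "periodic.table");
        props.setProperty("kafka.bootstrap.servers", "localhost:9092");

        // Optional tuning parameters fall back to the defaults listed above.
        props.setProperty("cep.exporter.threads", "2");

        return new PeriodicNotificationApplicationConfiguration(props);
    }
}
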
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationFactory.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationFactory.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationFactory.java
new file mode 100644
index 0000000..248b2bf
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationFactory.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+import java.util.Optional;
+import java.util.Properties;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.rya.indexing.pcj.fluo.app.util.FluoClientFactory;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.coordinator.PeriodicNotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+import org.apache.rya.periodic.notification.exporter.KafkaExporterExecutor;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.apache.rya.periodic.notification.processor.NotificationProcessorExecutor;
+import org.apache.rya.periodic.notification.pruner.PeriodicQueryPrunerExecutor;
+import org.apache.rya.periodic.notification.recovery.PeriodicNotificationProvider;
+import org.apache.rya.periodic.notification.registration.kafka.KafkaNotificationProvider;
+import org.apache.rya.periodic.notification.serialization.BindingSetSerDe;
+import org.apache.rya.periodic.notification.serialization.CommandNotificationSerializer;
+import org.openrdf.query.BindingSet;
+
+/**
+ * Factory for creating a {@link PeriodicNotificationApplication}.
+ */
+public class PeriodicNotificationApplicationFactory {
+
+    /**
+     * Create a PeriodicNotificationApplication.
+     * @param props - Properties file that specifies the parameters needed to create the application
+     * @return PeriodicNotificationApplication to periodically poll Rya Fluo for new results
+     * @throws PeriodicApplicationException
+     */
+    public static PeriodicNotificationApplication getPeriodicApplication(Properties props) throws PeriodicApplicationException {
+        PeriodicNotificationApplicationConfiguration conf = new PeriodicNotificationApplicationConfiguration(props);
+        Properties kafkaProps = getKafkaProperties(conf);
+
+        BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
+        BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
+        BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();
+
+        FluoClient fluo = null;
+        try {
+            PeriodicQueryResultStorage storage = getPeriodicQueryResultStorage(conf);
+            fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf);
+            NotificationCoordinatorExecutor coordinator = getCoordinator(conf.getCoordinatorThreads(), notifications);
+            addRegisteredNotices(coordinator, fluo.newSnapshot());
+            KafkaExporterExecutor exporter = getExporter(conf.getExporterThreads(), kafkaProps, bindingSets);
+            PeriodicQueryPrunerExecutor pruner = getPruner(storage, fluo, conf.getPrunerThreads(), bins);
+            NotificationProcessorExecutor processor = getProcessor(storage, notifications, bins, bindingSets, conf.getProcessorThreads());
+            KafkaNotificationProvider provider = getProvider(conf.getProducerThreads(), conf.getNotificationTopic(), coordinator, kafkaProps);
+            return PeriodicNotificationApplication.builder().setCoordinator(coordinator).setProvider(provider).setExporter(exporter)
+                    .setProcessor(processor).setPruner(pruner).build();
+        } catch (AccumuloException | AccumuloSecurityException e) {
+            throw new PeriodicApplicationException(e.getMessage(), e);
+        }
+    }
+    
+    private static void addRegisteredNotices(NotificationCoordinatorExecutor coord, Snapshot sx) {
+        coord.start();
+        PeriodicNotificationProvider provider = new PeriodicNotificationProvider();
+        provider.processRegisteredNotifications(coord, sx);
+    }
+
+    private static NotificationCoordinatorExecutor getCoordinator(int numThreads, BlockingQueue<TimestampedNotification> notifications) {
+        return new PeriodicNotificationCoordinatorExecutor(numThreads, notifications);
+    }
+
+    private static KafkaExporterExecutor getExporter(int numThreads, Properties props, BlockingQueue<BindingSetRecord> bindingSets) {
+        KafkaProducer<String, BindingSet> producer = new KafkaProducer<>(props, new StringSerializer(), new BindingSetSerDe());
+        return new KafkaExporterExecutor(producer, numThreads, bindingSets);
+    }
+
+    private static PeriodicQueryPrunerExecutor getPruner(PeriodicQueryResultStorage storage, FluoClient fluo, int numThreads,
+            BlockingQueue<NodeBin> bins) {
+        return new PeriodicQueryPrunerExecutor(storage, fluo, numThreads, bins);
+    }
+
+    private static NotificationProcessorExecutor getProcessor(PeriodicQueryResultStorage periodicStorage,
+            BlockingQueue<TimestampedNotification> notifications, BlockingQueue<NodeBin> bins, BlockingQueue<BindingSetRecord> bindingSets,
+            int numThreads) {
+        return new NotificationProcessorExecutor(periodicStorage, notifications, bins, bindingSets, numThreads);
+    }
+
+    private static KafkaNotificationProvider getProvider(int numThreads, String topic, NotificationCoordinatorExecutor coord,
+            Properties props) {
+        return new KafkaNotificationProvider(topic, new StringDeserializer(), new CommandNotificationSerializer(), props, coord,
+                numThreads);
+    }
+
+    private static PeriodicQueryResultStorage getPeriodicQueryResultStorage(PeriodicNotificationApplicationConfiguration conf)
+            throws AccumuloException, AccumuloSecurityException {
+        Instance instance = new ZooKeeperInstance(conf.getAccumuloInstance(), conf.getAccumuloZookeepers());
+        Connector conn = instance.getConnector(conf.getAccumuloUser(), new PasswordToken(conf.getAccumuloPassword()));
+        String ryaInstance = conf.getTablePrefix();
+        return new AccumuloPeriodicQueryResultStorage(conn, ryaInstance);
+    }
+    
+    private static Properties getKafkaProperties(PeriodicNotificationApplicationConfiguration conf) { 
+        Properties kafkaProps = new Properties();
+        kafkaProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, conf.getBootStrapServers());
+        kafkaProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, conf.getNotificationClientId());
+        kafkaProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, conf.getNotificationGroupId());
+        kafkaProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        return kafkaProps;
+    }
+
+}

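A minimal end-to-end sketch using the factory above; the properties file path is a placeholder, and the properties must contain the required keys documented in PeriodicNotificationApplicationConfiguration.

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Properties;

import org.apache.rya.periodic.notification.application.PeriodicNotificationApplication;
import org.apache.rya.periodic.notification.application.PeriodicNotificationApplicationFactory;

public class FactoryExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder path to a properties file with the required keys.
        try (InputStream in = new FileInputStream("notification.properties")) {
            props.load(in);
        }
        PeriodicNotificationApplication app =
                PeriodicNotificationApplicationFactory.getPeriodicApplication(props);
        app.start();

        // ... run until shutdown is requested, then release resources.
        app.stop();
    }
}
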
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/coordinator/PeriodicNotificationCoordinatorExecutor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/coordinator/PeriodicNotificationCoordinatorExecutor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/coordinator/PeriodicNotificationCoordinatorExecutor.java
new file mode 100644
index 0000000..0486244
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/coordinator/PeriodicNotificationCoordinatorExecutor.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.coordinator;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.rya.periodic.notification.api.Notification;
+import org.apache.rya.periodic.notification.api.NotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.api.NotificationProcessor;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.apache.rya.periodic.notification.notification.CommandNotification.Command;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Implementation of {@link NotificationCoordinatorExecutor} that generates regular notifications
+ * as indicated by {@link PeriodicNotification}s that are registered with this Object. When notifications
+ * are generated they are placed on a work queue to be processed by the {@link NotificationProcessor}.
+ *
+ */
+public class PeriodicNotificationCoordinatorExecutor implements NotificationCoordinatorExecutor {
+
+    private static final Logger LOG = LoggerFactory.getLogger(PeriodicNotificationCoordinatorExecutor.class);
+    private int numThreads;
+    private ScheduledExecutorService producerThreadPool;
+    private Map<String, ScheduledFuture<?>> serviceMap = new HashMap<>();
+    private BlockingQueue<TimestampedNotification> notifications;
+    private final ReentrantLock lock = new ReentrantLock(true);
+    private boolean running = false;
+
+    public PeriodicNotificationCoordinatorExecutor(int numThreads, BlockingQueue<TimestampedNotification> notifications) {
+        this.numThreads = numThreads;
+        this.notifications = notifications;
+    }
+
+    @Override
+    public void processNextCommandNotification(CommandNotification notification) {
+        lock.lock();
+        try {
+            processNotification(notification);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            producerThreadPool = Executors.newScheduledThreadPool(numThreads);
+            running = true;
+        }
+    }
+
+    @Override
+    public void stop() {
+        running = false;
+
+        if (producerThreadPool != null) {
+            producerThreadPool.shutdown();
+            try {
+                if (!producerThreadPool.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
+                    producerThreadPool.shutdownNow();
+                }
+            } catch (Exception e) {
+                LOG.info("Service Executor Shutdown has been called.  Terminating NotificationRunnable");
+            }
+        }
+    }
+
+    private void processNotification(CommandNotification notification) {
+        Command command = notification.getCommand();
+        Notification periodic = notification.getNotification();
+        switch (command) {
+        case ADD:
+            addNotification(periodic);
+            break;
+        case DELETE:
+            deleteNotification(periodic);
+            break;
+        }
+    }
+
+    private void addNotification(Notification notification) {
+        Preconditions.checkArgument(notification instanceof PeriodicNotification);
+        PeriodicNotification notify = (PeriodicNotification) notification;
+        if (!serviceMap.containsKey(notification.getId())) {
+            ScheduledFuture<?> future = producerThreadPool.scheduleAtFixedRate(new NotificationProducer(notify), notify.getInitialDelay(),
+                    notify.getPeriod(), notify.getTimeUnit());
+            serviceMap.put(notify.getId(), future);
+        }
+    }
+
+    private boolean deleteNotification(Notification notification) {
+        if (serviceMap.containsKey(notification.getId())) {
+            ScheduledFuture<?> future = serviceMap.remove(notification.getId());
+            future.cancel(true);
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Scheduled Task that places a {@link PeriodicNotification}
+     * in the work queue at regular intervals. 
+     *
+     */
+    class NotificationProducer implements Runnable {
+
+        private PeriodicNotification notification;
+
+        public NotificationProducer(PeriodicNotification notification) {
+            this.notification = notification;
+        }
+
+        public void run() {
+            try {
+                notifications.put(new TimestampedNotification(notification));
+            } catch (InterruptedException e) {
+                LOG.info("Unable to add notification.  Process interrupted. ");
+                throw new RuntimeException(e);
+            }
+        }
+
+    }
+
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+}

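The scheduling behavior above reduces to ScheduledExecutorService.scheduleAtFixedRate: each registered PeriodicNotification becomes a fixed-rate task whose future is cancelled on deletion. A self-contained, JDK-only sketch of that pattern (not code from this commit) follows.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class SchedulingSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // As in addNotification above: fire after the initial delay and then
        // at the fixed period, until the returned future is cancelled.
        ScheduledFuture<?> future = pool.scheduleAtFixedRate(
                () -> System.out.println("notification at " + System.currentTimeMillis()),
                5, 15, TimeUnit.SECONDS);

        Thread.sleep(60_000);

        // As in deleteNotification above: cancelling stops the periodic task.
        future.cancel(true);
        pool.shutdown();
    }
}
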
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/BindingSetRecord.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/BindingSetRecord.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/BindingSetRecord.java
new file mode 100644
index 0000000..471b021
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/BindingSetRecord.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.exporter;
+
+import org.openrdf.query.BindingSet;
+
+import com.google.common.base.Objects;
+
+/**
+ * Object that associates a {@link BindingSet} with a given Kafka topic.
+ * This ensures that the {@link KafkaPeriodicBindingSetExporter} can export
+ * each BindingSet to its appropriate topic.
+ *
+ */
+public class BindingSetRecord {
+
+    private BindingSet bs;
+    private String topic;
+    
+    public BindingSetRecord(BindingSet bs, String topic) {
+        this.bs = bs;
+        this.topic = topic;
+    }
+    
+    /**
+     * @return BindingSet in this BindingSetRecord
+     */
+    public BindingSet getBindingSet() {
+        return bs;
+    }
+    
+    /**
+     * @return Kafka topic for this BindingSetRecord
+     */
+    public String getTopic() {
+        return topic;
+    }
+    
+    @Override 
+    public boolean equals(Object o) {
+        if(this == o) {
+            return true;
+        }
+        
+        if(o instanceof BindingSetRecord) {
+            BindingSetRecord record = (BindingSetRecord) o;
+            return Objects.equal(this.bs, record.bs) && Objects.equal(this.topic, record.topic);
+        }
+        
+        return false;
+    }
+    
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(bs, topic);
+    }
+    
+    @Override
+    public String toString() {
+        return new StringBuilder().append("Binding Set Record \n").append("  Topic: " + topic + "\n").append("  BindingSet: " + bs + "\n")
+                .toString();
+    }
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaExporterExecutor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaExporterExecutor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaExporterExecutor.java
new file mode 100644
index 0000000..4880015
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaExporterExecutor.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.exporter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.log4j.Logger;
+import org.apache.rya.periodic.notification.api.BindingSetExporter;
+import org.apache.rya.periodic.notification.api.LifeCycle;
+import org.openrdf.query.BindingSet;
+
+import jline.internal.Preconditions;
+
+/**
+ * Executor service that runs {@link KafkaPeriodicBindingSetExporter}s.  
+ *
+ */
+public class KafkaExporterExecutor implements LifeCycle {
+
+    private static final Logger log = Logger.getLogger(KafkaExporterExecutor.class);
+    private KafkaProducer<String, BindingSet> producer;
+    private BlockingQueue<BindingSetRecord> bindingSets;
+    private ExecutorService executor;
+    private List<KafkaPeriodicBindingSetExporter> exporters;
+    private int numThreads;
+    private boolean running = false;
+
+    /**
+     * Creates a KafkaExporterExecutor for exporting periodic query results to Kafka.
+     * @param producer for publishing results to Kafka
+     * @param numThreads number of threads used to publish results
+     * @param bindingSets - work queue containing {@link BindingSet}s to be published
+     */
+    public KafkaExporterExecutor(KafkaProducer<String, BindingSet> producer, int numThreads, BlockingQueue<BindingSetRecord> bindingSets) {
+        Preconditions.checkNotNull(producer);
+        Preconditions.checkNotNull(bindingSets);
+        this.producer = producer;
+        this.bindingSets = bindingSets;
+        this.numThreads = numThreads;
+        this.exporters = new ArrayList<>();
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            executor = Executors.newFixedThreadPool(numThreads);
+
+            for (int threadNumber = 0; threadNumber < numThreads; threadNumber++) {
+                log.info("Creating exporter:" + threadNumber);
+                KafkaPeriodicBindingSetExporter exporter = new KafkaPeriodicBindingSetExporter(producer, threadNumber, bindingSets);
+                exporters.add(exporter);
+                executor.submit(exporter);
+            }
+            running = true;
+        }
+    }
+
+    @Override
+    public void stop() {
+        if (executor != null) {
+            executor.shutdown();
+        }
+
+        if (exporters != null && exporters.size() > 0) {
+            exporters.forEach(x -> x.shutdown());
+        }
+
+        if (producer != null) {
+            producer.close();
+        }
+
+        running = false;
+        try {
+            if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
+                log.info("Timed out waiting for consumer threads to shut down, exiting uncleanly");
+                executor.shutdownNow();
+            }
+        } catch (InterruptedException e) {
+            log.info("Interrupted during shutdown, exiting uncleanly");
+        }
+    }
+
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaPeriodicBindingSetExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaPeriodicBindingSetExporter.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaPeriodicBindingSetExporter.java
new file mode 100644
index 0000000..9baede3
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/exporter/KafkaPeriodicBindingSetExporter.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.exporter;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter.ResultExportException;
+import org.apache.rya.periodic.notification.api.BindingSetExporter;
+import org.openrdf.model.Literal;
+import org.openrdf.query.BindingSet;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Object that exports {@link BindingSet}s to the Kafka topic indicated by
+ * the {@link BindingSetRecord}.
+ * 
+ */
+public class KafkaPeriodicBindingSetExporter implements BindingSetExporter, Runnable {
+
+    private static final Logger log = Logger.getLogger(KafkaPeriodicBindingSetExporter.class);
+    private KafkaProducer<String, BindingSet> producer;
+    private BlockingQueue<BindingSetRecord> bindingSets;
+    private AtomicBoolean closed = new AtomicBoolean(false);
+    private int threadNumber;
+
+    public KafkaPeriodicBindingSetExporter(KafkaProducer<String, BindingSet> producer, int threadNumber,
+            BlockingQueue<BindingSetRecord> bindingSets) {
+        Preconditions.checkNotNull(producer);
+        Preconditions.checkNotNull(bindingSets);
+        this.threadNumber = threadNumber;
+        this.producer = producer;
+        this.bindingSets = bindingSets;
+    }
+
+    /**
+     * Exports BindingSets to Kafka.  The BindingSet and topic are extracted from
+     * the indicated BindingSetRecord and the BindingSet is then exported to the topic.
+     */
+    @Override
+    public void exportNotification(BindingSetRecord record) throws ResultExportException {
+        String bindingName = IncrementalUpdateConstants.PERIODIC_BIN_ID;
+        BindingSet bindingSet = record.getBindingSet();
+        String topic = record.getTopic();
+        long binId = ((Literal) bindingSet.getValue(bindingName)).longValue();
+        final Future<RecordMetadata> future = producer
+                .send(new ProducerRecord<String, BindingSet>(topic, Long.toString(binId), bindingSet));
+        try {
+            //wait for confirmation that results have been received
+            future.get(5, TimeUnit.SECONDS);
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+            throw new ResultExportException(e.getMessage());
+        }
+    }
+
+    @Override
+    public void run() {
+        try {
+            while (!closed.get()) {
+                exportNotification(bindingSets.take());
+            }
+        } catch (InterruptedException | ResultExportException e) {
+            log.trace("Thread " + threadNumber + " is unable to process message.", e);
+        }
+    }
+    
+    
+    public void shutdown() {
+        closed.set(true);
+    }
+
+}
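
Since exportNotification keys each record by its periodic bin id, a downstream consumer can group results per bin. A hedged sketch of that consumer side, assuming a BindingSet deserializer matching the producer's serializer and an illustrative topic name:

    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.openrdf.query.BindingSet;

    public class ResultConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
            props.setProperty("group.id", "periodic-query-results");
            props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            // Hypothetical deserializer matching the producer-side BindingSet serializer.
            props.setProperty("value.deserializer", "com.example.BindingSetDeserializer");

            try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("periodicQueryTopic")); // assumed topic
                ConsumerRecords<String, BindingSet> records = consumer.poll(1000);
                // The record key is the periodic bin id written by exportNotification above.
                records.forEach(r -> System.out.println("bin " + r.key() + ": " + r.value()));
            }
        }
    }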

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/BasicNotification.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/BasicNotification.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/BasicNotification.java
new file mode 100644
index 0000000..c31a5c0
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/BasicNotification.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.notification;
+
+import org.apache.rya.periodic.notification.api.Notification;
+
+import com.google.common.base.Objects;
+
+/**
+ * Notification Object used by the Periodic Query Service
+ * to inform workers to process results for a given Periodic
+ * Query with the indicated id.
+ *
+ */
+public class BasicNotification implements Notification {
+
+    private String id;
+
+    /**
+     * Creates a BasicNotification
+     * @param id - Fluo query id associated with this Notification
+     */
+    public BasicNotification(String id) {
+        this.id = id;
+    }
+
+    /**
+     * @return the Fluo Query Id that this notification will generate results for
+     */
+    @Override
+    public String getId() {
+        return id;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other instanceof BasicNotification) {
+            BasicNotification not = (BasicNotification) other;
+            return Objects.equal(this.id, not.id);
+        }
+
+        return false;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(id);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        return builder.append("id").append("=").append(id).toString();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/CommandNotification.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/CommandNotification.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/CommandNotification.java
new file mode 100644
index 0000000..597b228
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/CommandNotification.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.notification;
+
+import org.apache.rya.periodic.notification.api.Notification;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+
+/**
+ * This Object contains a Notification Object used by the Periodic Query Service
+ * to inform workers to process results for a given Periodic Query with the
+ * indicated id. Additionally, the CommandNotification contains a
+ * {@link Command} about which action the
+ * {@link NotificationCoordinatorExecutor} should take (adding or deleting).
+ * CommandNotifications are meant to be added to an external work queue (such as
+ * Kafka) to be processed by the NotificationCoordinatorExecutor.
+ *
+ */
+public class CommandNotification implements Notification {
+
+    private Notification notification;
+    private Command command;
+
+    public enum Command {
+        ADD, DELETE
+    };
+
+    /**
+     * Creates a new CommandNotification
+     * @param command - the command associated with this notification (either add or delete)
+     * @param notification - the underlying notification associated with this command
+     */
+    public CommandNotification(Command command, Notification notification) {
+        this.notification = Preconditions.checkNotNull(notification);
+        this.command = Preconditions.checkNotNull(command);
+    }
+
+    @Override
+    public String getId() {
+        return notification.getId();
+    }
+
+    /**
+     * Returns the {@link Notification} contained by this CommandNotification.
+     * @return - Notification contained by this Object
+     */
+    public Notification getNotification() {
+        return this.notification;
+    }
+
+    /**
+     * @return Command contained by this Object (either add or delete)
+     */
+    public Command getCommand() {
+        return this.command;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+        if (other instanceof CommandNotification) {
+            CommandNotification cn = (CommandNotification) other;
+            return Objects.equal(this.command, cn.command) && Objects.equal(this.notification, cn.notification);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(command, notification);
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder().append("command").append("=").append(command.toString()).append(";")
+                .append(notification.toString()).toString();
+    }
+
+}
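
For reference, creating command notifications for the coordinator is straightforward (the query id below is an illustrative placeholder):

    // Register a periodic query with the coordinator, then later remove it.
    Notification basic = new BasicNotification("fluoQueryId123"); // illustrative id
    CommandNotification add = new CommandNotification(CommandNotification.Command.ADD, basic);
    CommandNotification delete = new CommandNotification(CommandNotification.Command.DELETE, basic);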

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/PeriodicNotification.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/PeriodicNotification.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/PeriodicNotification.java
new file mode 100644
index 0000000..aa9e581
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/PeriodicNotification.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.notification;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.rya.periodic.notification.api.Notification;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Notification Object used by the Periodic Query Service to inform workers to
+ * process results for a given Periodic Query with the indicated id.
+ * Additionally, this Object contains a period that indicates a frequency at
+ * which regular updates are generated.
+ *
+ */
+public class PeriodicNotification implements Notification {
+
+    private String id;
+    private long period;
+    private TimeUnit periodTimeUnit;
+    private long initialDelay;
+
+    /**
+     * Creates a PeriodicNotification.
+     * @param id - Fluo Query Id that this notification is associated with
+     * @param period - period at which notifications are generated
+     * @param periodTimeUnit - time unit associated with the period and delay
+     * @param initialDelay - amount of time to wait before generating the first notification
+     */
+    public PeriodicNotification(String id, long period, TimeUnit periodTimeUnit, long initialDelay) {
+        this.id = Preconditions.checkNotNull(id);
+        this.periodTimeUnit = Preconditions.checkNotNull(periodTimeUnit);
+        Preconditions.checkArgument(period > 0 && initialDelay >= 0);
+        this.period = period;
+        this.initialDelay = initialDelay;
+    }
+    
+
+    /**
+     * Create a PeriodicNotification
+     * @param other - other PeriodicNotification used in copy constructor
+     */
+    public PeriodicNotification(PeriodicNotification other) {
+        this(other.id, other.period, other.periodTimeUnit, other.initialDelay);
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    /**
+     * @return - period at which regular notifications are generated
+     */
+    public long getPeriod() {
+        return period;
+    }
+
+    /**
+     * @return time unit of period and initial delay
+     */
+    public TimeUnit getTimeUnit() {
+        return periodTimeUnit;
+    }
+
+    /**
+     * @return amount of time to delay before beginning to generate notifications
+     */
+    public long getInitialDelay() {
+        return initialDelay;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        String delim = "=";
+        String delim2 = ";";
+        return builder.append("id").append(delim).append(id).append(delim2).append("period").append(delim).append(period).append(delim2)
+                .append("periodTimeUnit").append(delim).append(periodTimeUnit).append(delim2).append("initialDelay").append(delim)
+                .append(initialDelay).toString();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (!(other instanceof PeriodicNotification)) {
+            return false;
+        }
+
+        PeriodicNotification notification = (PeriodicNotification) other;
+        return Objects.equals(this.id, notification.id) && (this.period == notification.period) 
+                && Objects.equals(this.periodTimeUnit, notification.periodTimeUnit) && (this.initialDelay == notification.initialDelay);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, period, periodTimeUnit, initialDelay);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+
+        private String id;
+        private long period;
+        private TimeUnit periodTimeUnit;
+        private long initialDelay = 0;
+
+        /**
+         * @param id - periodic query id
+         * @return - builder to chain method calls
+         */
+        public Builder id(String id) {
+            this.id = id;
+            return this;
+        }
+
+        /**
+         * @param period of the periodic notification for generating regular notifications
+         * @return - builder to chain method calls
+         */
+        public Builder period(long period) {
+            this.period = period;
+            return this;
+        }
+
+        /**
+         * @param timeUnit of period and initial delay
+         * @return - builder to chain method calls
+         */
+        public Builder timeUnit(TimeUnit timeUnit) {
+            this.periodTimeUnit = timeUnit;
+            return this;
+        }
+
+        /**
+         * @param initialDelay - amount of time to wait before generating notifications
+         * @return - builder to chain method calls
+         */
+        public Builder initialDelay(long initialDelay) {
+            this.initialDelay = initialDelay;
+            return this;
+        }
+
+        /**
+         * Builds PeriodicNotification
+         * @return PeriodicNotification constructed from Builder specified parameters
+         */
+        public PeriodicNotification build() {
+            return new PeriodicNotification(id, period, periodTimeUnit, initialDelay);
+        }
+
+    }
+
+}
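
For reference, a builder example with illustrative timing values; note that the single time unit applies to both the period and the initial delay:

    // Fire every 15 minutes, starting 1 minute after registration.
    PeriodicNotification notification = PeriodicNotification.builder()
            .id("fluoQueryId123")        // illustrative Fluo query id
            .period(15)
            .timeUnit(TimeUnit.MINUTES)  // applies to period and initialDelay
            .initialDelay(1)
            .build();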

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/TimestampedNotification.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/TimestampedNotification.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/TimestampedNotification.java
new file mode 100644
index 0000000..38073ce
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/notification/TimestampedNotification.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.notification;
+
+import java.util.Date;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * {@link PeriodicNotification} Object used by the Periodic Query Service to inform workers to
+ * process results for a given Periodic Query with the indicated id.  Additionally
+ * this Object contains a {@link Date} object to indicate the date time at which this
+ * notification was generated.
+ *
+ */
+public class TimestampedNotification extends PeriodicNotification {
+
+    private Date date;
+
+    /**
+     * Constructs a TimestampedNotification
+     * @param id - Fluo Query Id associated with this Notification
+     * @param period - period at which notifications are generated
+     * @param periodTimeUnit - time unit associated with period and initial delay
+     * @param initialDelay - amount of time to wait before generating first notification
+     */
+    public TimestampedNotification(String id, long period, TimeUnit periodTimeUnit, long initialDelay) {
+        super(id, period, periodTimeUnit, initialDelay);
+        date = new Date();
+    }
+    
+    /**
+     * Creates a TimestampedNotification
+     * @param notification - PeriodicNotification used to create this TimestampedNotification.  
+     * This constructor creates a time stamp for the TimestampedNotification.
+     */
+    public TimestampedNotification(PeriodicNotification notification) {
+        super(notification);
+        date = new Date();
+    }
+
+    /**
+     * @return timestamp at which this notification was generated
+     */
+    public Date getTimestamp() {
+        return date;
+    }
+
+    @Override
+    public String toString() {
+        return super.toString() + ";date=" + date;
+    }
+
+}
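
Usage is a thin wrapper around PeriodicNotification; continuing the builder example above, the timestamp is captured at construction time:

    // Stamp the notification at the moment it is dispatched to workers.
    TimestampedNotification stamped = new TimestampedNotification(notification);
    Date generatedAt = stamped.getTimestamp(); // identifies the bin of results to process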

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/NotificationProcessorExecutor.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/NotificationProcessorExecutor.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/NotificationProcessorExecutor.java
new file mode 100644
index 0000000..a363d5d
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/processor/NotificationProcessorExecutor.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.processor;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.periodic.notification.api.LifeCycle;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Executor service that runs {@link TimestampedNotificationProcessor}s with basic
+ * functionality for starting, stopping, and determining whether notification processors are
+ * being executed. 
+ *
+ */
+public class NotificationProcessorExecutor implements LifeCycle {
+
+    private static final Logger log = Logger.getLogger(NotificationProcessorExecutor.class);
+    private BlockingQueue<TimestampedNotification> notifications; // notifications
+    private BlockingQueue<NodeBin> bins; // entries to delete from Fluo
+    private BlockingQueue<BindingSetRecord> bindingSets; // query results to
+                                                         // export
+    private PeriodicQueryResultStorage periodicStorage;
+    private List<TimestampedNotificationProcessor> processors;
+    private int numberThreads;
+    private ExecutorService executor;
+    private boolean running = false;
+
+    /**
+     * Creates NotificationProcessorExecutor.
+     * @param periodicStorage - storage layer that periodic results are read from
+     * @param notifications - notifications are pulled from this queue, and the timestamp indicates which bin of results to query for
+     * @param bins - after notifications are processed, they are added to the bin to be deleted
+     * @param bindingSets - results read from the storage layer to be exported
+     * @param numberThreads - number of threads used for processing
+     */
+    public NotificationProcessorExecutor(PeriodicQueryResultStorage periodicStorage, BlockingQueue<TimestampedNotification> notifications,
+            BlockingQueue<NodeBin> bins, BlockingQueue<BindingSetRecord> bindingSets, int numberThreads) {
+        this.notifications = Preconditions.checkNotNull(notifications);
+        this.bins = Preconditions.checkNotNull(bins);
+        this.bindingSets = Preconditions.checkNotNull(bindingSets);
+        this.periodicStorage = periodicStorage;
+        this.numberThreads = numberThreads;
+        processors = new ArrayList<>();
+    }
+
+    @Override
+    public void start() {
+        if (!running) {
+            executor = Executors.newFixedThreadPool(numberThreads);
+            for (int threadNumber = 0; threadNumber < numberThreads; threadNumber++) {
+                log.info("Creating processor: " + threadNumber);
+                TimestampedNotificationProcessor processor = TimestampedNotificationProcessor.builder().setBindingSets(bindingSets)
+                        .setBins(bins).setPeriodicStorage(periodicStorage).setNotifications(notifications).setThreadNumber(threadNumber)
+                        .build();
+                processors.add(processor);
+                executor.submit(processor);
+            }
+            running = true;
+        }
+    }
+
+    @Override
+    public void stop() {
+        if (processors != null && processors.size() > 0) {
+            processors.forEach(x -> x.shutdown());
+        }
+        if (executor != null) {
+            executor.shutdown();
+            try {
+                if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
+                    log.info("Timed out waiting for processor threads to shut down, exiting uncleanly");
+                    executor.shutdownNow();
+                }
+            } catch (InterruptedException e) {
+                log.info("Interrupted during shutdown, exiting uncleanly");
+                Thread.currentThread().interrupt();
+            }
+        }
+        running = false;
+    }
+
+    @Override
+    public boolean currentlyRunning() {
+        return running;
+    }
+
+}
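
For reference, a sketch of wiring the processor between its input and output queues. The storage instance is a placeholder for any PeriodicQueryResultStorage implementation, and the thread count is illustrative:

    BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
    BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
    BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();

    PeriodicQueryResultStorage storage = null; // placeholder: supply a real implementation
    NotificationProcessorExecutor processors =
            new NotificationProcessorExecutor(storage, notifications, bins, bindingSets, 4);
    processors.start();
    // Each notification taken from the queue yields BindingSetRecords (for export)
    // and NodeBins (for pruning) on the output queues.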


[7/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchRowKeyUtil.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchRowKeyUtil.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchRowKeyUtil.java
new file mode 100644
index 0000000..581aa5b
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/BatchRowKeyUtil.java
@@ -0,0 +1,68 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.UUID;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Class for creating the {@link Bytes} rows written to the Fluo table to identify each {@link BatchInformation}
+ * object.  Each row is formed by concatenating a query id and a batch id.
+ *
+ */
+public class BatchRowKeyUtil {
+
+    /**
+     * Creates a Bytes row from the query id. The batch id is automatically generated.
+     * @param nodeId - query node id that the batch task will be performed on
+     * @return Bytes row used to identify the BatchInformation
+     */
+    public static Bytes getRow(String nodeId) {
+        String row = new StringBuilder().append(nodeId).append(IncrementalUpdateConstants.NODEID_BS_DELIM)
+                .append(UUID.randomUUID().toString().replace("-", "")).toString();
+        return Bytes.of(row);
+    }
+    
+    /**
+     * Creates a Bytes row from a nodeId and batchId
+     * @param nodeId - query node id that batch task will be performed on
+     * @param batchId - id used to identify batch
+     * @return Bytes row used to identify the BatchInformation
+     */
+    public static Bytes getRow(String nodeId, String batchId) {
+        String row = new StringBuilder().append(nodeId).append(IncrementalUpdateConstants.NODEID_BS_DELIM)
+                .append(batchId).toString();
+        return Bytes.of(row);
+    }
+    
+    /**
+     * Given a Bytes row, return the query node id
+     * @param row - the Bytes row used to identify the BatchInformation
+     * @return - the query node id that the batch task is performed on
+     */
+    public static String getNodeId(Bytes row) {
+        String[] stringArray = row.toString().split(IncrementalUpdateConstants.NODEID_BS_DELIM);
+        Preconditions.checkArgument(stringArray.length == 2);
+        return stringArray[0];
+    }
+    
+}
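
A round-trip example with an illustrative node id. Note that getNodeId assumes the node id itself contains no NODEID_BS_DELIM, since the row must split into exactly two parts:

    Bytes row = BatchRowKeyUtil.getRow("queryNodeId"); // appends a random batch id
    String nodeId = BatchRowKeyUtil.getNodeId(row);    // returns "queryNodeId"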

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchBindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchBindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchBindingSetUpdater.java
new file mode 100644
index 0000000..a266341
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchBindingSetUpdater.java
@@ -0,0 +1,184 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.RowColumn;
+import org.apache.fluo.api.data.Span;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.IterativeJoin;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.LeftOuterJoin;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.NaturalJoin;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.Side;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Performs updates to BindingSets in the JoinBindingSet column in batch fashion.
+ */
+public class JoinBatchBindingSetUpdater extends AbstractBatchBindingSetUpdater {
+
+    private static final Logger log = Logger.getLogger(JoinBatchBindingSetUpdater.class);
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+    private static final FluoQueryMetadataDAO dao = new FluoQueryMetadataDAO();
+
+    /**
+     * Processes {@link JoinBatchInformation}. Updates the BindingSets
+     * associated with the specified nodeId. The BindingSets are processed in
+     * batch fashion, where the number of results is indicated by
+     * {@link JoinBatchInformation#getBatchSize()}. BindingSets are either
+     * Added, Deleted, or Updated according to
+     * {@link JoinBatchInformation#getTask()}. In the event that the number of
+     * entries that need to be updated exceeds the batch size, the row of the
+     * first unprocessed BindingSets is used to create a new JoinBatch job to
+     * process the remaining BindingSets.
+     * @throws Exception 
+     */
+    @Override
+    public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception {
+        super.processBatch(tx, row, batch);
+        String nodeId = BatchRowKeyUtil.getNodeId(row);
+        Preconditions.checkArgument(batch instanceof JoinBatchInformation);
+        JoinBatchInformation joinBatch = (JoinBatchInformation) batch;
+        Task task = joinBatch.getTask();
+
+        // Figure out which join algorithm we are going to use.
+        final IterativeJoin joinAlgorithm;
+        switch (joinBatch.getJoinType()) {
+        case NATURAL_JOIN:
+            joinAlgorithm = new NaturalJoin();
+            break;
+        case LEFT_OUTER_JOIN:
+            joinAlgorithm = new LeftOuterJoin();
+            break;
+        default:
+            throw new RuntimeException("Unsupported JoinType: " + joinBatch.getJoinType());
+        }
+
+        Set<VisibilityBindingSet> bsSet = new HashSet<>();
+        Optional<RowColumn> rowCol = fillSiblingBatch(tx, joinBatch, bsSet);
+
+        // Iterates over the resulting BindingSets from the join.
+        final Iterator<VisibilityBindingSet> newJoinResults;
+        VisibilityBindingSet bs = joinBatch.getBs();
+        if (joinBatch.getSide() == Side.LEFT) {
+            newJoinResults = joinAlgorithm.newLeftResult(bs, bsSet.iterator());
+        } else {
+            newJoinResults = joinAlgorithm.newRightResult(bsSet.iterator(), bs);
+        }
+
+        // Insert the new join binding sets to the Fluo table.
+        final JoinMetadata joinMetadata = dao.readJoinMetadata(tx, nodeId);
+        final VariableOrder joinVarOrder = joinMetadata.getVariableOrder();
+        while (newJoinResults.hasNext()) {
+            final VisibilityBindingSet newJoinResult = newJoinResults.next();
+            //create BindingSet value
+            Bytes bsBytes = BS_SERDE.serialize(newJoinResult);
+            //make rowId
+            Bytes rowKey = RowKeyUtil.makeRowKey(nodeId, joinVarOrder, newJoinResult);
+            final Column col = FluoQueryColumns.JOIN_BINDING_SET;
+            processTask(tx, task, rowKey, col, bsBytes);
+        }
+
+        // if batch limit met, there are additional entries to process
+        // update the span and register updated batch job
+        if (rowCol.isPresent()) {
+            Span newSpan = getNewSpan(rowCol.get(), joinBatch.getSpan());
+            joinBatch.setSpan(newSpan);
+            BatchInformationDAO.addBatch(tx, nodeId, joinBatch);
+        }
+
+    }
+
+    private void processTask(TransactionBase tx, Task task, Bytes row, Column column, Bytes value) {
+        switch (task) {
+        case Add:
+            tx.set(row, column, value);
+            break;
+        case Delete:
+            tx.delete(row, column);
+            break;
+        case Update:
+            log.trace("The Task Update is not supported for JoinBatchBindingSetUpdater.  Batch will not be processed.");
+            break;
+        default:
+            log.trace("Invalid Task type.  Aborting batch operation.");
+            break;
+        }
+    }
+
+    /**
+     * Fetches batch to be processed by scanning over the Span specified by the
+     * {@link JoinBatchInformation}. The number of results is less than or equal
+     * to the batch size specified by the JoinBatchInformation.
+     * 
+     * @param tx - Fluo transaction in which batch operation is performed
+     * @param batch - batch order to be processed
+     * @param bsSet - set that batch results are added to
+     * @return Optional containing the RowColumn at which the scan stopped if the batch size was met, otherwise an empty Optional
+     * @throws Exception 
+     */
+    private Optional<RowColumn> fillSiblingBatch(TransactionBase tx, JoinBatchInformation batch, Set<VisibilityBindingSet> bsSet) throws Exception {
+
+        Span span = batch.getSpan();
+        Column column = batch.getColumn();
+        int batchSize = batch.getBatchSize();
+
+        RowScanner rs = tx.scanner().over(span).fetch(column).byRow().build();
+        Iterator<ColumnScanner> colScannerIter = rs.iterator();
+
+        boolean batchLimitMet = false;
+        Bytes row = span.getStart().getRow();
+        while (colScannerIter.hasNext() && !batchLimitMet) {
+            ColumnScanner colScanner = colScannerIter.next();
+            row = colScanner.getRow();
+            Iterator<ColumnValue> iter = colScanner.iterator();
+            while (iter.hasNext()) {
+                if (bsSet.size() >= batchSize) {
+                    batchLimitMet = true;
+                    break;
+                }
+                bsSet.add(BS_SERDE.deserialize(iter.next().getValue()));
+            }
+        }
+
+        if (batchLimitMet) {
+            return Optional.of(new RowColumn(row, column));
+        } else {
+            return Optional.empty();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchInformation.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchInformation.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchInformation.java
new file mode 100644
index 0000000..71ac557
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/JoinBatchInformation.java
@@ -0,0 +1,255 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Objects;
+
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.Side;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.openrdf.query.Binding;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * This class updates join results based on parameters specified for the join's
+ * children. The join has two children, and for one child a VisibilityBindingSet
+ * is specified along with the Side of that child. This BindingSet represents an
+ * update to that join child. For the other child, a Span, Column and
+ * VariableOrder are specified. This is so that the sibling node (the node that
+ * wasn't updated) can be scanned to obtain results that can be joined with the
+ * VisibilityBindingSet. The assumption here is that the Span is derived from
+ * the {@link Binding}s of common variables between the join children, with
+ * Values ordered according to the indicated {@link VariableOrder}. This class
+ * represents a batch order to perform a given task on join BindingSet results.
+ * The {@link Task} is to Add, Delete, or Update. This batch order is processed
+ * by the {@link BatchObserver} and used with the nodeId provided to the
+ * Observer to process the Task specified by the batch order. If the Task is to
+ * add, the BatchBindingSetUpdater returned by
+ * {@link JoinBatchInformation#getBatchUpdater()} will scan the join's child for
+ * results using the indicated Span and Column. These results are joined with
+ * the indicated VisibilityBindingSet, and the results are added to the parent
+ * join. The other Tasks are performed analogously.
+ *
+ */
+public class JoinBatchInformation extends AbstractSpanBatchInformation {
+
+    private static final BatchBindingSetUpdater updater = new JoinBatchBindingSetUpdater();
+    private VisibilityBindingSet bs; //update for join child indicated by side
+    private VariableOrder varOrder; //variable order for child indicated by Span
+    private Side side;  //join child that was updated by bs
+    private JoinType join;
+    /**
+     * @param batchSize - batch size that Tasks are performed in
+     * @param task - Add, Delete, or Update
+     * @param column - Column of join child to be scanned
+     * @param span - span of join child to be scanned (derived from common variables of left and right join children)
+     * @param bs - BindingSet to be joined with results of child scan
+     * @param varOrder - VariableOrder used to form join (order for join child corresponding to Span)
+     * @param side - The side of the child that the VisibilityBindingSet update occurred at
+     * @param join - JoinType (left, right, natural inner)
+     */
+    public JoinBatchInformation(int batchSize, Task task, Column column, Span span, VisibilityBindingSet bs, VariableOrder varOrder, Side side, JoinType join) {
+        super(batchSize, task, column, span);
+        this.bs = Preconditions.checkNotNull(bs);
+        this.varOrder = Preconditions.checkNotNull(varOrder);
+        this.side = Preconditions.checkNotNull(side);
+        this.join = Preconditions.checkNotNull(join);
+    }
+    
+    public JoinBatchInformation(Task task, Column column, Span span, VisibilityBindingSet bs, VariableOrder varOrder, Side side, JoinType join) {
+        this(DEFAULT_BATCH_SIZE, task, column, span, bs, varOrder, side, join);
+    }
+    
+    /**
+     * Indicates the join child that the BindingSet result {@link JoinBatchInformation#getBs()} updated.
+     * This BindingSet is joined with the results obtained by scanning over the value of {@link JoinBatchInformation#getSpan()}.
+     * @return {@link Side} indicating which side of the join the new result occurred on
+     */
+    public Side getSide() {
+        return side;
+    }
+    
+    /**
+     * @return {@link JoinType} indicating type of join (left join, right join, natural inner join,...)
+     */
+    public JoinType getJoinType() {
+        return join;
+    }
+    
+    /**
+     * Returns the VariableOrder for the join child corresponding to the Span.
+     * @return {@link VariableOrder} used to join {@link VisibilityBindingSet}s.
+     */
+    public VariableOrder getVarOrder() {
+        return varOrder;
+    }
+
+   /**
+    * Returns the VisibilityBindingSet that represents an update to the join child.  The join child
+    * updated is indicated by the value of {@link JoinBatchInformation#getSide()}.
+    * @return VisibilityBindingSet that will be joined with results returned by scan over given
+    * {@link Span}.
+    */
+   public VisibilityBindingSet getBs() {
+        return bs;
+    }
+    
+   /**
+    * @return BatchBindingSetUpdater used to apply {@link Task} to results formed by joining the given
+    * VisibilityBindingSet with the results returned by scanning over the Span.
+    */
+    @Override
+    public BatchBindingSetUpdater getBatchUpdater() {
+        return updater;
+    }
+    
+    @Override
+    public String toString() {
+        return new StringBuilder()
+                .append("Join Batch Information {\n")
+                .append("    Batch Size: " + super.getBatchSize() + "\n")
+                .append("    Task: " + super.getTask() + "\n")
+                .append("    Column: " + super.getColumn() + "\n")
+                .append("    VariableOrder: " + varOrder + "\n")
+                .append("    Join Type: " + join + "\n")
+                .append("    Join Side: " + side + "\n")
+                .append("    Binding Set: " + bs + "\n")
+                .append("}")
+                .toString();
+    }
+    
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (!(other instanceof JoinBatchInformation)) {
+            return false;
+        }
+
+        JoinBatchInformation batch = (JoinBatchInformation) other;
+        return super.equals(other) &&  Objects.equals(this.bs, batch.bs) && Objects.equals(this.join, batch.join)
+                && Objects.equals(this.side, batch.side) && Objects.equals(this.varOrder, batch.varOrder);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.getBatchSize(), super.getColumn(), super.getSpan(), super.getTask(), bs, join, side, varOrder);
+    }
+    
+    
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+
+        private int batchSize = DEFAULT_BATCH_SIZE;
+        private Task task;
+        private Column column;
+        private Span span;
+        private VisibilityBindingSet bs;
+        private VariableOrder varOrder;
+        private JoinType join;
+        private Side side;
+   
+        /**
+         * @param batchSize - batch size that {@link Task}s are performed in
+         */
+        public Builder setBatchSize(int batchSize) {
+            this.batchSize = batchSize;
+            return this;
+        }
+     
+        /**
+         * @param task - Task performed (Add, Delete, Update)
+         */
+        public Builder setTask(Task task) {
+            this.task = task;
+            return this;
+        }
+        
+        /**
+         * @param column - Column of join child to be scanned
+         */
+        public Builder setColumn(Column column) {
+            this.column = column;
+            return this;
+        }
+        
+        /**
+         * Span to scan results for one child of the join. The Span corresponds to the side of 
+         * the join that is not indicated by Side.  So if Side is Left, then the
+         * Span will scan the right child of the join.  It is assumed that the span is derived from
+         * the common variables of the left and right join children.
+         * @param span - Span over join child to be scanned
+         */
+        public Builder setSpan(Span span) {
+            this.span = span;
+            return this;
+        }
+      
+        /**
+         * Sets the BindingSet that corresponds to an update to the join child indicated
+         * by Side.  
+         * @param bs - BindingSet update of join child to be joined with results of scan
+         */
+        public Builder setBs(VisibilityBindingSet bs) {
+            this.bs = bs;
+            return this;
+        }
+        
+        /**
+         * @param join - JoinType (left, right, natural inner)
+         */
+        public Builder setJoinType(JoinType join) {
+            this.join = join;
+            return this;
+        }
+        
+        /**
+         * Indicates the join child corresponding to the VisibilityBindingSet update
+         * @param side - side of join the child BindingSet update appeared at
+         */
+        public Builder setSide(Side side) {
+            this.side = side;
+            return this;
+        }
+   
+        /**
+         * Sets the variable order for the join child corresponding to the Span
+         * @param varOrder - Variable order used to join BindingSet with result of scan
+         */
+        public Builder setVarOrder(VariableOrder varOrder) {
+            this.varOrder = varOrder;
+            return this;
+        }
+        
+        /**
+         * @return an instance of {@link JoinBatchInformation} constructed from the parameters passed to this Builder
+         */
+        public JoinBatchInformation build() {
+            return new JoinBatchInformation(batchSize, task, column, span, bs, varOrder, side, join); 
+        }
+    }
+}
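
For reference, an illustrative batch order built with the Builder above. The column, span, variable order, and binding set are placeholder values for the sketch (assumes this package's imports plus BatchInformation.Task, FluoQueryColumns, Fluo's Span and Bytes, and openrdf's QueryBindingSet):

    // A new result arrived on the LEFT side of a natural join; scan the right
    // child over the given span and join its results with the updated binding set.
    VisibilityBindingSet update = new VisibilityBindingSet(new QueryBindingSet()); // placeholder binding set
    JoinBatchInformation batch = JoinBatchInformation.builder()
            .setBatchSize(1000)
            .setTask(Task.Add)
            .setColumn(FluoQueryColumns.STATEMENT_PATTERN_BINDING_SET) // assumed child column
            .setSpan(Span.prefix(Bytes.of("rightChildNodeId")))        // derived from common join variables
            .setBs(update)
            .setVarOrder(new VariableOrder("subject", "object"))       // illustrative order
            .setSide(Side.LEFT)
            .setJoinType(JoinType.NATURAL_JOIN)
            .build();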

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchBindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchBindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchBindingSetUpdater.java
new file mode 100644
index 0000000..749a77d
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchBindingSetUpdater.java
@@ -0,0 +1,128 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Iterator;
+import java.util.Optional;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.RowColumn;
+import org.apache.fluo.api.data.Span;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * This class processes {@link SpanBatchDeleteInformation} objects by
+ * deleting the entries in the Fluo Column corresponding to the {@link Span}
+ * of the BatchInformation object.  This class will delete entries until the
+ * batch size is met, and then create a new SpanBatchDeleteInformation object
+ * with an updated Span whose starting point is the stopping point of this
+ * batch.  If the batch limit is not met, then a new batch is not created and
+ * the task is complete.
+ *
+ */
+public class SpanBatchBindingSetUpdater extends AbstractBatchBindingSetUpdater {
+
+    private static final Logger log = Logger.getLogger(SpanBatchBindingSetUpdater.class);
+
+    /**
+     * Process SpanBatchDeleteInformation objects by deleting all entries indicated
+     * by Span until batch limit is met.
+     * @param tx - Fluo Transaction
+     * @param row - Byte row identifying BatchInformation
+     * @param batch - SpanBatchDeleteInformation object to be processed
+     */
+    @Override
+    public void processBatch(TransactionBase tx, Bytes row, BatchInformation batch) throws Exception {
+        super.processBatch(tx, row, batch);
+        Preconditions.checkArgument(batch instanceof SpanBatchDeleteInformation);
+        SpanBatchDeleteInformation spanBatch = (SpanBatchDeleteInformation) batch;
+        Task task = spanBatch.getTask();
+        int batchSize = spanBatch.getBatchSize();
+        Span span = spanBatch.getSpan();
+        Column column = batch.getColumn();
+        Optional<RowColumn> rowCol = Optional.empty();
+
+        switch (task) {
+        case Add:
+            log.trace("The Task Add is not supported for SpanBatchBindingSetUpdater.  Batch " + batch + " will not be processed.");
+            break;
+        case Delete:
+            rowCol = deleteBatch(tx, span, column, batchSize);
+            break;
+        case Update:
+            log.trace("The Task Update is not supported for SpanBatchBindingSetUpdater.  Batch " + batch + " will not be processed.");
+            break;
+        default:
+            log.trace("Invalid Task type.  Aborting batch operation.");
+            break;
+        }
+
+        if (rowCol.isPresent()) {
+            Span newSpan = getNewSpan(rowCol.get(), spanBatch.getSpan());
+            log.trace("Batch size met.  There are remaining results that need to be deleted.  Creating a new batch of size: "
+                    + spanBatch.getBatchSize() + " with Span: " + newSpan + " and Column: " + column);
+            spanBatch.setSpan(newSpan);
+            BatchInformationDAO.addBatch(tx, BatchRowKeyUtil.getNodeId(row), spanBatch);
+        }
+    }
+
+    private Optional<RowColumn> deleteBatch(TransactionBase tx, Span span, Column column, int batchSize) {
+
+        log.trace("Deleting batch of size: " + batchSize + " using Span: " + span + " and Column: " + column);
+        RowScanner rs = tx.scanner().over(span).fetch(column).byRow().build();
+        try {
+            Iterator<ColumnScanner> colScannerIter = rs.iterator();
+
+            int count = 0;
+            boolean batchLimitMet = false;
+            Bytes row = span.getStart().getRow();
+            while (colScannerIter.hasNext() && !batchLimitMet) {
+                ColumnScanner colScanner = colScannerIter.next();
+                row = colScanner.getRow();
+                Iterator<ColumnValue> iter = colScanner.iterator();
+                while (iter.hasNext()) {
+                    if (count >= batchSize) {
+                        batchLimitMet = true;
+                        break;
+                    }
+                    ColumnValue colVal = iter.next();
+                    tx.delete(row, colVal.getColumn());
+                    count++;
+                }
+            }
+
+            if (batchLimitMet) {
+                return Optional.of(new RowColumn(row));
+            } else {
+                return Optional.empty();
+            }
+        } catch (Exception e) {
+            // Log the failure rather than swallowing it silently; an empty Optional
+            // signals that no follow-up batch should be queued.
+            log.trace("Unable to delete batch with Span: " + span + " and Column: " + column, e);
+            return Optional.empty();
+        }
+    }
+
+}
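
The re-batching above amounts to shifting the Span's start to the row where the
scan stopped.  A minimal sketch of that semantics follows; getNewSpan actually
lives in AbstractBatchBindingSetUpdater (outside this patch), so the body below
is an assumption about its behavior, not the real implementation:

    import org.apache.fluo.api.data.RowColumn;
    import org.apache.fluo.api.data.Span;

    public class NewSpanSketch {
        // Assumed semantics: keep the old end point and restart from the row
        // where the previous batch stopped deleting.
        public static Span newSpan(RowColumn stoppedAt, Span oldSpan) {
            return new Span(stoppedAt, true, oldSpan.getEnd(), oldSpan.isEndInclusive());
        }
    }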

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchDeleteInformation.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchDeleteInformation.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchDeleteInformation.java
new file mode 100644
index 0000000..3b1e245
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/SpanBatchDeleteInformation.java
@@ -0,0 +1,95 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.Span;
+
+/**
+ * This class represents a batch order to delete all entries in the Fluo table indicated
+ * by the given Span and Column.  These batch orders are processed by the {@link BatchObserver},
+ * which uses this batch information along with the nodeId passed into the Observer to perform
+ * batch deletes.  
+ *
+ */
+public class SpanBatchDeleteInformation extends AbstractSpanBatchInformation {
+
+    private static final BatchBindingSetUpdater updater = new SpanBatchBindingSetUpdater();
+    
+    public SpanBatchDeleteInformation(int batchSize, Column column, Span span) {
+        super(batchSize, Task.Delete, column, span);
+    }
+    
+    /**
+     * @return Updater that applies the {@link Task} to the given {@link Span} and {@link Column}
+     */
+    @Override
+    public BatchBindingSetUpdater getBatchUpdater() {
+        return updater;
+    }
+    
+    
+    public static Builder builder() {
+        return new Builder();
+    }
+    
+    public static class Builder {
+
+        private int batchSize = DEFAULT_BATCH_SIZE;
+        private Column column;
+        private Span span;
+
+        /**
+         * @param batchSize - {@link Task}s are applied in batches of this size
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setBatchSize(int batchSize) {
+            this.batchSize = batchSize;
+            return this;
+        }
+
+        /**
+         * Sets the column that the batch {@link Task} will be applied to.
+         * @param column - column the batch Task will be applied to
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setColumn(Column column) {
+            this.column = column;
+            return this;
+        }
+
+        /**
+         * @param span - span that the batch {@link Task} will be applied to
+         * @return This builder so that method invocations may be chained.
+         */
+        public Builder setSpan(Span span) {
+            this.span = span;
+            return this;
+        }
+
+
+        /**
+         * @return an instance of {@link SpanBatchDeleteInformation} constructed from parameters passed to this Builder
+         */
+        public SpanBatchDeleteInformation build() {
+            return new SpanBatchDeleteInformation(batchSize, column, span);
+        }
+
+    }
+
+
+}
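
As a usage sketch, a delete order is built with the Builder above and queued
through the BatchInformationDAO so the BatchObserver routes it to the
SpanBatchBindingSetUpdater.  The column, row prefix, and node id below are
illustrative placeholders, not values from this patch:

    import org.apache.fluo.api.client.TransactionBase;
    import org.apache.fluo.api.data.Column;
    import org.apache.fluo.api.data.Span;
    import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformationDAO;
    import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;

    public class SpanBatchExample {
        public static void queueDelete(TransactionBase tx) {
            SpanBatchDeleteInformation batch = SpanBatchDeleteInformation.builder()
                    .setBatchSize(1000)
                    .setColumn(new Column("bindingSet", "join"))  // hypothetical column
                    .setSpan(Span.prefix("JOIN_123"))             // hypothetical node prefix
                    .build();

            // Up to 1000 entries are deleted per pass; the updater re-queues the
            // remainder of the Span until the whole range has been cleared.
            BatchInformationDAO.addBatch(tx, "JOIN_123", batch);
        }
    }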

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializer.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializer.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializer.java
new file mode 100644
index 0000000..e6f69d0
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationSerializer.java
@@ -0,0 +1,58 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Optional;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * Serializer/Deserializer for {@link BatchInformation} objects that uses the Gson
+ * Type Adapter {@link BatchInformationTypeAdapter} to do all of the serializing and deserializing.
+ * 
+ *
+ */
+public class BatchInformationSerializer {
+
+    private static final Logger log = Logger.getLogger(BatchInformationSerializer.class);
+    private static final Gson gson = new GsonBuilder().registerTypeHierarchyAdapter(BatchInformation.class, new BatchInformationTypeAdapter())
+            .create();
+
+    public static byte[] toBytes(BatchInformation batch) {
+        try {
+            return gson.toJson(batch).getBytes("UTF-8");
+        } catch (Exception e) {
+            log.info("Unable to serialize BatchInformation: " + batch);
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static Optional<BatchInformation> fromBytes(byte[] bytes) {
+        try {
+            String json = new String(bytes, "UTF-8");
+            return Optional.of(gson.fromJson(json, BatchInformation.class));
+        } catch (Exception e) {
+            log.info("Unable to deserialize BatchInformation from the given bytes.", e);
+            return Optional.empty();
+        }
+    }
+}
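
A round-trip sketch of the serializer; the column and span values are
illustrative:

    import java.util.Optional;

    import org.apache.fluo.api.data.Column;
    import org.apache.fluo.api.data.Span;
    import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
    import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
    import org.apache.rya.indexing.pcj.fluo.app.batch.serializer.BatchInformationSerializer;

    public class BatchInformationSerializerExample {
        public static void main(String[] args) {
            BatchInformation original = SpanBatchDeleteInformation.builder()
                    .setBatchSize(500)
                    .setColumn(new Column("family", "qualifier"))  // illustrative
                    .setSpan(Span.prefix("SP_456"))                // illustrative
                    .build();

            // toBytes emits the Gson JSON (including its "class" field) as UTF-8.
            byte[] bytes = BatchInformationSerializer.toBytes(original);

            // fromBytes dispatches on the "class" field to rebuild the subtype.
            Optional<BatchInformation> copy = BatchInformationSerializer.fromBytes(bytes);
            System.out.println(copy.isPresent());  // true
        }
    }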

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapter.java
new file mode 100644
index 0000000..d7c15df
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapter.java
@@ -0,0 +1,73 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.lang.reflect.Type;
+
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.JoinBatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * JsonSerializer/JsonDeserializer for serializing/deserializing
+ * {@link BatchInformation} objects. This makes use of the
+ * {@link BatchInformationTypeAdapterFactory} to retrieve the appropriate
+ * JsonSerializer/JsonDeserializer given the class name of the particular
+ * implementation of BatchInformation.
+ *
+ */
+public class BatchInformationTypeAdapter implements JsonSerializer<BatchInformation>, JsonDeserializer<BatchInformation> {
+
+    private static final Logger log = Logger.getLogger(BatchInformationTypeAdapter.class);
+    private static final BatchInformationTypeAdapterFactory factory = new BatchInformationTypeAdapterFactory();
+
+    @Override
+    public BatchInformation deserialize(JsonElement element, Type typeOfT, JsonDeserializationContext context) throws JsonParseException {
+        try {
+            JsonObject json = element.getAsJsonObject();
+            String type = json.get("class").getAsString();
+            JsonDeserializer<? extends BatchInformation> deserializer = factory.getDeserializerFromName(type);
+            return deserializer.deserialize(element, typeOfT, context);
+        } catch (Exception e) {
+            log.trace("Unable to deserialize JsonElement: " + element);
+            throw new JsonParseException(e);
+        }
+    }
+
+    @Override
+    public JsonElement serialize(BatchInformation batch, Type typeOfSrc, JsonSerializationContext context) {
+        JsonSerializer<? extends BatchInformation> serializer = factory.getSerializerFromName(batch.getClass().getName());
+
+        if (batch instanceof SpanBatchDeleteInformation) {
+            return ((SpanBatchInformationTypeAdapter) serializer).serialize((SpanBatchDeleteInformation) batch, typeOfSrc, context);
+        } else {
+            return ((JoinBatchInformationTypeAdapter) serializer).serialize((JoinBatchInformation) batch, typeOfSrc, context);
+        }
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapterFactory.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapterFactory.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapterFactory.java
new file mode 100644
index 0000000..0221bc2
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/BatchInformationTypeAdapterFactory.java
@@ -0,0 +1,65 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.util.Map;
+
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.JoinBatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonSerializer;
+
+/**
+ * Factory that uses class names to return the appropriate {@link JsonSerializer} and {@link JsonDeserializer} for serializing
+ * and deserializing {@link BatchInformation} objects.
+ *
+ */
+public class BatchInformationTypeAdapterFactory {
+
+    /**
+     * Retrieve the appropriate {@link JsonSerializer} using the class name of the {@link BatchInformation} implementation
+     * @param name - class name of the BatchInformation object
+     * @return JsonSerializer for serializing BatchInformation objects
+     */
+    public JsonSerializer<? extends BatchInformation> getSerializerFromName(String name) {
+        return serializers.get(name);
+    }
+    
+    /**
+     * Retrieve the appropriate {@link JsonDeserializer} using the class name of the {@link BatchInformation} implementation
+     * @param name - class name of the BatchInformation object
+     * @return JsonDeserializer for deserializing BatchInformation objects
+     */
+    public JsonDeserializer<? extends BatchInformation> getDeserializerFromName(String name) {
+        return deserializers.get(name);
+    }
+    
+    static final Map<String, JsonSerializer<? extends BatchInformation>> serializers = ImmutableMap.of(
+            SpanBatchDeleteInformation.class.getName(), new SpanBatchInformationTypeAdapter(),
+            JoinBatchInformation.class.getName(), new JoinBatchInformationTypeAdapter()
+        );
+    
+    static final Map<String, JsonDeserializer<? extends BatchInformation>> deserializers = ImmutableMap.of(
+            SpanBatchDeleteInformation.class.getName(), new SpanBatchInformationTypeAdapter(),
+            JoinBatchInformation.class.getName(), new JoinBatchInformationTypeAdapter()
+        );
+    
+}
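
A small sketch of the name-based dispatch.  Note that an unrecognized class
name makes these maps return null, which BatchInformationTypeAdapter surfaces
as a JsonParseException through its catch block:

    import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
    import org.apache.rya.indexing.pcj.fluo.app.batch.serializer.BatchInformationTypeAdapterFactory;

    import com.google.gson.JsonDeserializer;

    public class FactoryLookupExample {
        public static void main(String[] args) {
            BatchInformationTypeAdapterFactory factory = new BatchInformationTypeAdapterFactory();

            // The "class" field written into the JSON is the lookup key.
            JsonDeserializer<?> deserializer =
                    factory.getDeserializerFromName(SpanBatchDeleteInformation.class.getName());
            System.out.println(deserializer.getClass().getSimpleName());
            // prints: SpanBatchInformationTypeAdapter
        }
    }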

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/JoinBatchInformationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/JoinBatchInformationTypeAdapter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/JoinBatchInformationTypeAdapter.java
new file mode 100644
index 0000000..9f3f1a6
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/JoinBatchInformationTypeAdapter.java
@@ -0,0 +1,94 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.lang.reflect.Type;
+
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.RowColumn;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater.Side;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchInformation.Task;
+import org.apache.rya.indexing.pcj.fluo.app.batch.JoinBatchInformation;
+import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata.JoinType;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
+
+import com.google.common.base.Joiner;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * JsonSerializer/JsonDeserializer to serialize/deserialize {@link JoinBatchInformation} objects.
+ *
+ */
+public class JoinBatchInformationTypeAdapter implements JsonSerializer<JoinBatchInformation>, JsonDeserializer<JoinBatchInformation> {
+
+    private static final VisibilityBindingSetStringConverter converter = new VisibilityBindingSetStringConverter();
+
+    @Override
+    public JsonElement serialize(JoinBatchInformation batch, Type typeOfSrc, JsonSerializationContext context) {
+        JsonObject result = new JsonObject();
+        result.add("class", new JsonPrimitive(batch.getClass().getName()));
+        result.add("batchSize", new JsonPrimitive(batch.getBatchSize()));
+        result.add("task", new JsonPrimitive(batch.getTask().name()));
+        Column column = batch.getColumn();
+        result.add("column", new JsonPrimitive(column.getsFamily() + "\u0000" + column.getsQualifier()));
+        Span span = batch.getSpan();
+        result.add("span", new JsonPrimitive(span.getStart().getsRow() + "\u0000" + span.getEnd().getsRow()));
+        result.add("startInc", new JsonPrimitive(span.isStartInclusive()));
+        result.add("endInc", new JsonPrimitive(span.isEndInclusive()));
+        result.add("varOrder", new JsonPrimitive(Joiner.on(";").join(batch.getVarOrder().getVariableOrders())));
+        result.add("side", new JsonPrimitive(batch.getSide().name()));
+        result.add("joinType", new JsonPrimitive(batch.getJoinType().name()));
+        String updateVarOrderString = Joiner.on(";").join(batch.getBs().getBindingNames());
+        VariableOrder updateVarOrder = new VariableOrder(updateVarOrderString);
+        result.add("bindingSet", new JsonPrimitive(converter.convert(batch.getBs(), updateVarOrder)));
+        result.add("updateVarOrder", new JsonPrimitive(updateVarOrderString));
+        return result;
+    }
+
+    @Override
+    public JoinBatchInformation deserialize(JsonElement element, Type typeOfT, JsonDeserializationContext context)
+            throws JsonParseException {
+        JsonObject json = element.getAsJsonObject();
+        int batchSize = json.get("batchSize").getAsInt();
+        Task task = Task.valueOf(json.get("task").getAsString());
+        String[] colArray = json.get("column").getAsString().split("\u0000");
+        Column column = new Column(colArray[0], colArray[1]);
+        String[] rows = json.get("span").getAsString().split("\u0000");
+        boolean startInc = json.get("startInc").getAsBoolean();
+        boolean endInc = json.get("endInc").getAsBoolean();
+        Span span = new Span(new RowColumn(rows[0]), startInc, new RowColumn(rows[1]), endInc);
+        VariableOrder varOrder = new VariableOrder(json.get("varOrder").getAsString());
+        VariableOrder updateVarOrder = new VariableOrder(json.get("updateVarOrder").getAsString());
+        VisibilityBindingSet bs = converter.convert(json.get("bindingSet").getAsString(), updateVarOrder);
+        Side side = Side.valueOf(json.get("side").getAsString());
+        JoinType join = JoinType.valueOf(json.get("joinType").getAsString());
+        return JoinBatchInformation.builder().setBatchSize(batchSize).setTask(task).setSpan(span).setColumn(column).setBs(bs).setVarOrder(varOrder)
+               .setSide(side).setJoinType(join).build();
+    }
+
+}
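
Both this adapter and the Span adapter below flatten a Column and a Span into
single strings joined with a NUL character ("\u0000"), relying on that
delimiter not occurring in the encoded values; deserialize() simply splits on
it.  A minimal sketch of the encoding, with illustrative values:

    public class NulDelimiterExample {
        public static void main(String[] args) {
            // Encode: the family and qualifier joined with a NUL delimiter.
            String encoded = "bindingSet" + "\u0000" + "SP_789";

            // Decode: split on the delimiter, as in deserialize().
            String[] parts = encoded.split("\u0000");
            System.out.println(parts[0] + " | " + parts[1]);  // bindingSet | SP_789
        }
    }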

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/SpanBatchInformationTypeAdapter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/SpanBatchInformationTypeAdapter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/SpanBatchInformationTypeAdapter.java
new file mode 100644
index 0000000..98deb8e
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/batch/serializer/SpanBatchInformationTypeAdapter.java
@@ -0,0 +1,69 @@
+package org.apache.rya.indexing.pcj.fluo.app.batch.serializer;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import java.lang.reflect.Type;
+
+import org.apache.fluo.api.data.Column;
+import org.apache.fluo.api.data.RowColumn;
+import org.apache.fluo.api.data.Span;
+import org.apache.rya.indexing.pcj.fluo.app.batch.SpanBatchDeleteInformation;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+/**
+ * JsonSerializer/JsonDeserializer used to serialize/deserialize {@link SpanBatchDeleteInformation} objects.
+ *
+ */
+public class SpanBatchInformationTypeAdapter implements JsonSerializer<SpanBatchDeleteInformation>, JsonDeserializer<SpanBatchDeleteInformation> {
+
+    @Override
+    public SpanBatchDeleteInformation deserialize(JsonElement element, Type typeOfT, JsonDeserializationContext context) throws JsonParseException {
+        JsonObject json = element.getAsJsonObject();
+        int batchSize = json.get("batchSize").getAsInt();
+        String[] colArray = json.get("column").getAsString().split("\u0000");
+        Column column = new Column(colArray[0], colArray[1]);
+        String[] rows = json.get("span").getAsString().split("\u0000");
+        boolean startInc = json.get("startInc").getAsBoolean();
+        boolean endInc = json.get("endInc").getAsBoolean();
+        Span span = new Span(new RowColumn(rows[0]), startInc, new RowColumn(rows[1]), endInc);
+        return SpanBatchDeleteInformation.builder().setBatchSize(batchSize).setSpan(span).setColumn(column).build();
+    }
+
+    @Override
+    public JsonElement serialize(SpanBatchDeleteInformation batch, Type typeOfSrc, JsonSerializationContext context) {
+        JsonObject result = new JsonObject();
+        result.add("class", new JsonPrimitive(batch.getClass().getName()));
+        result.add("batchSize", new JsonPrimitive(batch.getBatchSize()));
+        Column column = batch.getColumn();
+        result.add("column", new JsonPrimitive(column.getsFamily() + "\u0000" + column.getsQualifier()));
+        Span span = batch.getSpan();
+        result.add("span", new JsonPrimitive(span.getStart().getsRow() + "\u0000" + span.getEnd().getsRow()));
+        result.add("startInc", new JsonPrimitive(span.isStartInclusive()));
+        result.add("endInc", new JsonPrimitive(span.isEndInclusive()));
+        return result;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaBindingSetExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaBindingSetExporter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaBindingSetExporter.java
index 152d156..7c4b3cc 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaBindingSetExporter.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/kafka/KafkaBindingSetExporter.java
@@ -36,16 +36,16 @@ import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
  * Incrementally exports SPARQL query results to Kafka topics.
  */
 public class KafkaBindingSetExporter implements IncrementalBindingSetExporter {
+    
     private static final Logger log = Logger.getLogger(KafkaBindingSetExporter.class);
-
     private final KafkaProducer<String, VisibilityBindingSet> producer;
 
+
     /**
      * Constructs an instance given a Kafka producer.
      *
-     * @param producer
-     *            for sending result set alerts to a broker. (not null)
-     *            Can be created and configured by {@link KafkaBindingSetExporterFactory}
+     * @param producer for sending result set alerts to a broker. (not null) Can be created and configured by
+     *            {@link KafkaBindingSetExporterFactory}
      */
     public KafkaBindingSetExporter(KafkaProducer<String, VisibilityBindingSet> producer) {
         super();

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporter.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporter.java
index 84d3ce6..54c39b7 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporter.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporter.java
@@ -24,8 +24,11 @@ import static java.util.Objects.requireNonNull;
 import java.util.Collections;
 
 import org.apache.fluo.api.client.TransactionBase;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
@@ -36,14 +39,16 @@ import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 public class RyaBindingSetExporter implements IncrementalBindingSetExporter {
 
     private final PrecomputedJoinStorage pcjStorage;
+    private final PeriodicQueryResultStorage periodicStorage;
 
     /**
      * Constructs an instance of {@link RyaBindingSetExporter}.
      *
      * @param pcjStorage - The PCJ storage the new results will be exported to. (not null)
      */
-    public RyaBindingSetExporter(final PrecomputedJoinStorage pcjStorage) {
+    public RyaBindingSetExporter(final PrecomputedJoinStorage pcjStorage, PeriodicQueryResultStorage periodicStorage) {
         this.pcjStorage = checkNotNull(pcjStorage);
+        this.periodicStorage = checkNotNull(periodicStorage);
     }
 
     @Override
@@ -59,8 +64,12 @@ public class RyaBindingSetExporter implements IncrementalBindingSetExporter {
         final String pcjId = fluoTx.gets(queryId, FluoQueryColumns.RYA_PCJ_ID);
 
         try {
-            pcjStorage.addResults(pcjId, Collections.singleton(result));
-        } catch (final PCJStorageException e) {
+            if (result.hasBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID)) {
+                periodicStorage.addPeriodicQueryResults(pcjId, Collections.singleton(result));
+            } else {
+                pcjStorage.addResults(pcjId, Collections.singleton(result));
+            }
+        } catch (final PCJStorageException | PeriodicQueryStorageException e) {
             throw new ResultExportException("A result could not be exported to Rya.", e);
         }
     }
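
The routing above hinges on a single binding: a result that carries the
IncrementalUpdateConstants.PERIODIC_BIN_ID binding (the bin timestamp assigned
by the PeriodicQueryUpdater) goes to periodic storage, and anything else
remains a standard PCJ result.  A hedged sketch of such a result; the "name"
binding, the values, and the use of the single-argument VisibilityBindingSet
constructor are illustrative assumptions:

    import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
    import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
    import org.openrdf.model.impl.ValueFactoryImpl;
    import org.openrdf.query.impl.MapBindingSet;

    public class PeriodicResultSketch {
        public static void main(String[] args) {
            MapBindingSet bs = new MapBindingSet();
            bs.addBinding("name", ValueFactoryImpl.getInstance().createLiteral("Alice"));

            // The bin id is what marks this result as periodic.
            bs.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID,
                    ValueFactoryImpl.getInstance().createLiteral(1500000000000L));

            VisibilityBindingSet result = new VisibilityBindingSet(bs);
            System.out.println(result.hasBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID));
        }
    }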

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporterFactory.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporterFactory.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporterFactory.java
index 86d593f..82ce9c6 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporterFactory.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/export/rya/RyaBindingSetExporterFactory.java
@@ -28,8 +28,10 @@ import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporterFactory;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
 
 import com.google.common.base.Optional;
 
@@ -62,9 +64,10 @@ public class RyaBindingSetExporterFactory implements IncrementalBindingSetExport
                 // Setup Rya PCJ Storage.
                 final String ryaInstanceName = params.getRyaInstanceName().get();
                 final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, ryaInstanceName);
-
+                final PeriodicQueryResultStorage periodicStorage = new AccumuloPeriodicQueryResultStorage(accumuloConn, ryaInstanceName);
+                
                 // Make the exporter.
-                final IncrementalBindingSetExporter exporter = new RyaBindingSetExporter(pcjStorage);
+                final IncrementalBindingSetExporter exporter = new RyaBindingSetExporter(pcjStorage, periodicStorage);
                 return Optional.of(exporter);
 
             } catch (final AccumuloException | AccumuloSecurityException e) {

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
index ac131e3..3a731c2 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/BindingSetUpdater.java
@@ -30,12 +30,14 @@ import org.apache.rya.indexing.pcj.fluo.app.ConstructQueryResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.FilterResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.JoinResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.PeriodicQueryUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.QueryResultUpdater;
 import org.apache.rya.indexing.pcj.fluo.app.query.AggregationMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.ConstructQueryMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.QueryMetadata;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
 
@@ -50,7 +52,6 @@ import edu.umd.cs.findbugs.annotations.NonNull;
 @DefaultAnnotation(NonNull.class)
 public abstract class BindingSetUpdater extends AbstractObserver {
     private static final Logger log = Logger.getLogger(BindingSetUpdater.class);
-
     // DAO
     private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
 
@@ -60,6 +61,7 @@ public abstract class BindingSetUpdater extends AbstractObserver {
     private final QueryResultUpdater queryUpdater = new QueryResultUpdater();
     private final AggregationResultUpdater aggregationUpdater = new AggregationResultUpdater();
     private final ConstructQueryResultUpdater constructUpdater = new ConstructQueryResultUpdater();
+    private final PeriodicQueryUpdater periodicQueryUpdater = new PeriodicQueryUpdater();
 
     @Override
     public abstract ObservedColumn getObservedColumn();
@@ -131,6 +133,15 @@ public abstract class BindingSetUpdater extends AbstractObserver {
                     throw new RuntimeException("Could not process a Join node.", e);
                 }
                 break;
+                
+            case PERIODIC_QUERY:
+                final PeriodicQueryMetadata parentPeriodicQuery = queryDao.readPeriodicQueryMetadata(tx, parentNodeId);
+                try{
+                    periodicQueryUpdater.updatePeriodicBinResults(tx, observedBindingSet, parentPeriodicQuery);
+                } catch(Exception e) {
+                    throw new RuntimeException("Could not process PeriodicBin node.", e);
+                }
+                break;
 
             case AGGREGATION:
                 final AggregationMetadata parentAggregation = queryDao.readAggregationMetadata(tx, parentNodeId);
@@ -141,8 +152,9 @@ public abstract class BindingSetUpdater extends AbstractObserver {
                 }
                 break;
 
+
             default:
-                throw new IllegalArgumentException("The parent node's NodeType must be of type Filter, Join, or Query, but was " + parentNodeType);
+                throw new IllegalArgumentException("The parent node's NodeType must be of type Filter, Join, PeriodicBin, or Query, but was " + parentNodeType);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
index f5c7177..ee03334 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/FilterObserver.java
@@ -23,11 +23,11 @@ import static java.util.Objects.requireNonNull;
 import org.apache.fluo.api.client.TransactionBase;
 import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
-import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FilterMetadata;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.openrdf.query.BindingSet;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
index 141ccc7..28e31d8 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/JoinObserver.java
@@ -23,11 +23,11 @@ import static java.util.Objects.requireNonNull;
 import org.apache.fluo.api.client.TransactionBase;
 import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
-import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.JoinMetadata;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.openrdf.query.BindingSet;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/PeriodicQueryObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/PeriodicQueryObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/PeriodicQueryObserver.java
new file mode 100644
index 0000000..e7072e7
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/PeriodicQueryObserver.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.fluo.app.observers;
+
+import static java.util.Objects.requireNonNull;
+
+import org.apache.fluo.api.client.TransactionBase;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
+import org.apache.rya.indexing.pcj.fluo.app.PeriodicQueryUpdater;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryMetadata;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
+
+/**
+ * This Observer is responsible for assigning Periodic Bin Ids to BindingSets.
+ * This class delegates to the {@link BindingSetUpdater} process method, which
+ * uses the {@link PeriodicQueryUpdater} to extract the timestamp from the BindingSet.
+ * The PeriodicQueryUpdater creates one copy of the given BindingSet for each bin
+ * that the timestamp falls into, and these BindingSets are written to the parent
+ * node of the given PeriodicQueryMetadata node.
+ *
+ */
+public class PeriodicQueryObserver extends BindingSetUpdater {
+
+    private static final VisibilityBindingSetSerDe BS_SERDE = new VisibilityBindingSetSerDe();
+    private final FluoQueryMetadataDAO queryDao = new FluoQueryMetadataDAO();
+
+    @Override
+    public ObservedColumn getObservedColumn() {
+        return new ObservedColumn(FluoQueryColumns.PERIODIC_QUERY_BINDING_SET, NotificationType.STRONG);
+    }
+
+    @Override
+    public Observation parseObservation(final TransactionBase tx, final Bytes row) throws Exception {
+        requireNonNull(tx);
+        requireNonNull(row);
+
+        // Read the PeriodicQuery metadata.
+        final String periodicBinNodeId = BindingSetRow.make(row).getNodeId();
+        final PeriodicQueryMetadata periodicBinMetadata = queryDao.readPeriodicQueryMetadata(tx, periodicBinNodeId);
+
+        // Read the Visibility Binding Set from the Value.
+        final Bytes valueBytes = tx.get(row, FluoQueryColumns.PERIODIC_QUERY_BINDING_SET);
+        final VisibilityBindingSet periodicBinBindingSet = BS_SERDE.deserialize(valueBytes);
+
+        // Figure out which node needs to handle the new binding set.
+        final String parentNodeId = periodicBinMetadata.getParentNodeId();
+
+        return new Observation(periodicBinNodeId, periodicBinBindingSet, parentNodeId);
+    }
+    
+
+}
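
A sketch of wiring the new observer into a Fluo application.  The registration
below assumes the Fluo 1.x ObserverSpecification API and is not part of this
patch:

    import org.apache.fluo.api.config.FluoConfiguration;
    import org.apache.fluo.api.config.ObserverSpecification;

    public class ObserverSetupSketch {
        public static void main(String[] args) {
            FluoConfiguration config = new FluoConfiguration();
            // Route PERIODIC_QUERY_BINDING_SET notifications through the observer.
            config.addObserver(new ObserverSpecification(
                    "org.apache.rya.indexing.pcj.fluo.app.observers.PeriodicQueryObserver"));
        }
    }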

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
index b675ba7..fbdca08 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/QueryResultObserver.java
@@ -29,7 +29,6 @@ import org.apache.fluo.api.data.Column;
 import org.apache.fluo.api.observer.AbstractObserver;
 import org.apache.log4j.Logger;
 import org.apache.rya.accumulo.utils.VisibilitySimplifier;
-import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter.ResultExportException;
 import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporterFactory;
@@ -38,6 +37,7 @@ import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaBindingSetExporter
 import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaBindingSetExporterFactory;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableSet;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
index b0548b4..69a651e 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/StatementPatternObserver.java
@@ -23,11 +23,11 @@ import static java.util.Objects.requireNonNull;
 import org.apache.fluo.api.client.TransactionBase;
 import org.apache.fluo.api.data.Bytes;
 import org.apache.rya.indexing.pcj.fluo.app.BindingSetRow;
-import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.openrdf.query.BindingSet;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
index 3c43885..6fc8e91 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/observers/TripleObserver.java
@@ -34,12 +34,12 @@ import org.apache.fluo.api.observer.AbstractObserver;
 import org.apache.log4j.Logger;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.indexing.pcj.fluo.app.IncUpdateDAO;
-import org.apache.rya.indexing.pcj.fluo.app.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryMetadataDAO;
 import org.apache.rya.indexing.pcj.fluo.app.query.StatementPatternMetadata;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetStringConverter;
 
 import com.google.common.base.Charsets;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
index 3bc8da6..ff42a0f 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/AggregationMetadata.java
@@ -321,6 +321,13 @@ public class AggregationMetadata extends CommonNodeMetadata {
             this.varOrder = varOrder;
             return this;
         }
+        
+        /**
+         * @return the variable order of binding sets that are emitted by this node.
+         */
+        public VariableOrder getVariableOrder() {
+            return varOrder;
+        }
 
         /**
          * @param parentNodeId - The Node ID of this node's parent.
@@ -330,6 +337,10 @@ public class AggregationMetadata extends CommonNodeMetadata {
             this.parentNodeId = parentNodeId;
             return this;
         }
+
+        /**
+         * @return The Node ID of this node's parent.
+         */
+        public String getParentNodeId() {
+            return parentNodeId;
+        }
 
         /**
          * @param childNodeId - The Node ID of this node's child.
@@ -360,6 +371,13 @@ public class AggregationMetadata extends CommonNodeMetadata {
             this.groupByVariables = groupByVariables;
             return this;
         }
+        
+        /**
+         * @return variable order that defines how data is grouped for the aggregation function
+         */
+        public VariableOrder getGroupByVariableOrder() {
+            return groupByVariables;
+        }
 
         /**
          * @return An instance of {@link AggregationMetadata} build using this builder's values.

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FilterMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FilterMetadata.java b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FilterMetadata.java
index 8866bd4..7e2e995 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FilterMetadata.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.app/src/main/java/org/apache/rya/indexing/pcj/fluo/app/query/FilterMetadata.java
@@ -18,19 +18,19 @@
  */
 package org.apache.rya.indexing.pcj.fluo.app.query;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 
-import edu.umd.cs.findbugs.annotations.Nullable;
-import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
-import edu.umd.cs.findbugs.annotations.NonNull;
-import net.jcip.annotations.Immutable;
-
 import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.rya.indexing.pcj.fluo.app.util.FilterSerializer;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 
 import com.google.common.base.Objects;
 
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import net.jcip.annotations.Immutable;
+
 /**
  * Metadata that is specific to Filter nodes.
  */
@@ -38,8 +38,7 @@ import com.google.common.base.Objects;
 @DefaultAnnotation(NonNull.class)
 public class FilterMetadata extends CommonNodeMetadata {
 
-    private final String originalSparql;
-    private final int filterIndexWithinSparql;
+    private final String filterSparql;
     private final String parentNodeId;
     private final String childNodeId;
 
@@ -48,7 +47,7 @@ public class FilterMetadata extends CommonNodeMetadata {
      *
      * @param nodeId - The ID the Fluo app uses to reference this node. (not null)
      * @param varOrder - The variable order of binding sets that are emitted by this node. (not null)
-     * @param originalSparql - The original SPARQL query the filter is derived from. (not null)
+     * @param filterSparql - SPARQL query representing the filter as generated by {@link FilterSerializer#serialize}. (not null)
-     * @param filterIndexWithinSparql - The index of the filter within the original SPARQL query
-     *   that this node processes. (not null)
      * @param parentNodeId - The node id of this node's parent. (not null)
@@ -57,14 +56,11 @@ public class FilterMetadata extends CommonNodeMetadata {
     public FilterMetadata(
             final String nodeId,
             final VariableOrder varOrder,
-            final String originalSparql,
-            final int filterIndexWithinSparql,
+            final String filterSparql,
             final String parentNodeId,
             final String childNodeId) {
         super(nodeId, varOrder);
-        this.originalSparql = checkNotNull(originalSparql);
-        checkArgument(filterIndexWithinSparql >= 0 , "filterIndexWithinSparql must be >= 0, was " + filterIndexWithinSparql);
-        this.filterIndexWithinSparql = filterIndexWithinSparql;
+        this.filterSparql = checkNotNull(filterSparql);
         this.parentNodeId = checkNotNull(parentNodeId);
         this.childNodeId = checkNotNull(childNodeId);
     }
@@ -72,16 +68,8 @@ public class FilterMetadata extends CommonNodeMetadata {
     /**
-     * @return The original SPARQL query the filter is derived from.
+     * @return The filter SPARQL generated by {@link FilterSerializer#serialize}.
      */
-    public String getOriginalSparql() {
-        return originalSparql;
-    }
-
-    /**
-     * @return The index of the filter within the original SPARQL query that
-     *   this node processes.
-     */
-    public int getFilterIndexWithinSparql() {
-        return filterIndexWithinSparql;
+    public String getFilterSparql() {
+        return filterSparql;
     }
 
     /**
@@ -103,8 +91,7 @@ public class FilterMetadata extends CommonNodeMetadata {
         return Objects.hashCode(
                 super.getNodeId(),
                 super.getVariableOrder(),
-                originalSparql,
-                filterIndexWithinSparql,
+                filterSparql,
                 parentNodeId,
                 childNodeId);
     }
@@ -119,8 +106,7 @@ public class FilterMetadata extends CommonNodeMetadata {
             if(super.equals(o)) {
                 final FilterMetadata filterMetadata = (FilterMetadata)o;
                 return new EqualsBuilder()
-                        .append(originalSparql, filterMetadata.originalSparql)
-                        .append(filterIndexWithinSparql, filterMetadata.filterIndexWithinSparql)
+                        .append(filterSparql, filterMetadata.filterSparql)
                         .append(parentNodeId, filterMetadata.parentNodeId)
                         .append(childNodeId, filterMetadata.childNodeId)
                         .isEquals();
@@ -140,8 +126,7 @@ public class FilterMetadata extends CommonNodeMetadata {
                 .append("    Variable Order: " + super.getVariableOrder() + "\n")
                 .append("    Parent Node ID: " + parentNodeId + "\n")
                 .append("    Child Node ID: " + childNodeId + "\n")
-                .append("    Original SPARQL: " + originalSparql + "\n")
-                .append("    Filter Index Within SPARQL: " + filterIndexWithinSparql + "\n")
+                .append("    Original SPARQL: " + filterSparql + "\n")
                 .append("}")
                 .toString();
     }
@@ -164,8 +149,7 @@ public class FilterMetadata extends CommonNodeMetadata {
 
         private final String nodeId;
         private VariableOrder varOrder;
-        private String originalSparql;
-        private int filterIndexWithinSparql;
+        private String filterSparql;
         private String parentNodeId;
         private String childNodeId;
 
@@ -202,20 +186,8 @@ public class FilterMetadata extends CommonNodeMetadata {
-         * @param originalSparql - The original SPARQL query the filter is derived from.
+         * @param filterSparql - The SPARQL representation of the filter this node processes.
          * @return This builder so that method invocations may be chained.
          */
-        public Builder setOriginalSparql(final String originalSparql) {
-            this.originalSparql = originalSparql;
-            return this;
-        }
-
-        /**
-         * Set the index of the filter within the original SPARQL query that this node processes.
-         *
-         * @param filterIndexWithinSparql - The index of the filter within the original
-         * SPARQL query that this node processes.
-         * @return This builder so that method invocations may be chained.
-         */
-        public Builder setFilterIndexWithinSparql(final int filterIndexWithinSparql) {
-            this.filterIndexWithinSparql = filterIndexWithinSparql;
+        public Builder setFilterSparql(final String filterSparql) {
+            this.filterSparql = filterSparql;
             return this;
         }
 
@@ -248,8 +220,7 @@ public class FilterMetadata extends CommonNodeMetadata {
             return new FilterMetadata(
                     nodeId,
                     varOrder,
-                    originalSparql,
-                    filterIndexWithinSparql,
+                    filterSparql,
                     parentNodeId,
                     childNodeId);
         }
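
For reference, a minimal sketch of how the reworked metadata gets populated.
Everything here except setFilterSparql is an assumption made for illustration,
including the static form of FilterSerializer#serialize:

    // Assumed: FilterSerializer#serialize renders a Filter algebra node back
    // into a SPARQL string (see FilterSerializer.java elsewhere in this commit).
    final String filterSparql = FilterSerializer.serialize(filter);
    // The builder now carries that single string in place of the old
    // (originalSparql, filterIndexWithinSparql) pair.
    builder.setFilterSparql(filterSparql);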


[9/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
RYA-280-Periodic Query Service. Closes #177.


Project: http://git-wip-us.apache.org/repos/asf/incubator-rya/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-rya/commit/2ca85427
Tree: http://git-wip-us.apache.org/repos/asf/incubator-rya/tree/2ca85427
Diff: http://git-wip-us.apache.org/repos/asf/incubator-rya/diff/2ca85427

Branch: refs/heads/master
Commit: 2ca854271c2eb928e9ccd10d40599c8c535952fa
Parents: ab8035a
Author: Caleb Meier <ca...@parsons.com>
Authored: Fri Apr 14 19:20:25 2017 -0700
Committer: Caleb Meier <ca...@parsons.com>
Committed: Wed Aug 2 14:00:45 2017 -0700

----------------------------------------------------------------------
 common/rya.api/pom.xml                          |   6 +-
 .../client/accumulo/AccumuloCreatePCJIT.java    |   1 +
 extras/pom.xml                                  |   1 +
 .../pcj/storage/PeriodicQueryResultStorage.java | 115 +++++
 .../storage/PeriodicQueryStorageException.java  |  49 ++
 .../storage/PeriodicQueryStorageMetadata.java   |  99 ++++
 .../AccumuloPeriodicQueryResultStorage.java     | 270 ++++++++++
 .../AccumuloValueBindingSetIterator.java        |  73 +++
 .../pcj/storage/accumulo/PcjTables.java         |   5 +-
 .../accumulo/PeriodicQueryTableNameFactory.java |  55 ++
 .../accumulo/VisibilityBindingSetSerDe.java     |  76 +++
 .../accumulo/VisibilityBindingSetSerDeTest.java |  52 ++
 .../accumulo/accumulo/AccumuloPcjStorageIT.java | 276 ----------
 .../integration/AccumuloPcjStorageIT.java       | 276 ++++++++++
 .../AccumuloPeriodicQueryResultStorageIT.java   | 271 ++++++++++
 extras/rya.pcj.fluo/README.md                   |  25 +-
 .../rya/indexing/pcj/fluo/api/CreatePcj.java    | 184 +++++--
 .../rya/indexing/pcj/fluo/api/DeletePcj.java    |  53 +-
 extras/rya.pcj.fluo/pcj.fluo.app/pom.xml        | 101 ++--
 .../rya/indexing/pcj/fluo/app/FilterFinder.java |  84 ---
 .../pcj/fluo/app/FilterResultUpdater.java       |  14 +-
 .../fluo/app/IncrementalUpdateConstants.java    |   5 +
 .../pcj/fluo/app/JoinResultUpdater.java         |   1 +
 .../rya/indexing/pcj/fluo/app/NodeType.java     |   4 +
 .../pcj/fluo/app/PeriodicQueryUpdater.java      | 138 +++++
 .../pcj/fluo/app/QueryResultUpdater.java        |   3 +-
 .../pcj/fluo/app/VisibilityBindingSetSerDe.java |  77 ---
 .../batch/AbstractBatchBindingSetUpdater.java   |  55 ++
 .../app/batch/AbstractSpanBatchInformation.java | 101 ++++
 .../fluo/app/batch/BasicBatchInformation.java   |  81 +++
 .../fluo/app/batch/BatchBindingSetUpdater.java  |  43 ++
 .../pcj/fluo/app/batch/BatchInformation.java    |  57 +++
 .../pcj/fluo/app/batch/BatchInformationDAO.java |  59 +++
 .../pcj/fluo/app/batch/BatchObserver.java       |  63 +++
 .../pcj/fluo/app/batch/BatchRowKeyUtil.java     |  68 +++
 .../app/batch/JoinBatchBindingSetUpdater.java   | 184 +++++++
 .../fluo/app/batch/JoinBatchInformation.java    | 255 ++++++++++
 .../app/batch/SpanBatchBindingSetUpdater.java   | 128 +++++
 .../app/batch/SpanBatchDeleteInformation.java   |  95 ++++
 .../serializer/BatchInformationSerializer.java  |  58 +++
 .../serializer/BatchInformationTypeAdapter.java |  73 +++
 .../BatchInformationTypeAdapterFactory.java     |  65 +++
 .../JoinBatchInformationTypeAdapter.java        |  94 ++++
 .../SpanBatchInformationTypeAdapter.java        |  69 +++
 .../export/kafka/KafkaBindingSetExporter.java   |   8 +-
 .../app/export/rya/RyaBindingSetExporter.java   |  15 +-
 .../rya/RyaBindingSetExporterFactory.java       |   7 +-
 .../fluo/app/observers/BindingSetUpdater.java   |  16 +-
 .../pcj/fluo/app/observers/FilterObserver.java  |   2 +-
 .../pcj/fluo/app/observers/JoinObserver.java    |   2 +-
 .../app/observers/PeriodicQueryObserver.java    |  72 +++
 .../fluo/app/observers/QueryResultObserver.java |   2 +-
 .../app/observers/StatementPatternObserver.java |   2 +-
 .../pcj/fluo/app/observers/TripleObserver.java  |   2 +-
 .../pcj/fluo/app/query/AggregationMetadata.java |  18 +
 .../pcj/fluo/app/query/FilterMetadata.java      |  67 +--
 .../indexing/pcj/fluo/app/query/FluoQuery.java  |  81 ++-
 .../pcj/fluo/app/query/FluoQueryColumns.java    |  66 ++-
 .../fluo/app/query/FluoQueryMetadataDAO.java    | 119 ++++-
 .../fluo/app/query/PeriodicQueryMetadata.java   | 287 +++++++++++
 .../pcj/fluo/app/query/PeriodicQueryNode.java   | 154 ++++++
 .../pcj/fluo/app/query/QueryMetadata.java       |  15 +
 .../fluo/app/query/SparqlFluoQueryBuilder.java  |  89 +++-
 .../pcj/fluo/app/util/FilterSerializer.java     | 127 +++++
 .../pcj/fluo/app/util/FluoClientFactory.java    |  56 ++
 .../pcj/fluo/app/util/PeriodicQueryUtil.java    | 381 ++++++++++++++
 .../indexing/pcj/fluo/app/FilterFinderTest.java |  84 ---
 .../fluo/app/VisibilityBindingSetSerDeTest.java |  51 --
 .../BatchInformationSerializerTest.java         |  73 +++
 .../fluo/app/query/PeriodicQueryUtilTest.java   | 229 +++++++++
 .../fluo/client/util/QueryReportRenderer.java   |   3 +-
 .../rya.pcj.fluo/pcj.fluo.integration/pom.xml   |   5 +
 .../rya/indexing/pcj/fluo/FluoITBase.java       | 282 ----------
 .../indexing/pcj/fluo/KafkaExportITBase.java    | 370 --------------
 .../rya/indexing/pcj/fluo/RyaExportITBase.java  |  85 ----
 .../pcj/fluo/api/CountStatementsIT.java         |   2 +-
 .../indexing/pcj/fluo/api/GetPcjMetadataIT.java |   2 +-
 .../indexing/pcj/fluo/api/GetQueryReportIT.java |   2 +-
 .../indexing/pcj/fluo/api/ListQueryIdsIT.java   |   2 +-
 .../fluo/app/query/FluoQueryMetadataDAOIT.java  |  46 +-
 .../pcj/fluo/integration/BatchDeleteIT.java     | 316 ++++++++++++
 .../pcj/fluo/integration/CreateDeleteIT.java    |   2 +-
 .../indexing/pcj/fluo/integration/InputIT.java  |   2 +-
 .../pcj/fluo/integration/KafkaExportIT.java     |   2 +-
 .../integration/KafkaRyaSubGraphExportIT.java   |   2 +-
 .../indexing/pcj/fluo/integration/QueryIT.java  | 359 ++++++++++++-
 .../pcj/fluo/integration/RyaExportIT.java       |   2 +-
 .../RyaInputIncrementalUpdateIT.java            |   2 +-
 .../pcj/fluo/integration/StreamingTestIT.java   |   2 +-
 .../HistoricStreamingVisibilityIT.java          |   2 +-
 .../pcj/fluo/visibility/PcjVisibilityIT.java    |   3 +-
 extras/rya.pcj.fluo/pcj.fluo.test.base/pom.xml  | 108 ++++
 .../org/apache/rya/kafka/base/KafkaITBase.java  |  78 +++
 .../rya/pcj/fluo/test/base/FluoITBase.java      | 300 +++++++++++
 .../pcj/fluo/test/base/KafkaExportITBase.java   | 370 ++++++++++++++
 .../rya/pcj/fluo/test/base/RyaExportITBase.java |  80 +++
 extras/rya.pcj.fluo/pom.xml                     |   1 +
 .../periodic.service.integration.tests/pom.xml  |  77 +++
 .../PeriodicNotificationApplicationIT.java      | 509 +++++++++++++++++++
 .../PeriodicNotificationProviderIT.java         |  68 +++
 .../PeriodicNotificationExporterIT.java         | 130 +++++
 .../PeriodicNotificationProcessorIT.java        | 121 +++++
 .../pruner/PeriodicNotificationBinPrunerIT.java | 286 +++++++++++
 .../PeriodicCommandNotificationConsumerIT.java  | 120 +++++
 .../src/test/resources/notification.properties  |  35 ++
 .../periodic.service.notification/pom.xml       | 107 ++++
 .../periodic/notification/api/BinPruner.java    |  40 ++
 .../notification/api/BindingSetExporter.java    |  38 ++
 .../notification/api/CreatePeriodicQuery.java   | 113 ++++
 .../periodic/notification/api/LifeCycle.java    |  45 ++
 .../rya/periodic/notification/api/NodeBin.java  |  77 +++
 .../periodic/notification/api/Notification.java |  34 ++
 .../api/NotificationCoordinatorExecutor.java    |  41 ++
 .../notification/api/NotificationProcessor.java |  41 ++
 .../api/PeriodicNotificationClient.java         |  64 +++
 .../PeriodicApplicationException.java           |  47 ++
 .../PeriodicNotificationApplication.java        | 207 ++++++++
 ...dicNotificationApplicationConfiguration.java | 254 +++++++++
 .../PeriodicNotificationApplicationFactory.java | 140 +++++
 ...PeriodicNotificationCoordinatorExecutor.java | 159 ++++++
 .../notification/exporter/BindingSetRecord.java |  80 +++
 .../exporter/KafkaExporterExecutor.java         | 109 ++++
 .../KafkaPeriodicBindingSetExporter.java        |  98 ++++
 .../notification/BasicNotification.java         |  76 +++
 .../notification/CommandNotification.java       |  99 ++++
 .../notification/PeriodicNotification.java      | 178 +++++++
 .../notification/TimestampedNotification.java   |  69 +++
 .../NotificationProcessorExecutor.java          | 114 +++++
 .../TimestampedNotificationProcessor.java       | 203 ++++++++
 .../notification/pruner/AccumuloBinPruner.java  |  66 +++
 .../notification/pruner/FluoBinPruner.java      |  76 +++
 .../pruner/PeriodicQueryPruner.java             | 108 ++++
 .../pruner/PeriodicQueryPrunerExecutor.java     | 104 ++++
 .../recovery/PeriodicNotificationProvider.java  | 138 +++++
 .../kafka/KafkaNotificationProvider.java        | 123 +++++
 .../KafkaNotificationRegistrationClient.java    |  80 +++
 .../kafka/PeriodicNotificationConsumer.java     |  88 ++++
 .../BasicNotificationTypeAdapter.java           |  55 ++
 .../serialization/BindingSetSerDe.java          | 105 ++++
 .../CommandNotificationSerializer.java          |  76 +++
 .../CommandNotificationTypeAdapter.java         |  89 ++++
 .../PeriodicNotificationTypeAdapter.java        |  73 +++
 .../CommandNotificationSerializerTest.java      |  60 +++
 extras/rya.periodic.service/pom.xml             |  39 ++
 144 files changed, 11783 insertions(+), 1593 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/common/rya.api/pom.xml
----------------------------------------------------------------------
diff --git a/common/rya.api/pom.xml b/common/rya.api/pom.xml
index 94f191d..3c80a13 100644
--- a/common/rya.api/pom.xml
+++ b/common/rya.api/pom.xml
@@ -71,9 +71,9 @@ under the License.
             <artifactId>jcip-annotations</artifactId>
         </dependency>
         <dependency>
-			<groupId>com.esotericsoftware.kryo</groupId>
-			<artifactId>kryo</artifactId>
-			<version>2.24.0</version>
+            <groupId>com.esotericsoftware.kryo</groupId>
+            <artifactId>kryo</artifactId>
+            <version>2.24.0</version>
 		</dependency>
         <dependency>
             <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
----------------------------------------------------------------------
diff --git a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
index cb4b29a..9bbf01f 100644
--- a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
+++ b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloCreatePCJIT.java
@@ -80,6 +80,7 @@ public class AccumuloCreatePCJIT extends FluoITBase {
             assertEquals(sparql, pcjMetadata.getSparql());
             assertEquals(0L, pcjMetadata.getCardinality());
 
+
             // Verify a Query ID was added for the query within the Fluo app.
             final List<String> fluoQueryIds = new ListQueryIds().listQueryIds(fluoClient);
             assertEquals(1, fluoQueryIds.size());

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/pom.xml
----------------------------------------------------------------------
diff --git a/extras/pom.xml b/extras/pom.xml
index 6acb51f..a2c8d58 100644
--- a/extras/pom.xml
+++ b/extras/pom.xml
@@ -33,6 +33,7 @@ under the License.
     <modules>
         <module>rya.prospector</module>
         <module>rya.manual</module>
+        <module>rya.periodic.service</module>
         <module>rya.console</module>
         <module>indexing</module>
         <module>rya.indexing.pcj</module>

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryResultStorage.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryResultStorage.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryResultStorage.java
new file mode 100644
index 0000000..697b350
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryResultStorage.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.openrdf.query.BindingSet;
+
+/**
+ * Interface for storing and retrieving Periodic Query Results.
+ *
+ */
+public interface PeriodicQueryResultStorage {
+    
+    /**
+     * Binding name for the periodic bin id
+     */
+    public static final String PeriodicBinId = "periodicBinId";
+
+    /**
+     * Creates a PeriodicQuery result storage layer for the given SPARQL query
+     * @param sparql - SPARQL query
+     * @return - id of the storage layer for the given SPARQL query
+     * @throws PeriodicQueryStorageException
+     */
+    public String createPeriodicQuery(String sparql) throws PeriodicQueryStorageException;
+    
+    /**
+     * Creates a PeriodicQuery result storage layer for the given SPARQL query with the given id
+     * @param queryId - id of the storage layer for the given SPARQL query
+     * @param sparql - SPARQL query whose periodic results will be stored
+     * @return - id of the storage layer 
+     * @throws PeriodicQueryStorageException
+     */
+    public String createPeriodicQuery(String queryId, String sparql) throws PeriodicQueryStorageException;
+    
+    /**
+     * Creates a PeriodicQuery result storage layer for the given SPARQL query with the given id
+     * whose results are written in the order indicated by the specified VariableOrder.
+     * @param queryId - id of the storage layer for the given SPARQL query
+     * @param sparql - SPARQL query whose periodic results will be stored
+     * @param varOrder - VariableOrder indicating the order that results will be written in
+     * @throws PeriodicQueryStorageException
+     */
+    public void createPeriodicQuery(String queryId, String sparql, VariableOrder varOrder) throws PeriodicQueryStorageException;
+    
+    /**
+     * Retrieve the {@link PeriodicQueryStorageMetadata} for the given query id
+     * @param queryID - id of the query whose metadata will be returned
+     * @return PeriodicQueryStorageMetadata
+     * @throws PeriodicQueryStorageException
+     */
+    public PeriodicQueryStorageMetadata getPeriodicQueryMetadata(String queryID) throws PeriodicQueryStorageException;
+    
+    /**
+     * Add periodic query results to the storage layer indicated by the given query id
+     * @param queryId - id indicating the storage layer that results will be added to
+     * @param results - query results to be added to storage
+     * @throws PeriodicQueryStorageException
+     */
+    public void addPeriodicQueryResults(String queryId, Collection<VisibilityBindingSet> results) throws PeriodicQueryStorageException;
+    
+    /**
+     * Deletes periodic query results from the storage layer
+     * @param queryId - id indicating the storage layer that results will be deleted from
+     * @param binID - bin id indicating the periodic id of results to be deleted
+     * @throws PeriodicQueryStorageException
+     */
+    public void deletePeriodicQueryResults(String queryId, long binID) throws PeriodicQueryStorageException;
+    
+    /**
+     * Deletes all results for the storage layer indicated by the given query id 
+     * @param queryID - id indicating the storage layer whose results will be deleted
+     * @throws PeriodicQueryStorageException
+     */
+    public void deletePeriodicQuery(String queryID) throws PeriodicQueryStorageException;
+    
+    /**
+     * List results in the given storage layer indicated by the query id
+     * @param queryId - id indicating the storage layer whose results will be listed
+     * @param binID - Optional id indicating that only results with the specified periodic bin id should be listed
+     * @return - CloseableIterator over the matching results
+     * @throws PeriodicQueryStorageException
+     */
+    public CloseableIterator<BindingSet> listResults(String queryId, Optional<Long> binID) throws PeriodicQueryStorageException;
+    
+    /**
+     * List all storage tables containing periodic results.
+     * @return List of Strings with names of all tables containing periodic results
+     */
+    public List<String> listPeriodicTables();
+    
+}
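
A rough end-to-end sketch of the interface, assuming an implementation such as
the Accumulo one below; accumuloConn, sparql, visibilityBindingSets, and binId
are assumed to exist, and checked-exception handling is elided:

    PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(accumuloConn, "rya_");
    String queryId = storage.createPeriodicQuery(sparql);            // creates the result table
    storage.addPeriodicQueryResults(queryId, visibilityBindingSets); // results land in bins
    CloseableIterator<BindingSet> results = storage.listResults(queryId, Optional.of(binId));
    try {
        while (results.hasNext()) {
            System.out.println(results.next());
        }
    } finally {
        results.close();
    }
    storage.deletePeriodicQueryResults(queryId, binId); // prune a single bin
    storage.deletePeriodicQuery(queryId);               // drop the table entirely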

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageException.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageException.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageException.java
new file mode 100644
index 0000000..f9e6969
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage;
+
+/**
+ * This Exception is thrown by any implementation of {@link PeriodicQueryResultStorage}
+ * when any of its methods fail. 
+ *
+ */
+public class PeriodicQueryStorageException extends Exception {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Constructs an instance of {@link PeriodicQueryStorageException}.
+     *
+     * @param message - Describes why the exception is being thrown.
+     */
+    public PeriodicQueryStorageException(final String message) {
+        super(message);
+    }
+
+    /**
+     * Constructs an instance of {@link PeriodicQueryStorageException}.
+     *
+     * @param message - Describes why the exception is being thrown.
+     * @param cause - The exception that caused this one to be thrown.
+     */
+    public PeriodicQueryStorageException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageMetadata.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageMetadata.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageMetadata.java
new file mode 100644
index 0000000..9ce3522
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/PeriodicQueryStorageMetadata.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage;
+
+import java.util.Objects;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+
+import com.google.common.base.Preconditions;
+
+/**
+ *  Metadata for a given PeriodicQueryStorage table. 
+ */
+public class PeriodicQueryStorageMetadata {
+
+    private String sparql;
+    private VariableOrder varOrder;
+
+    /**
+     * Create a PeriodicQueryStorageMetadata object
+     * @param sparql - SPARQL query whose results are stored in the table
+     * @param varOrder - order that BindingSet values are written in the table
+     */
+    public PeriodicQueryStorageMetadata(String sparql, VariableOrder varOrder) {
+        this.sparql = Preconditions.checkNotNull(sparql);
+        this.varOrder = Preconditions.checkNotNull(varOrder);
+    }
+    
+    /**
+     * Conversion constructor.
+     * @param metadata - PcjMetadata whose SPARQL query and first VariableOrder are copied
+     */
+    public PeriodicQueryStorageMetadata(PcjMetadata metadata) {
+        this(metadata.getSparql(), metadata.getVarOrders().iterator().next());
+    }
+    
+
+    /**
+     * @return SPARQL query whose results are stored in the table
+     */
+    public String getSparql() {
+        return sparql;
+    }
+    
+    /**
+     * @return VariableOrder indicating the order that BindingSet Values are written in the table
+     */
+    public VariableOrder getVariableOrder() {
+        return varOrder;
+    }
+    
+    @Override
+    public int hashCode() {
+        return Objects.hash(sparql, varOrder);
+    }
+   
+    @Override
+    public boolean equals(final Object o) {
+        if (o == this) {
+            return true;
+        }
+
+        if (o instanceof PeriodicQueryStorageMetadata) {
+            PeriodicQueryStorageMetadata metadata = (PeriodicQueryStorageMetadata) o;
+            return new EqualsBuilder().append(sparql, metadata.sparql).append(varOrder, metadata.varOrder).isEquals();
+        }
+
+        return false;
+    }
+    
+    @Override
+    public String toString() {
+        return new StringBuilder()
+                .append("PeriodicQueryStorageMetadata {\n")
+                .append("    SPARQL: " + sparql + "\n")
+                .append("    Variable Order: " + varOrder + "\n")
+                .append("}")
+                .toString();
+    }
+    
+    
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPeriodicQueryResultStorage.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPeriodicQueryResultStorage.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPeriodicQueryResultStorage.java
new file mode 100644
index 0000000..d7a50a7
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloPeriodicQueryResultStorage.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.apache.rya.indexing.pcj.storage.PCJIdFactory;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.accumulo.BindingSetConverter.BindingSetConversionException;
+import org.openrdf.model.impl.LiteralImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.algebra.AggregateOperatorBase;
+import org.openrdf.query.algebra.ExtensionElem;
+import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.query.algebra.helpers.QueryModelVisitorBase;
+import org.openrdf.query.parser.sparql.SPARQLParser;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * This class is the Accumulo implementation of {@link PeriodicQueryResultStorage} for
+ * creating, deleting, and interacting with tables where PeriodicQuery results are stored.
+ */
+public class AccumuloPeriodicQueryResultStorage implements PeriodicQueryResultStorage {
+
+    private String ryaInstance;
+    private Connector accumuloConn;
+    private Authorizations auths;
+    private final PCJIdFactory pcjIdFactory = new PCJIdFactory();
+    private final AccumuloPcjSerializer converter = new AccumuloPcjSerializer();
+    private static final PcjTables pcjTables = new PcjTables();
+    private static final PeriodicQueryTableNameFactory tableNameFactory = new PeriodicQueryTableNameFactory();
+
+    /**
+     * Creates an AccumuloPeriodicQueryResultStorage object.
+     * @param accumuloConn - Accumulo Connector for connecting to an Accumulo instance
+     * @param ryaInstance - Rya Instance name for connecting to Rya
+     */
+    public AccumuloPeriodicQueryResultStorage(Connector accumuloConn, String ryaInstance) {
+        this.accumuloConn = Preconditions.checkNotNull(accumuloConn);
+        this.ryaInstance = Preconditions.checkNotNull(ryaInstance);
+        String user = accumuloConn.whoami();
+        try {
+            this.auths = accumuloConn.securityOperations().getUserAuthorizations(user);
+        } catch (AccumuloException | AccumuloSecurityException e) {
+            throw new RuntimeException("Unable access user: " + user + "authorizations.");
+        }
+    }
+
+    @Override
+    public String createPeriodicQuery(String sparql) throws PeriodicQueryStorageException {
+        Preconditions.checkNotNull(sparql);
+        String queryId = pcjIdFactory.nextId();
+        return createPeriodicQuery(queryId, sparql);
+    }
+    
+    @Override
+    public String createPeriodicQuery(String queryId, String sparql) throws PeriodicQueryStorageException {
+        Set<String> bindingNames;
+        try {
+            bindingNames = new AggregateVariableRemover().getNonAggregationVariables(sparql);
+        } catch (MalformedQueryException e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+        List<String> varOrderList = new ArrayList<>();
+        varOrderList.add(PeriodicQueryResultStorage.PeriodicBinId);
+        varOrderList.addAll(bindingNames);
+        createPeriodicQuery(queryId, sparql, new VariableOrder(varOrderList));
+        return queryId;
+    }
+
+    @Override
+    public void createPeriodicQuery(String queryId, String sparql, VariableOrder order) throws PeriodicQueryStorageException {
+        Preconditions.checkNotNull(sparql);
+        Preconditions.checkNotNull(queryId);
+        Preconditions.checkNotNull(order);
+        Preconditions.checkArgument(PeriodicQueryResultStorage.PeriodicBinId.equals(order.getVariableOrders().get(0)),
+                "periodicBinId binding name must occur first in VariableOrder.");
+        String tableName = tableNameFactory.makeTableName(ryaInstance, queryId);
+        Set<VariableOrder> varOrders = new HashSet<>();
+        varOrders.add(order);
+        try {
+            pcjTables.createPcjTable(accumuloConn, tableName, varOrders, sparql);
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    @Override
+    public PeriodicQueryStorageMetadata getPeriodicQueryMetadata(String queryId) throws PeriodicQueryStorageException {
+        try {
+            return new PeriodicQueryStorageMetadata(
+                    pcjTables.getPcjMetadata(accumuloConn, tableNameFactory.makeTableName(ryaInstance, queryId)));
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    @Override
+    public void addPeriodicQueryResults(String queryId, Collection<VisibilityBindingSet> results) throws PeriodicQueryStorageException {
+        results.forEach(x -> Preconditions.checkArgument(x.hasBinding(PeriodicQueryResultStorage.PeriodicBinId),
+                "BindingSet must contain periodBinId binding."));
+        try {
+            pcjTables.addResults(accumuloConn, tableNameFactory.makeTableName(ryaInstance, queryId), results);
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    @Override
+    public void deletePeriodicQueryResults(String queryId, long binId) throws PeriodicQueryStorageException {
+        String tableName = tableNameFactory.makeTableName(ryaInstance, queryId);
+        try {
+            Text prefix = getRowPrefix(binId);
+            BatchDeleter deleter = accumuloConn.createBatchDeleter(tableName, auths, 1, new BatchWriterConfig());
+            deleter.setRanges(Collections.singleton(Range.prefix(prefix)));
+            deleter.delete();
+            deleter.close();
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    public void deletePeriodicQueryResults(String queryId) throws PeriodicQueryStorageException {
+        try {
+            pcjTables.purgePcjTable(accumuloConn, tableNameFactory.makeTableName(ryaInstance, queryId));
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    @Override
+    public void deletePeriodicQuery(String queryId) throws PeriodicQueryStorageException {
+        try {
+            pcjTables.dropPcjTable(accumuloConn, tableNameFactory.makeTableName(ryaInstance, queryId));
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(e.getMessage());
+        }
+    }
+
+    @Override
+    public CloseableIterator<BindingSet> listResults(String queryId, Optional<Long> binId)
+            throws PeriodicQueryStorageException {
+        requireNonNull(queryId);
+
+        String tableName = tableNameFactory.makeTableName(ryaInstance, queryId);
+        // Fetch the Variable Orders for the binding sets and choose one of
+        // them. It doesn't matter which one we choose because they all
+        // result in the same output.
+        final PeriodicQueryStorageMetadata metadata = getPeriodicQueryMetadata(queryId);
+        final VariableOrder varOrder = metadata.getVariableOrder();
+
+        try {
+            // Fetch only the Binding Sets whose Variable Order matches the
+            // selected one.
+            final Scanner scanner = accumuloConn.createScanner(tableName, auths);
+            scanner.fetchColumnFamily(new Text(varOrder.toString()));
+            if (binId.isPresent()) {
+                scanner.setRange(Range.prefix(getRowPrefix(binId.get())));
+            }
+            return new AccumuloValueBindingSetIterator(scanner);
+
+        } catch (Exception e) {
+            throw new PeriodicQueryStorageException(String.format("PCJ Table does not exist for name '%s'.", tableName), e);
+        }
+    }
+    
+    private Text getRowPrefix(long binId) throws BindingSetConversionException {
+        QueryBindingSet bs = new QueryBindingSet();
+        bs.addBinding(PeriodicQueryResultStorage.PeriodicBinId, new LiteralImpl(Long.toString(binId), XMLSchema.LONG));
+        
+        return new Text(converter.convert(bs, new VariableOrder(PeriodicQueryResultStorage.PeriodicBinId)));
+    }
+
+    @Override
+    public List<String> listPeriodicTables() {
+
+        final List<String> periodicTables = new ArrayList<>();
+        final String periodicPrefix = ryaInstance + PeriodicQueryTableNameFactory.PeriodicTableSuffix;
+        boolean foundInstance = false;
+
+        for (final String tableName : accumuloConn.tableOperations().list()) {
+            if (tableName.startsWith(ryaInstance)) {
+                // This table is part of the target Rya instance.
+                foundInstance = true;
+
+                if (tableName.startsWith(periodicPrefix)) {
+                    periodicTables.add(tableName);
+                }
+            } else if (foundInstance) {
+                // We have encountered the first table name that does not start
+                // with the rya instance name after those that do. Because the
+                // list is sorted, there can't be any more periodic query
+                // tables for the target instance in the list.
+                break;
+            }
+        }
+        return periodicTables;
+    }
+    
+    /**
+     * Class for removing any aggregate variables from the ProjectionElementList
+     * of a parsed SPARQL query. This ensures that only non-aggregation
+     * values are contained in the Accumulo row. Unlike non-aggregation
+     * variables, aggregation variables are updated over time, so they are
+     * stored in the serialized BindingSet in the Accumulo Value field,
+     * which is overwritten whenever an entry with the same Key and an
+     * updated Value (new aggregation result) is written to the table.
+     *
+     */
+    static class AggregateVariableRemover extends QueryModelVisitorBase<RuntimeException> {
+        
+        private Set<String> bindingNames;
+        
+        public Set<String> getNonAggregationVariables(String sparql) throws MalformedQueryException {
+            TupleExpr te = new SPARQLParser().parseQuery(sparql, null).getTupleExpr();
+            bindingNames = te.getBindingNames();
+            te.visit(this);
+            return bindingNames;
+        }
+        
+        @Override
+        public void meet(ExtensionElem node) {
+            if(node.getExpr() instanceof AggregateOperatorBase) {
+                bindingNames.remove(node.getName());
+            }
+        }
+        
+    }
+
+}
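
To make the row layout concrete, a hedged sketch of what createPeriodicQuery
derives for a simple aggregating query; the query and binding names are
illustrative, AggregateVariableRemover is the package-private helper above,
and exception handling is elided:

    String sparql = "SELECT ?id (count(?obs) as ?total) "
                  + "WHERE { ?obs <urn:hasId> ?id . } "
                  + "GROUP BY ?id";
    Set<String> nonAggVars = new AggregateVariableRemover().getNonAggregationVariables(sparql);
    // nonAggVars contains just "id"; ?total is dropped because it is an aggregation.
    List<String> varOrderList = new ArrayList<>();
    varOrderList.add(PeriodicQueryResultStorage.PeriodicBinId); // bin id always leads
    varOrderList.addAll(nonAggVars);
    VariableOrder rowOrder = new VariableOrder(varOrderList);   // [periodicBinId, id]
    // The stable values form the Accumulo row key; the updatable count travels
    // in the serialized VisibilityBindingSet written to the Value.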

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloValueBindingSetIterator.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloValueBindingSetIterator.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloValueBindingSetIterator.java
new file mode 100644
index 0000000..946c712
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/AccumuloValueBindingSetIterator.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.openrdf.query.BindingSet;
+
+/**
+ * Implementation of CloseableIterator for retrieving results from a {@link PeriodicQueryResultStorage}
+ * table.
+ *
+ */
+public class AccumuloValueBindingSetIterator implements CloseableIterator<BindingSet>{
+    
+    private final Scanner scanner;
+    private final Iterator<Entry<Key, Value>> iter;
+    private final VisibilityBindingSetSerDe bsSerDe = new VisibilityBindingSetSerDe();
+    
+    public AccumuloValueBindingSetIterator(Scanner scanner) {
+        this.scanner = scanner;
+        iter = scanner.iterator();
+    }
+    
+    @Override
+    public boolean hasNext() {
+        return iter.hasNext();
+    }
+    
+    @Override 
+    public BindingSet next() {
+        try {
+            return bsSerDe.deserialize(Bytes.of(iter.next().getValue().get())).set;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+    
+    @Override
+    public void close() {
+        scanner.close();
+    }
+
+    @Override
+    public void remove() {
+        throw new UnsupportedOperationException();
+    }
+    
+
+}
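
A minimal consumption sketch, mirroring the scanner setup in listResults above;
tableName, varOrder, and auths are assumed to exist, and exception handling is
elided:

    Scanner scanner = accumuloConn.createScanner(tableName, auths);
    scanner.fetchColumnFamily(new Text(varOrder.toString()));
    AccumuloValueBindingSetIterator iter = new AccumuloValueBindingSetIterator(scanner);
    try {
        while (iter.hasNext()) {
            BindingSet bs = iter.next(); // deserialized from the Accumulo Value, not the Key
        }
    } finally {
        iter.close(); // releases the underlying Scanner
    }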

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
index 5d13597..d5451ae 100644
--- a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PcjTables.java
@@ -403,6 +403,7 @@ public class PcjTables {
 
         final Set<Mutation> mutations = new HashSet<>();
         final AccumuloPcjSerializer converter = new AccumuloPcjSerializer();
+        VisibilityBindingSetSerDe bsSerDe = new VisibilityBindingSetSerDe();
 
         for(final VariableOrder varOrder : varOrders) {
             try {
@@ -412,9 +413,9 @@ public class PcjTables {
                 // Row ID = binding set values, Column Family = variable order of the binding set.
                 final Mutation addResult = new Mutation(rowKey);
                 final String visibility = result.getVisibility();
-                addResult.put(varOrder.toString(), "", new ColumnVisibility(visibility), "");
+                addResult.put(varOrder.toString(), "", new ColumnVisibility(visibility), new Value(bsSerDe.serialize(result).toArray()));
                 mutations.add(addResult);
-            } catch(final BindingSetConversionException e) {
+            } catch(Exception e) {
                 throw new PCJStorageException("Could not serialize a result.", e);
             }
         }
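
The substantive change here: PCJ rows previously carried an empty Accumulo
Value, and each row now carries its full serialized VisibilityBindingSet. A
hedged sketch of reading one entry back (entry is assumed to come from a
Scanner over the table; exception handling is elided):

    Value value = entry.getValue(); // entry: a Map.Entry<Key, Value>
    VisibilityBindingSet result = new VisibilityBindingSetSerDe()
            .deserialize(Bytes.of(value.get()));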

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PeriodicQueryTableNameFactory.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PeriodicQueryTableNameFactory.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PeriodicQueryTableNameFactory.java
new file mode 100644
index 0000000..561cad2
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/PeriodicQueryTableNameFactory.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+
+/**
+ * Class for creating the names of {@link PeriodicQueryResultStorage} tables.
+ *
+ */
+public class PeriodicQueryTableNameFactory {
+
+    public static final String PeriodicTableSuffix = "PERIODIC_QUERY_";
+    
+    /**
+     * Creates the name of a table from the indicated Rya instance and query id
+     * @param ryaInstance - name of the Rya instance the table will belong to
+     * @param queryId - id of the query whose results will be stored in this table
+     * @return - name of PeriodicQueryResultStorage table
+     */
+    public String makeTableName(final String ryaInstance, final String queryId) {
+        requireNonNull(ryaInstance);
+        requireNonNull(queryId);
+        return ryaInstance + PeriodicTableSuffix + queryId.replaceAll("-", "");
+    }
+
+    /**
+     * Extracts the query id from a PeriodicQueryResultStorage table name
+     * @param periodTableName - name of the table
+     * @return - query id whose results are stored in the table
+     */
+    public String getPeriodicQueryId(final String periodTableName) {
+        requireNonNull(periodTableName);
+        return periodTableName.split(PeriodicTableSuffix)[1];
+    }
+    
+}
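
For example (ids illustrative), the table name round-trip behaves as follows;
note that the dashes stripped by makeTableName are not restored:

    PeriodicQueryTableNameFactory factory = new PeriodicQueryTableNameFactory();
    String table = factory.makeTableName("rya_", "12345678-abcd-4321-dcba-123456789012");
    // table == "rya_PERIODIC_QUERY_12345678abcd4321dcba123456789012"
    String queryId = factory.getPeriodicQueryId(table);
    // queryId == "12345678abcd4321dcba123456789012"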

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDe.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDe.java b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDe.java
new file mode 100644
index 0000000..ae43a9a
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/main/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDe.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+import org.apache.fluo.api.data.Bytes;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Serializes and deserializes a {@link VisibilityBindingSet} to and from {@link Bytes} objects.
+ */
+@DefaultAnnotation(NonNull.class)
+public class VisibilityBindingSetSerDe {
+
+    /**
+     * Serializes a {@link VisibilityBindingSet} into a {@link Bytes} object.
+     *
+     * @param bindingSet - The binding set that will be serialized. (not null)
+     * @return The serialized object.
+     * @throws Exception A problem was encountered while serializing the object.
+     */
+    public Bytes serialize(final VisibilityBindingSet bindingSet) throws Exception {
+        requireNonNull(bindingSet);
+
+        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try(final ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+            oos.writeObject(bindingSet);
+        }
+
+        return Bytes.of(baos.toByteArray());
+    }
+
+    /**
+     * Deserializes a {@link VisibilityBindingSet} from a {@link Bytes} object.
+     *
+     * @param bytes - The bytes that will be deserialized. (not null)
+     * @return The deserialized object.
+     * @throws Exception A problem was encountered while deserializing the object.
+     */
+    public VisibilityBindingSet deserialize(final Bytes bytes) throws Exception {
+        requireNonNull(bytes);
+
+        try(final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toArray()))) {
+            final Object o = ois.readObject();
+            if(o instanceof VisibilityBindingSet) {
+                return (VisibilityBindingSet) o;
+            } else {
+                throw new Exception("Deserialized Object is not a VisibilityBindingSet. Was: " + o.getClass());
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDeTest.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDeTest.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDeTest.java
new file mode 100644
index 0000000..16f56c1
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/VisibilityBindingSetSerDeTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.rya.indexing.pcj.storage.accumulo;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.fluo.api.data.Bytes;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSetSerDe;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.impl.MapBindingSet;
+
+/**
+ * Tests the methods of {@link VisibilityBindingSetSerDe}.
+ */
+public class VisibilityBindingSetSerDeTest {
+
+    @Test
+    public void roundTrip() throws Exception {
+        final ValueFactory vf = new ValueFactoryImpl();
+
+        final MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("name", vf.createLiteral("Alice"));
+        bs.addBinding("age", vf.createLiteral(5));
+        final VisibilityBindingSet original = new VisibilityBindingSet(bs, "u");
+
+        final VisibilityBindingSetSerDe serde = new VisibilityBindingSetSerDe();
+        final Bytes bytes = serde.serialize(original);
+        final VisibilityBindingSet result = serde.deserialize(bytes);
+
+        assertEquals(original, result);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
deleted file mode 100644
index 98ed4c7..0000000
--- a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/accumulo/AccumuloPcjStorageIT.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.rya.indexing.pcj.storage.accumulo.accumulo;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.rya.accumulo.AccumuloRyaITBase;
-import org.apache.rya.accumulo.instance.AccumuloRyaInstanceDetailsRepository;
-import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
-import org.apache.rya.api.instance.RyaDetailsRepository;
-import org.apache.rya.api.instance.RyaDetailsRepository.NotInitializedException;
-import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
-import org.apache.rya.indexing.pcj.storage.PcjMetadata;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
-import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
-import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
-import org.apache.rya.indexing.pcj.storage.accumulo.ShiftVarOrderFactory;
-import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
-import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
-import org.junit.Test;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.MalformedQueryException;
-import org.openrdf.query.impl.MapBindingSet;
-
-import com.google.common.collect.ImmutableMap;
-
-/**
- * Integration tests the methods of {@link AccumuloPcjStorage}.
- * <p>
- * These tests ensure that the PCJ tables are maintained and that these operations
- * also update the Rya instance's details.
- */
-public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
-
-    @Test
-    public void createPCJ() throws AccumuloException, AccumuloSecurityException, PCJStorageException, NotInitializedException, RyaDetailsRepositoryException {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-
-            // Ensure the Rya details have been updated to include the PCJ's ID.
-            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
-
-            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
-                    .getPCJIndexDetails()
-                    .getPCJDetails();
-
-            final PCJDetails expectedDetails = PCJDetails.builder()
-                    .setId( pcjId )
-                    .build();
-
-            assertEquals(expectedDetails, detailsMap.get(pcjId));
-        }
-    }
-
-    @Test
-    public void dropPCJ() throws AccumuloException, AccumuloSecurityException, PCJStorageException, NotInitializedException, RyaDetailsRepositoryException {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-
-            // Delete the PCJ that was just created.
-            pcjStorage.dropPcj(pcjId);
-
-            // Ensure the Rya details have been updated to no longer include the PCJ's ID.
-            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
-
-            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
-                    .getPCJIndexDetails()
-                    .getPCJDetails();
-
-            assertFalse( detailsMap.containsKey(pcjId) );
-        }
-    }
-
-    @Test
-    public void listPcjs() throws AccumuloException, AccumuloSecurityException, PCJStorageException {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a few PCJs and hold onto their IDs.
-            final List<String> expectedIds = new ArrayList<>();
-
-            String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-            expectedIds.add( pcjId );
-
-            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-            expectedIds.add( pcjId );
-
-            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
-            expectedIds.add( pcjId );
-
-            // Fetch the PCJ names
-            final List<String> pcjIds = pcjStorage.listPcjs();
-
-            // Ensure the expected IDs match the fetched IDs.
-            Collections.sort(expectedIds);
-            Collections.sort(pcjIds);
-            assertEquals(expectedIds, pcjIds);
-        }
-    }
-
-    @Test
-    public void getPcjMetadata() throws AccumuloException, AccumuloSecurityException, PCJStorageException, MalformedQueryException {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-            final String pcjId = pcjStorage.createPcj(sparql);
-
-            // Fetch the PCJ's metadata.
-            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-
-            // Ensure it has the expected values.
-            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
-            assertEquals(expectedMetadata, metadata);
-        }
-    }
-
-    @Test
-    public void addResults() throws AccumuloException, AccumuloSecurityException, PCJStorageException, MalformedQueryException {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-            final String pcjId = pcjStorage.createPcj(sparql);
-
-            // Add some binding sets to it.
-            final Set<VisibilityBindingSet> results = new HashSet<>();
-
-            final MapBindingSet aliceBS = new MapBindingSet();
-            aliceBS.addBinding("a", new URIImpl("http://Alice"));
-            aliceBS.addBinding("b", new URIImpl("http://Person"));
-            results.add( new VisibilityBindingSet(aliceBS, "") );
-
-            final MapBindingSet charlieBS = new MapBindingSet();
-            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-            results.add( new VisibilityBindingSet(charlieBS, "") );
-
-            pcjStorage.addResults(pcjId, results);
-
-            // Make sure the PCJ metadata was updated.
-            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-
-            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 2L, varOrders);
-            assertEquals(expectedMetadata, metadata);
-        }
-    }
-
-    @Test
-    public void listResults() throws Exception {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-            final String pcjId = pcjStorage.createPcj(sparql);
-
-            // Add some binding sets to it.
-            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
-
-            final MapBindingSet aliceBS = new MapBindingSet();
-            aliceBS.addBinding("a", new URIImpl("http://Alice"));
-            aliceBS.addBinding("b", new URIImpl("http://Person"));
-            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
-
-            final MapBindingSet charlieBS = new MapBindingSet();
-            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
-
-            pcjStorage.addResults(pcjId, expectedResults);
-
-            // List the results that were stored.
-            final Set<BindingSet> results = new HashSet<>();
-            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
-                while(resultsIt.hasNext()) {
-                    results.add( resultsIt.next() );
-                }
-            }
-
-            assertEquals(expectedResults, results);
-        }
-    }
-
-    @Test
-    public void purge() throws Exception {
-        // Setup the PCJ storage that will be tested against.
-        final Connector connector = super.getClusterInstance().getConnector();
-        final String ryaInstanceName = super.getRyaInstanceName();
-        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
-            // Create a PCJ.
-            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
-            final String pcjId = pcjStorage.createPcj(sparql);
-
-            // Add some binding sets to it.
-            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
-
-            final MapBindingSet aliceBS = new MapBindingSet();
-            aliceBS.addBinding("a", new URIImpl("http://Alice"));
-            aliceBS.addBinding("b", new URIImpl("http://Person"));
-            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
-
-            final MapBindingSet charlieBS = new MapBindingSet();
-            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
-            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
-            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
-
-            pcjStorage.addResults(pcjId, expectedResults);
-
-            // Purge the PCJ.
-            pcjStorage.purge(pcjId);
-
-            // List the results that were stored.
-            final Set<BindingSet> results = new HashSet<>();
-            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
-                while(resultsIt.hasNext()) {
-                    results.add( resultsIt.next() );
-                }
-            }
-
-            assertTrue( results.isEmpty() );
-
-            // Make sure the PCJ metadata was updated.
-            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
-
-            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
-            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
-            assertEquals(expectedMetadata, metadata);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPcjStorageIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPcjStorageIT.java b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPcjStorageIT.java
new file mode 100644
index 0000000..2964d91
--- /dev/null
+++ b/extras/rya.indexing.pcj/src/test/java/org/apache/rya/indexing/pcj/storage/accumulo/integration/AccumuloPcjStorageIT.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.indexing.pcj.storage.accumulo.integration;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.rya.accumulo.AccumuloRyaITBase;
+import org.apache.rya.accumulo.instance.AccumuloRyaInstanceDetailsRepository;
+import org.apache.rya.api.instance.RyaDetails.PCJIndexDetails.PCJDetails;
+import org.apache.rya.api.instance.RyaDetailsRepository;
+import org.apache.rya.api.instance.RyaDetailsRepository.NotInitializedException;
+import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
+import org.apache.rya.indexing.pcj.storage.PcjMetadata;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.ShiftVarOrderFactory;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.junit.Test;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.impl.MapBindingSet;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * Integration tests the methods of {@link AccumuloPcjStorage}.
+ * <p>
+ * These tests ensure that the PCJ tables are maintained and that these operations
+ * also update the Rya instance's details.
+ */
+public class AccumuloPcjStorageIT extends AccumuloRyaITBase {
+
+    @Test
+    public void createPCJ() throws AccumuloException, AccumuloSecurityException, PCJStorageException, NotInitializedException, RyaDetailsRepositoryException {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+
+            // Ensure the Rya details have been updated to include the PCJ's ID.
+            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
+
+            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
+                    .getPCJIndexDetails()
+                    .getPCJDetails();
+
+            final PCJDetails expectedDetails = PCJDetails.builder()
+                    .setId( pcjId )
+                    .build();
+
+            assertEquals(expectedDetails, detailsMap.get(pcjId));
+        }
+    }
+
+    @Test
+    public void dropPCJ() throws AccumuloException, AccumuloSecurityException, PCJStorageException, NotInitializedException, RyaDetailsRepositoryException {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+
+            // Delete the PCJ that was just created.
+            pcjStorage.dropPcj(pcjId);
+
+            // Ensure the Rya details have been updated to no longer include the PCJ's ID.
+            final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(connector, ryaInstanceName);
+
+            final ImmutableMap<String, PCJDetails> detailsMap = detailsRepo.getRyaInstanceDetails()
+                    .getPCJIndexDetails()
+                    .getPCJDetails();
+
+            assertFalse( detailsMap.containsKey(pcjId) );
+        }
+    }
+
+    @Test
+    public void listPcjs() throws AccumuloException, AccumuloSecurityException, PCJStorageException {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a few PCJs and hold onto their IDs.
+            final List<String> expectedIds = new ArrayList<>();
+
+            String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
+
+            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
+
+            pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b } ");
+            expectedIds.add( pcjId );
+
+            // Fetch the PCJ names
+            final List<String> pcjIds = pcjStorage.listPcjs();
+
+            // Ensure the expected IDs match the fetched IDs.
+            Collections.sort(expectedIds);
+            Collections.sort(pcjIds);
+            assertEquals(expectedIds, pcjIds);
+        }
+    }
+
+    @Test
+    public void getPcjMetadata() throws AccumuloException, AccumuloSecurityException, PCJStorageException, MalformedQueryException {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Fetch the PCJ's metadata.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+
+            // Ensure it has the expected values.
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
+    }
+
+    @Test
+    public void addResults() throws AccumuloException, AccumuloSecurityException, PCJStorageException, MalformedQueryException {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> results = new HashSet<>();
+
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            results.add( new VisibilityBindingSet(aliceBS, "") );
+
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            results.add( new VisibilityBindingSet(charlieBS, "") );
+
+            pcjStorage.addResults(pcjId, results);
+
+            // Make sure the PCJ metadata was updated.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 2L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
+    }
+
+    @Test
+    public void listResults() throws Exception {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
+
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
+
+            pcjStorage.addResults(pcjId, expectedResults);
+
+            // List the results that were stored.
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+
+            assertEquals(expectedResults, results);
+        }
+    }
+
+    @Test
+    public void purge() throws Exception {
+        // Setup the PCJ storage that will be tested against.
+        final Connector connector = super.getClusterInstance().getConnector();
+        final String ryaInstanceName = super.getRyaInstanceName();
+        try(final PrecomputedJoinStorage pcjStorage =  new AccumuloPcjStorage(connector, ryaInstanceName)) {
+            // Create a PCJ.
+            final String sparql = "SELECT * WHERE { ?a <http://isA> ?b }";
+            final String pcjId = pcjStorage.createPcj(sparql);
+
+            // Add some binding sets to it.
+            final Set<VisibilityBindingSet> expectedResults = new HashSet<>();
+
+            final MapBindingSet aliceBS = new MapBindingSet();
+            aliceBS.addBinding("a", new URIImpl("http://Alice"));
+            aliceBS.addBinding("b", new URIImpl("http://Person"));
+            expectedResults.add( new VisibilityBindingSet(aliceBS, "") );
+
+            final MapBindingSet charlieBS = new MapBindingSet();
+            charlieBS.addBinding("a", new URIImpl("http://Charlie"));
+            charlieBS.addBinding("b", new URIImpl("http://Comedian"));
+            expectedResults.add( new VisibilityBindingSet(charlieBS, "") );
+
+            pcjStorage.addResults(pcjId, expectedResults);
+
+            // Purge the PCJ.
+            pcjStorage.purge(pcjId);
+
+            // List the results that were stored.
+            final Set<BindingSet> results = new HashSet<>();
+            try(CloseableIterator<BindingSet> resultsIt = pcjStorage.listResults(pcjId)) {
+                while(resultsIt.hasNext()) {
+                    results.add( resultsIt.next() );
+                }
+            }
+
+            assertTrue( results.isEmpty() );
+
+            // Make sure the PCJ metadata was updated.
+            final PcjMetadata metadata = pcjStorage.getPcjMetadata(pcjId);
+
+            final Set<VariableOrder> varOrders = new ShiftVarOrderFactory().makeVarOrders(sparql);
+            final PcjMetadata expectedMetadata = new PcjMetadata(sparql, 0L, varOrders);
+            assertEquals(expectedMetadata, metadata);
+        }
+    }
+}
\ No newline at end of file
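
Editor's note: the lifecycle exercised by AccumuloPcjStorageIT condenses to a
few calls. The sketch below is illustrative only, not code from this commit;
it assumes the imports shown in the test above, a live Accumulo Connector
(the variable connector is a placeholder), and an arbitrary Rya instance name.

    // Minimal sketch of the PCJ storage lifecycle (placeholders noted above).
    try (PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(connector, "rya_")) {
        // Register the query; the Rya instance details gain the new PCJ's ID.
        final String pcjId = pcjStorage.createPcj("SELECT * WHERE { ?a <http://isA> ?b }");

        // Write precomputed results, then stream them back out.
        final Set<VisibilityBindingSet> results = new HashSet<>();
        pcjStorage.addResults(pcjId, results);

        final Set<BindingSet> seen = new HashSet<>();
        try (CloseableIterator<BindingSet> it = pcjStorage.listResults(pcjId)) {
            while (it.hasNext()) {
                seen.add(it.next());
            }
        }

        // purge() empties the stored results but keeps the PCJ registered;
        // dropPcj() removes the PCJ and its entry in the Rya details.
        pcjStorage.purge(pcjId);
        pcjStorage.dropPcj(pcjId);
    }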


[3/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationIT.java
new file mode 100644
index 0000000..cb7557c
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationIT.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+
+import javax.xml.datatype.DatatypeConfigurationException;
+import javax.xml.datatype.DatatypeFactory;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.config.FluoConfiguration;
+import org.apache.fluo.core.client.FluoClientImpl;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.rya.api.resolver.RdfToRyaConversions;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.util.FluoClientFactory;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
+import org.apache.rya.periodic.notification.api.CreatePeriodicQuery;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.registration.kafka.KafkaNotificationRegistrationClient;
+import org.apache.rya.periodic.notification.serialization.BindingSetSerDe;
+import org.apache.rya.periodic.notification.serialization.CommandNotificationSerializer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.openrdf.model.Statement;
+import org.openrdf.model.Value;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.LiteralImpl;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServer;
+import kafka.utils.MockTime;
+import kafka.utils.TestUtils;
+import kafka.utils.Time;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import kafka.zk.EmbeddedZookeeper;
+
+public class PeriodicNotificationApplicationIT extends RyaExportITBase {
+
+    private PeriodicNotificationApplication app;
+    private KafkaNotificationRegistrationClient registrar;
+    private KafkaProducer<String, CommandNotification> producer;
+    private Properties props;
+    private Properties kafkaProps;
+    PeriodicNotificationApplicationConfiguration conf;
+    
+    private static final String ZKHOST = "127.0.0.1";
+    private static final String BROKERHOST = "127.0.0.1";
+    private static final String BROKERPORT = "9092";
+    private ZkUtils zkUtils;
+    private KafkaServer kafkaServer;
+    private EmbeddedZookeeper zkServer;
+    private ZkClient zkClient;
+    
+    @Before
+    public void init() throws Exception {
+        setUpKafka();
+        props = getProps();
+        conf = new PeriodicNotificationApplicationConfiguration(props);
+        kafkaProps = getKafkaProperties(conf);
+        app = PeriodicNotificationApplicationFactory.getPeriodicApplication(props);
+        producer = new KafkaProducer<>(kafkaProps, new StringSerializer(), new CommandNotificationSerializer());
+        registrar = new KafkaNotificationRegistrationClient(conf.getNotificationTopic(), producer);
+    }
+    
+    private void setUpKafka() throws Exception {
+        // Setup Kafka.
+        zkServer = new EmbeddedZookeeper();
+        final String zkConnect = ZKHOST + ":" + zkServer.port();
+        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
+        zkUtils = ZkUtils.apply(zkClient, false);
+
+        // setup Broker
+        final Properties brokerProps = new Properties();
+        brokerProps.setProperty("zookeeper.connect", zkConnect);
+        brokerProps.setProperty("broker.id", "0");
+        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
+        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
+        final KafkaConfig config = new KafkaConfig(brokerProps);
+        final Time mock = new MockTime();
+        kafkaServer = TestUtils.createServer(config, mock);
+    }
+    
+    @Test
+    public void periodicApplicationWithAggAndGroupByTest() throws Exception {
+
+        String sparql = "prefix function: <http://org.apache.rya/function#> " // n
+                + "prefix time: <http://www.w3.org/2006/time#> " // n
+                + "select ?type (count(?obs) as ?total) where {" // n
+                + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n
+                + "?obs <uri:hasTime> ?time. " // n
+                + "?obs <uri:hasObsType> ?type } group by ?type"; // n
+        
+        //make data
+        int periodMult = 15;
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        //Sleep until current time aligns nicely with period to make
+        //results more predictable
+        while(System.currentTimeMillis() % (periodMult*1000) > 500);
+        ZonedDateTime time = ZonedDateTime.now();
+
+        ZonedDateTime zTime1 = time.minusSeconds(2*periodMult);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasObsType"), vf.createLiteral("ship")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasObsType"), vf.createLiteral("airplane")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasObsType"), vf.createLiteral("ship")),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasObsType"), vf.createLiteral("airplane")),
+                vf.createStatement(vf.createURI("urn:obs_5"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_5"), vf.createURI("uri:hasObsType"), vf.createLiteral("automobile")));
+        
+        try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf)) {
+            Connector connector = ConfigUtils.getConnector(conf);
+            PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix());
+            CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage);
+            String id = periodicQuery.createQueryAndRegisterWithKafka(sparql, registrar);
+            addData(statements);
+            app.start();
+
+            Multimap<Long, BindingSet> actual = HashMultimap.create();
+            try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) {
+                consumer.subscribe(Arrays.asList(id));
+                long end = System.currentTimeMillis() + 4*periodMult*1000;
+                long lastBinId = 0L;
+                long binId = 0L;
+                List<Long> ids = new ArrayList<>();
+                while (System.currentTimeMillis() < end) {
+                    ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000);
+                    for(ConsumerRecord<String, BindingSet> record: records){
+                        BindingSet result = record.value();
+                        binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue());
+                        if(lastBinId != binId) {
+                            lastBinId = binId;
+                            ids.add(binId);
+                        }
+                        actual.put(binId, result);
+                    }
+                }
+                
+                Map<Long, Set<BindingSet>> expected = new HashMap<>();
+                
+                Set<BindingSet> expected1 = new HashSet<>();
+                QueryBindingSet bs1 = new QueryBindingSet();
+                bs1.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0)));
+                bs1.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER));
+                bs1.addBinding("type", vf.createLiteral("airplane"));
+                
+                QueryBindingSet bs2 = new QueryBindingSet();
+                bs2.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0)));
+                bs2.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER));
+                bs2.addBinding("type", vf.createLiteral("ship"));
+                
+                QueryBindingSet bs3 = new QueryBindingSet();
+                bs3.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0)));
+                bs3.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER));
+                bs3.addBinding("type", vf.createLiteral("automobile"));
+                
+                expected1.add(bs1);
+                expected1.add(bs2);
+                expected1.add(bs3);
+                
+                Set<BindingSet> expected2 = new HashSet<>();
+                QueryBindingSet bs4 = new QueryBindingSet();
+                bs4.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(1)));
+                bs4.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER));
+                bs4.addBinding("type", vf.createLiteral("airplane"));
+                
+                QueryBindingSet bs5 = new QueryBindingSet();
+                bs5.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(1)));
+                bs5.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER));
+                bs5.addBinding("type", vf.createLiteral("ship"));
+                
+                expected2.add(bs4);
+                expected2.add(bs5);
+                
+                Set<BindingSet> expected3 = new HashSet<>();
+                QueryBindingSet bs6 = new QueryBindingSet();
+                bs6.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(2)));
+                bs6.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER));
+                bs6.addBinding("type", vf.createLiteral("ship"));
+                
+                QueryBindingSet bs7 = new QueryBindingSet();
+                bs7.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(2)));
+                bs7.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER));
+                bs7.addBinding("type", vf.createLiteral("airplane"));
+                
+                expected3.add(bs6);
+                expected3.add(bs7);
+                
+                expected.put(ids.get(0), expected1);
+                expected.put(ids.get(1), expected2);
+                expected.put(ids.get(2), expected3);
+                
+                Assert.assertEquals(3, actual.asMap().size());
+                for(Long ident: ids) {
+                    Assert.assertEquals(expected.get(ident), actual.get(ident));
+                }
+            }
+            
+            Set<BindingSet> expectedResults = new HashSet<>();
+            try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) {
+                results.forEachRemaining(x -> expectedResults.add(x));
+                Assert.assertEquals(0, expectedResults.size());
+            }
+        }
+    }
+    
+    
+    @Test
+    public void periodicApplicationWithAggTest() throws Exception {
+
+        String sparql = "prefix function: <http://org.apache.rya/function#> " // n
+                + "prefix time: <http://www.w3.org/2006/time#> " // n
+                + "select (count(?obs) as ?total) where {" // n
+                + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n
+                + "?obs <uri:hasTime> ?time. " // n
+                + "?obs <uri:hasId> ?id } "; // n
+        
+        //make data
+        int periodMult = 15;
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        //Sleep until current time aligns nicely with period to make
+        //results more predictable
+        while(System.currentTimeMillis() % (periodMult*1000) > 500);
+        ZonedDateTime time = ZonedDateTime.now();
+
+        ZonedDateTime zTime1 = time.minusSeconds(2*periodMult);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")));
+        
+        try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf)) {
+            Connector connector = ConfigUtils.getConnector(conf);
+            PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix());
+            CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage);
+            String id = periodicQuery.createQueryAndRegisterWithKafka(sparql, registrar);
+            addData(statements);
+            app.start();
+
+            Multimap<Long, BindingSet> expected = HashMultimap.create();
+            try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) {
+                consumer.subscribe(Arrays.asList(id));
+                long end = System.currentTimeMillis() + 4*periodMult*1000;
+                long lastBinId = 0L;
+                long binId = 0L;
+                List<Long> ids = new ArrayList<>();
+                while (System.currentTimeMillis() < end) {
+                    ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000);
+                    for(ConsumerRecord<String, BindingSet> record: records){
+                        BindingSet result = record.value();
+                        binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue());
+                        if(lastBinId != binId) {
+                            lastBinId = binId;
+                            ids.add(binId);
+                        }
+                        expected.put(binId, result);
+                    }
+                }
+                
+                Assert.assertEquals(3, expected.asMap().size());
+                int i = 0;
+                for(Long ident: ids) {
+                    Assert.assertEquals(1, expected.get(ident).size());
+                    BindingSet bs = expected.get(ident).iterator().next();
+                    Value val = bs.getValue("total");
+                    int total = Integer.parseInt(val.stringValue());
+                    Assert.assertEquals(3-i, total);
+                    i++;
+                }
+            }
+            
+            
+            Set<BindingSet> expectedResults = new HashSet<>();
+            try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) {
+                results.forEachRemaining(x -> expectedResults.add(x));
+                Assert.assertEquals(0, expectedResults.size());
+            }
+        }
+
+    }
+    
+    
+    @Test
+    public void periodicApplicationTest() throws Exception {
+
+        String sparql = "prefix function: <http://org.apache.rya/function#> " // n
+                + "prefix time: <http://www.w3.org/2006/time#> " // n
+                + "select ?obs ?id where {" // n
+                + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n
+                + "?obs <uri:hasTime> ?time. " // n
+                + "?obs <uri:hasId> ?id } "; // n
+        
+        //make data
+        int periodMult = 15;
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        //Sleep until current time aligns nicely with period to make
+        //results more predictable
+        while(System.currentTimeMillis() % (periodMult*1000) > 500);
+        ZonedDateTime time = ZonedDateTime.now();
+
+        ZonedDateTime zTime1 = time.minusSeconds(2*periodMult);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")));
+        
+        try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf)) {
+            Connector connector = ConfigUtils.getConnector(conf);
+            PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix());
+            CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage);
+            String id = periodicQuery.createQueryAndRegisterWithKafka(sparql, registrar);
+            addData(statements);
+            app.start();
+
+            Multimap<Long, BindingSet> expected = HashMultimap.create();
+            try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) {
+                consumer.subscribe(Arrays.asList(id));
+                long end = System.currentTimeMillis() + 4*periodMult*1000;
+                long lastBinId = 0L;
+                long binId = 0L;
+                List<Long> ids = new ArrayList<>();
+                while (System.currentTimeMillis() < end) {
+                    ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000);
+                    for(ConsumerRecord<String, BindingSet> record: records){
+                        BindingSet result = record.value();
+                        binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue());
+                        if(lastBinId != binId) {
+                            lastBinId = binId;
+                            ids.add(binId);
+                        }
+                        expected.put(binId, result);
+                    }
+                }
+                
+                Assert.assertEquals(3, expected.asMap().size());
+                int i = 0;
+                for(Long ident: ids) {
+                    Assert.assertEquals(3-i, expected.get(ident).size());
+                    i++;
+                }
+            }
+            
+            
+            Set<BindingSet> expectedResults = new HashSet<>();
+            try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) {
+                results.forEachRemaining(x -> expectedResults.add(x));
+                Assert.assertEquals(0, expectedResults.size());
+            }
+        }
+
+    }
+    
+    
+    @After
+    public void shutdown() {
+        registrar.close();
+        app.stop();
+        teardownKafka();
+    }
+    
+    private void teardownKafka() {
+        kafkaServer.shutdown();
+        zkClient.close();
+        zkServer.shutdown();
+    }
+    
+    private void addData(Collection<Statement> statements) throws DatatypeConfigurationException {
+        // add statements to Fluo
+        try (FluoClient fluo = new FluoClientImpl(getFluoConfiguration())) {
+            InsertTriples inserter = new InsertTriples();
+            statements.forEach(x -> inserter.insert(fluo, RdfToRyaConversions.convertStatement(x)));
+            getMiniFluo().waitForObservers();
+//            FluoITHelper.printFluoTable(fluo);
+        }
+
+    }
+
+    private Properties getKafkaProperties(PeriodicNotificationApplicationConfiguration conf) { 
+        Properties kafkaProps = new Properties();
+        kafkaProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, conf.getBootStrapServers());
+        kafkaProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, conf.getNotificationClientId());
+        kafkaProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, conf.getNotificationGroupId());
+        kafkaProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        return kafkaProps;
+    }
+
+    
+    private Properties getProps() throws IOException {
+        
+        Properties props = new Properties();
+        try(InputStream in = new FileInputStream("src/test/resources/notification.properties")) {
+            props.load(in);
+        } 
+        
+        FluoConfiguration fluoConf = getFluoConfiguration();
+        props.setProperty("accumulo.user", getUsername());
+        props.setProperty("accumulo.password", getPassword());
+        props.setProperty("accumulo.instance", getMiniAccumuloCluster().getInstanceName());
+        props.setProperty("accumulo.zookeepers", getMiniAccumuloCluster().getZooKeepers());
+        props.setProperty("accumulo.rya.prefix", getRyaInstanceName());
+        props.setProperty(PeriodicNotificationApplicationConfiguration.FLUO_APP_NAME, fluoConf.getApplicationName());
+        props.setProperty(PeriodicNotificationApplicationConfiguration.FLUO_TABLE_NAME, fluoConf.getAccumuloTable());
+        return props;
+    }
+
+}
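
Editor's note: all three tests above consume results the same way. The sketch
below is illustrative only, not commit code; it assumes the imports and the
kafkaProps from PeriodicNotificationApplicationIT, and that queryId is the id
returned by createQueryAndRegisterWithKafka (which doubles as the topic name).

    // Minimal sketch: group exported results by their periodic bin id.
    final Multimap<Long, BindingSet> resultsByBin = HashMultimap.create();
    try (KafkaConsumer<String, BindingSet> consumer =
            new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) {
        consumer.subscribe(Arrays.asList(queryId));
        for (final ConsumerRecord<String, BindingSet> record : consumer.poll(15000)) {
            final BindingSet result = record.value();
            final long binId = Long.parseLong(result
                    .getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID)
                    .getValue().stringValue());
            resultsByBin.put(binId, result);
        }
    }

Each key of resultsByBin then holds one window evaluation's answers, which is
what the assertions above count per bin.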

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationProviderIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationProviderIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationProviderIT.java
new file mode 100644
index 0000000..1902248
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationProviderIT.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.application;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.core.client.FluoClientImpl;
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
+import org.apache.rya.periodic.notification.coordinator.PeriodicNotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.apache.rya.periodic.notification.recovery.PeriodicNotificationProvider;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.openrdf.query.MalformedQueryException;
+
+public class PeriodicNotificationProviderIT extends AccumuloExportITBase {
+
+    @Test
+    public void testProvider() throws MalformedQueryException, InterruptedException {
+        
+        String sparql = "prefix function: <http://org.apache.rya/function#> " // n
+                + "prefix time: <http://www.w3.org/2006/time#> " // n
+                + "select ?id (count(?obs) as ?total) where {" // n
+                + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n
+                + "?obs <uri:hasTime> ?time. " // n
+                + "?obs <uri:hasId> ?id } group by ?id"; // n
+        
+        BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
+        PeriodicNotificationCoordinatorExecutor coord = new PeriodicNotificationCoordinatorExecutor(2, notifications);
+        PeriodicNotificationProvider provider = new PeriodicNotificationProvider();
+        CreatePcj pcj = new CreatePcj();
+        
+        String id = null;
+        try(FluoClient fluo = new FluoClientImpl(getFluoConfiguration())) {
+            id = pcj.createPcj(sparql, fluo);
+            provider.processRegisteredNotifications(coord, fluo.newSnapshot());
+        }
+        
+        TimestampedNotification notification = notifications.take();
+        Assert.assertEquals(5000, notification.getInitialDelay());
+        Assert.assertEquals(15000, notification.getPeriod());
+        Assert.assertEquals(TimeUnit.MILLISECONDS, notification.getTimeUnit());
+        Assert.assertEquals(id, notification.getId());
+        
+    }
+    
+}
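
Editor's note: the 15000 ms period asserted above is just the filter's .25
minute argument converted to milliseconds; the 5000 ms initial delay is
computed internally by PeriodicNotificationProvider and is not derived here.
A quick check of the conversion, using only java.util.concurrent.TimeUnit:

    // .25 minutes, as passed to function:periodic, expressed in milliseconds.
    final double periodMinutes = 0.25;
    final long periodMillis = (long) (periodMinutes * TimeUnit.MINUTES.toMillis(1)); // 15000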

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/exporter/PeriodicNotificationExporterIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/exporter/PeriodicNotificationExporterIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/exporter/PeriodicNotificationExporterIT.java
new file mode 100644
index 0000000..c0efc4f
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/exporter/PeriodicNotificationExporterIT.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.exporter;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.kafka.base.KafkaITBase;
+import org.apache.rya.periodic.notification.serialization.BindingSetSerDe;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+public class PeriodicNotificationExporterIT extends KafkaITBase {
+
+    private static final ValueFactory vf = new ValueFactoryImpl();
+    
+    @Test
+    public void testExporter() throws InterruptedException {
+        
+        BlockingQueue<BindingSetRecord> records = new LinkedBlockingQueue<>();
+        Properties props = createKafkaConfig();
+        
+        KafkaExporterExecutor exporter = new KafkaExporterExecutor(new KafkaProducer<String, BindingSet>(props), 1, records);
+        exporter.start();
+        
+        QueryBindingSet bs1 = new QueryBindingSet();
+        bs1.addBinding(PeriodicQueryResultStorage.PeriodicBinId, vf.createLiteral(1L));
+        bs1.addBinding("name", vf.createURI("uri:Bob"));
+        BindingSetRecord record1 = new BindingSetRecord(bs1, "topic1");
+        
+        QueryBindingSet bs2 = new QueryBindingSet();
+        bs2.addBinding(PeriodicQueryResultStorage.PeriodicBinId, vf.createLiteral(2L));
+        bs2.addBinding("name", vf.createURI("uri:Joe"));
+        BindingSetRecord record2 = new BindingSetRecord(bs2, "topic2");
+        
+        records.add(record1);
+        records.add(record2);
+        
+        Set<BindingSet> expected1 = new HashSet<>();
+        expected1.add(bs1);
+        Set<BindingSet> expected2 = new HashSet<>();
+        expected2.add(bs2);
+        
+        Set<BindingSet> actual1 = getBindingSetsFromKafka("topic1");
+        Set<BindingSet> actual2 = getBindingSetsFromKafka("topic2");
+        
+        Assert.assertEquals(expected1, actual1);
+        Assert.assertEquals(expected2, actual2);
+        
+        exporter.stop();
+        
+    }
+    
+    
+    private Properties createKafkaConfig() {
+        Properties props = new Properties();
+        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
+        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
+        props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
+        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, BindingSetSerDe.class.getName());
+        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BindingSetSerDe.class.getName());
+
+        return props;
+    }
+    
+    
+    private KafkaConsumer<String, BindingSet> makeBindingSetConsumer(final String topicName) {
+        // setup consumer
+        final Properties consumerProps = createKafkaConfig();
+        final KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(consumerProps);
+        consumer.subscribe(Arrays.asList(topicName));
+        return consumer;
+    }
+    
+    private Set<BindingSet> getBindingSetsFromKafka(String topic) {
+        KafkaConsumer<String, BindingSet> consumer = null;
+
+        try {
+            consumer = makeBindingSetConsumer(topic);
+            ConsumerRecords<String, BindingSet> records = consumer.poll(5000);
+
+            Set<BindingSet> bindingSets = new HashSet<>();
+            records.forEach(x -> bindingSets.add(x.value()));
+
+            return bindingSets;
+
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        } finally {
+            if (consumer != null) {
+                consumer.close();
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/processor/PeriodicNotificationProcessorIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/processor/PeriodicNotificationProcessorIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/processor/PeriodicNotificationProcessorIT.java
new file mode 100644
index 0000000..fa60e48
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/processor/PeriodicNotificationProcessorIT.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.processor;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+
+public class PeriodicNotificationProcessorIT extends AccumuloExportITBase {
+
+    private static final ValueFactory vf = new ValueFactoryImpl();
+    private static final String RYA_INSTANCE_NAME = "rya_";
+    
+    @Test
+    public void periodicProcessorTest() throws Exception {
+        
+        String id = UUID.randomUUID().toString().replace("-", "");
+        BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
+        BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
+        BlockingQueue<BindingSetRecord> bindingSets = new LinkedBlockingQueue<>();
+        
+        TimestampedNotification ts1 = new TimestampedNotification(
+                PeriodicNotification.builder().id(id).initialDelay(0).period(2000).timeUnit(TimeUnit.SECONDS).build());  
+        long binId1 = (ts1.getTimestamp().getTime()/ts1.getPeriod())*ts1.getPeriod();
+        
+        Thread.sleep(2000);
+        
+        TimestampedNotification ts2 = new TimestampedNotification(
+                PeriodicNotification.builder().id(id).initialDelay(0).period(2000).timeUnit(TimeUnit.SECONDS).build());  
+        long binId2 = (ts2.getTimestamp().getTime()/ts2.getPeriod())*ts2.getPeriod();
+        
+        Set<NodeBin> expectedBins = new HashSet<>();
+        expectedBins.add(new NodeBin(id, binId1));
+        expectedBins.add(new NodeBin(id, binId2));
+        
+        Set<BindingSet> expected = new HashSet<>();
+        Set<VisibilityBindingSet> storageResults = new HashSet<>();
+        
+        QueryBindingSet bs1 = new QueryBindingSet();
+        bs1.addBinding("periodicBinId", vf.createLiteral(binId1));
+        bs1.addBinding("id", vf.createLiteral(1));
+        expected.add(bs1);
+        storageResults.add(new VisibilityBindingSet(bs1));
+        
+        QueryBindingSet bs2 = new QueryBindingSet();
+        bs2.addBinding("periodicBinId", vf.createLiteral(binId1));
+        bs2.addBinding("id", vf.createLiteral(2));
+        expected.add(bs2);
+        storageResults.add(new VisibilityBindingSet(bs2));
+        
+        QueryBindingSet bs3 = new QueryBindingSet();
+        bs3.addBinding("periodicBinId", vf.createLiteral(binId2));
+        bs3.addBinding("id", vf.createLiteral(3));
+        expected.add(bs3);
+        storageResults.add(new VisibilityBindingSet(bs3));
+        
+        QueryBindingSet bs4 = new QueryBindingSet();
+        bs4.addBinding("periodicBinId", vf.createLiteral(binId2));
+        bs4.addBinding("id", vf.createLiteral(4));
+        expected.add(bs4);
+        storageResults.add(new VisibilityBindingSet(bs4));
+        
+        PeriodicQueryResultStorage periodicStorage = new AccumuloPeriodicQueryResultStorage(super.getAccumuloConnector(),
+                RYA_INSTANCE_NAME);
+        periodicStorage.createPeriodicQuery(id, "select ?id where {?obs <urn:hasId> ?id.}", new VariableOrder("periodicBinId", "id"));
+        periodicStorage.addPeriodicQueryResults(id, storageResults);
+
+        NotificationProcessorExecutor processor = new NotificationProcessorExecutor(periodicStorage, notifications, bins, bindingSets, 1);
+        processor.start();
+        
+        notifications.add(ts1);
+        notifications.add(ts2);
+
+        Thread.sleep(5000);
+        
+        Assert.assertEquals(expectedBins.size(), bins.size());
+        Assert.assertTrue(bins.containsAll(expectedBins));
+        
+        Set<BindingSet> actual = new HashSet<>();
+        bindingSets.forEach(x -> actual.add(x.getBindingSet()));
+        Assert.assertEquals(expected, actual);
+        
+        processor.stop();
+    }
+    
+}
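
The bin id computation in this test floors a notification's timestamp to the
start of its period window. Isolated below, assuming the timestamp and the
period are both expressed in milliseconds (sketch only):

    public class BinIdSketch {
        public static void main(String[] args) {
            long periodMs = 2000L;
            long timestampMs = System.currentTimeMillis();
            // floor the timestamp to the most recent period boundary
            long binId = (timestampMs / periodMs) * periodMs;
            System.out.println(binId);
        }
    }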

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/pruner/PeriodicNotificationBinPrunerIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/pruner/PeriodicNotificationBinPrunerIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/pruner/PeriodicNotificationBinPrunerIT.java
new file mode 100644
index 0000000..27acc9c
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/pruner/PeriodicNotificationBinPrunerIT.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.pruner;
+
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import javax.xml.datatype.DatatypeFactory;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.Snapshot;
+import org.apache.fluo.api.client.scanner.ColumnScanner;
+import org.apache.fluo.api.client.scanner.RowScanner;
+import org.apache.fluo.api.data.Bytes;
+import org.apache.fluo.api.data.ColumnValue;
+import org.apache.fluo.api.data.Span;
+import org.apache.fluo.core.client.FluoClientImpl;
+import org.apache.fluo.recipes.test.FluoITHelper;
+import org.apache.rya.api.resolver.RdfToRyaConversions;
+import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.apache.rya.indexing.pcj.fluo.app.NodeType;
+import org.apache.rya.indexing.pcj.fluo.app.query.FluoQueryColumns;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.apache.rya.indexing.pcj.fluo.app.util.RowKeyUtil;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
+import org.apache.rya.periodic.notification.api.CreatePeriodicQuery;
+import org.apache.rya.periodic.notification.api.NodeBin;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.Statement;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.LiteralImpl;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.XMLSchema;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.query.impl.MapBindingSet;
+
+import com.google.common.collect.Sets;
+
+public class PeriodicNotificationBinPrunerIT extends RyaExportITBase {
+
+    
+    @Test
+    public void periodicPrunerTest() throws Exception {
+
+        String sparql = "prefix function: <http://org.apache.rya/function#> "
+                + "prefix time: <http://www.w3.org/2006/time#> "
+                + "select ?id (count(?obs) as ?total) where {"
+                + "Filter(function:periodic(?time, 2, .5, time:hours)) "
+                + "?obs <uri:hasTime> ?time. "
+                + "?obs <uri:hasId> ?id } group by ?id";
+
+        FluoClient fluo = new FluoClientImpl(super.getFluoConfiguration());
+
+        // initialize resources and create pcj
+        PeriodicQueryResultStorage periodicStorage = new AccumuloPeriodicQueryResultStorage(super.getAccumuloConnector(),
+                getRyaInstanceName());
+        CreatePeriodicQuery createPeriodicQuery = new CreatePeriodicQuery(fluo, periodicStorage);
+        PeriodicNotification notification = createPeriodicQuery.createPeriodicQuery(sparql);
+        String queryId = notification.getId();
+
+        // create statements to ingest into Fluo
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        ZonedDateTime time = ZonedDateTime.now();
+        long currentTime = time.toInstant().toEpochMilli();
+
+        ZonedDateTime zTime1 = time.minusMinutes(30);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime2 = zTime1.minusMinutes(30);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime3 = zTime2.minusMinutes(30);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+
+        ZonedDateTime zTime4 = zTime3.minusMinutes(30);
+        String time4 = zTime4.format(DateTimeFormatter.ISO_INSTANT);
+
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasId"), vf.createLiteral("id_4")),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"),
+                        vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")));
+
+        // add statements to Fluo
+        InsertTriples inserter = new InsertTriples();
+        statements.forEach(x -> inserter.insert(fluo, RdfToRyaConversions.convertStatement(x)));
+
+        super.getMiniFluo().waitForObservers();
+
+        // FluoITHelper.printFluoTable(fluo);
+
+        // Create the expected results of the SPARQL query once the PCJ has been
+        // computed.
+        final Set<BindingSet> expected1 = new HashSet<>();
+        final Set<BindingSet> expected2 = new HashSet<>();
+        final Set<BindingSet> expected3 = new HashSet<>();
+        final Set<BindingSet> expected4 = new HashSet<>();
+
+        long period = 1800000;
+        long binId = (currentTime / period) * period;
+
+        long bin1 = binId;
+        long bin2 = binId + period;
+        long bin3 = binId + 2 * period;
+        long bin4 = binId + 3 * period;
+
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin1));
+        expected1.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin1));
+        expected1.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin1));
+        expected1.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_4", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin1));
+        expected1.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin2));
+        expected2.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin2));
+        expected2.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin2));
+        expected2.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin3));
+        expected3.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin3));
+        expected3.add(bs);
+
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(bin4));
+        expected4.add(bs);
+
+        // make sure that expected and actual results align after ingest
+        compareResults(periodicStorage, queryId, bin1, expected1);
+        compareResults(periodicStorage, queryId, bin2, expected2);
+        compareResults(periodicStorage, queryId, bin3, expected3);
+        compareResults(periodicStorage, queryId, bin4, expected4);
+
+        BlockingQueue<NodeBin> bins = new LinkedBlockingQueue<>();
+        PeriodicQueryPrunerExecutor pruner = new PeriodicQueryPrunerExecutor(periodicStorage, fluo, 1, bins);
+        pruner.start();
+
+        bins.add(new NodeBin(queryId, bin1));
+        bins.add(new NodeBin(queryId, bin2));
+        bins.add(new NodeBin(queryId, bin3));
+        bins.add(new NodeBin(queryId, bin4));
+
+        Thread.sleep(10000);
+
+        compareResults(periodicStorage, queryId, bin1, new HashSet<>());
+        compareResults(periodicStorage, queryId, bin2, new HashSet<>());
+        compareResults(periodicStorage, queryId, bin3, new HashSet<>());
+        compareResults(periodicStorage, queryId, bin4, new HashSet<>());
+
+        compareFluoCounts(fluo, queryId, bin1);
+        compareFluoCounts(fluo, queryId, bin2);
+        compareFluoCounts(fluo, queryId, bin3);
+        compareFluoCounts(fluo, queryId, bin4);
+
+        pruner.stop();
+        fluo.close();
+
+    }
+    
+    private void compareResults(PeriodicQueryResultStorage periodicStorage, String queryId, long bin, Set<BindingSet> expected) throws Exception {
+        try(CloseableIterator<BindingSet> iter = periodicStorage.listResults(queryId, Optional.of(bin))) {
+            Set<BindingSet> actual = new HashSet<>();
+            while(iter.hasNext()) {
+                actual.add(iter.next());
+            }
+            Assert.assertEquals(expected, actual);
+        }
+    }
+    
+    private void compareFluoCounts(FluoClient client, String queryId, long bin) {
+        QueryBindingSet bs = new QueryBindingSet();
+        bs.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, new LiteralImpl(Long.toString(bin), XMLSchema.LONG));
+        
+        VariableOrder varOrder = new VariableOrder(IncrementalUpdateConstants.PERIODIC_BIN_ID);
+        
+        try(Snapshot sx = client.newSnapshot()) {
+            String fluoQueryId = sx.get(Bytes.of(queryId), FluoQueryColumns.PCJ_ID_QUERY_ID).toString();
+            Set<String> ids = new HashSet<>();
+            PeriodicQueryUtil.getPeriodicQueryNodeAncestorIds(sx, fluoQueryId, ids);
+            for(String id: ids) {
+                NodeType optNode = NodeType.fromNodeId(id).orNull();
+                if(optNode == null) throw new RuntimeException("Invalid NodeType.");
+                Bytes prefix = RowKeyUtil.makeRowKey(id,varOrder, bs);
+                RowScanner scanner = sx.scanner().fetch(optNode.getResultColumn()).over(Span.prefix(prefix)).byRow().build();
+                int count = 0;
+                Iterator<ColumnScanner> colScannerIter = scanner.iterator();
+                while(colScannerIter.hasNext()) {
+                    ColumnScanner colScanner = colScannerIter.next();
+                    String row = colScanner.getRow().toString();
+                    Iterator<ColumnValue> values = colScanner.iterator();
+                    while(values.hasNext()) {
+                        values.next();
+                        count++;
+                    }
+                }
+                Assert.assertEquals(0, count);
+            }
+        }
+    }
+    
+}
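
The expected counts above decay from expected1 to expected4 because, with a
2 hour window and a 30 minute period, each observation remains visible for
several consecutive bins and then ages out. A sketch of the apparent
membership rule (an assumption inferred from the expected sets, not an API
of this patch):

    public class WindowMembershipSketch {

        // an observation at time t contributes to bin b when it falls in
        // the windowMs interval ending at b
        static boolean inBin(long t, long b, long windowMs) {
            return t > b - windowMs && t <= b;
        }

        public static void main(String[] args) {
            long period = 1800000L;         // 30 minutes
            long window = 4 * period;       // 2 hours
            long bin1 = 36_000_000L;        // some multiple of the period
            long t = bin1 - 3 * period + 1; // roughly 90 minutes before bin1
            System.out.println(inBin(t, bin1, window));              // true
            System.out.println(inBin(t, bin1 + period, window));     // true
            System.out.println(inBin(t, bin1 + 2 * period, window)); // false: aged out
        }
    }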

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicCommandNotificationConsumerIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicCommandNotificationConsumerIT.java b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicCommandNotificationConsumerIT.java
new file mode 100644
index 0000000..bde406f
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/java/org/apache/rya/periodic/notification/registration/kafka/PeriodicCommandNotificationConsumerIT.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.registration.kafka;
+
+import java.util.Properties;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.rya.pcj.fluo.test.base.KafkaExportITBase;
+import org.apache.rya.periodic.notification.coordinator.PeriodicNotificationCoordinatorExecutor;
+import org.apache.rya.periodic.notification.notification.CommandNotification;
+import org.apache.rya.periodic.notification.notification.TimestampedNotification;
+import org.apache.rya.periodic.notification.serialization.CommandNotificationSerializer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class PeriodicCommandNotificationConsumerIT extends KafkaExportITBase {
+
+    private static final String topic = "topic";
+    private KafkaNotificationRegistrationClient registration;
+    private PeriodicNotificationCoordinatorExecutor coord;
+    private KafkaNotificationProvider provider;
+
+    @Test
+    public void kafkaNotificationProviderTest() throws InterruptedException {
+
+        BasicConfigurator.configure();
+
+        BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
+        Properties props = createKafkaConfig();
+        KafkaProducer<String, CommandNotification> producer = new KafkaProducer<>(props);
+        registration = new KafkaNotificationRegistrationClient(topic, producer);
+        coord = new PeriodicNotificationCoordinatorExecutor(1, notifications);
+        provider = new KafkaNotificationProvider(topic, new StringDeserializer(), new CommandNotificationSerializer(), props, coord, 1);
+        provider.start();
+
+        registration.addNotification("1", 1, 0, TimeUnit.SECONDS);
+        Thread.sleep(4000);
+        // check that notifications are being added to the blocking queue
+        Assert.assertTrue(notifications.size() > 0);
+
+        registration.deleteNotification("1");
+        Thread.sleep(2000);
+        int size = notifications.size();
+        // sleep for 2 seconds to ensure no more messages being produced
+        Thread.sleep(2000);
+        Assert.assertEquals(size, notifications.size());
+    }
+
+    @Test
+    public void kafkaNotificationMillisProviderTest() throws InterruptedException {
+
+        BasicConfigurator.configure();
+
+        BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
+        Properties props = createKafkaConfig();
+        KafkaProducer<String, CommandNotification> producer = new KafkaProducer<>(props);
+        registration = new KafkaNotificationRegistrationClient(topic, producer);
+        coord = new PeriodicNotificationCoordinatorExecutor(1, notifications);
+        provider = new KafkaNotificationProvider(topic, new StringDeserializer(), new CommandNotificationSerializer(), props, coord, 1);
+        provider.start();
+
+        registration.addNotification("1", 1000, 0, TimeUnit.MILLISECONDS);
+        Thread.sleep(4000);
+        // check that notifications are being added to the blocking queue
+        Assert.assertTrue(notifications.size() > 0);
+
+        registration.deleteNotification("1");
+        Thread.sleep(2000);
+        int size = notifications.size();
+        // sleep for 2 seconds to ensure no more messages being produced
+        Thread.sleep(2000);
+        Assert.assertEquals(size, notifications.size());
+    }
+
+    @After
+    public void tearDown() {
+        registration.close();
+        provider.stop();
+        coord.stop();
+    }
+
+    private Properties createKafkaConfig() {
+        Properties props = new Properties();
+        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
+        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
+        props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
+        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CommandNotificationSerializer.class.getName());
+
+        return props;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/src/test/resources/notification.properties
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/src/test/resources/notification.properties b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/resources/notification.properties
new file mode 100644
index 0000000..4b25b93
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/src/test/resources/notification.properties
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+accumulo.auths=
+accumulo.instance=instance
+accumulo.user=root
+accumulo.password=secret
+accumulo.rya.prefix=rya_
+accumulo.zookeepers=
+fluo.app.name=fluo_app
+fluo.table.name=fluo_table
+kafka.bootstrap.servers=127.0.0.1:9092
+kafka.notification.topic=notifications
+kafka.notification.client.id=consumer0
+kafka.notification.group.id=group0
+cep.coordinator.threads=1
+cep.producer.threads=1
+cep.exporter.threads=1
+cep.processor.threads=1
+cep.pruner.threads=1
\ No newline at end of file
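
These properties drive the periodic notification application's Kafka and
Accumulo wiring. A minimal sketch of loading the file with plain
java.util.Properties (the configuration class that actually consumes it is
not part of this hunk, so only raw loading is shown):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Properties;

    public class NotificationPropertiesSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            try (InputStream in = Files.newInputStream(Paths.get("notification.properties"))) {
                props.load(in);
            }
            // e.g. the topic the registration client publishes to
            System.out.println(props.getProperty("kafka.notification.topic")); // notifications
        }
    }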

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/pom.xml b/extras/rya.periodic.service/periodic.service.notification/pom.xml
new file mode 100644
index 0000000..2173888
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/pom.xml
@@ -0,0 +1,107 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<!-- Licensed to the Apache Software Foundation (ASF) under one or more 
+		contributor license agreements. See the NOTICE file distributed with this 
+		work for additional information regarding copyright ownership. The ASF licenses 
+		this file to you under the Apache License, Version 2.0 (the "License"); you 
+		may not use this file except in compliance with the License. You may obtain 
+		a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
+		required by applicable law or agreed to in writing, software distributed 
+		under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES 
+		OR CONDITIONS OF ANY KIND, either express or implied. See the License for 
+		the specific language governing permissions and limitations under the License. -->
+	<parent>
+		<groupId>org.apache.rya</groupId>
+		<artifactId>rya.periodic.service</artifactId>
+		<version>3.2.11-incubating-SNAPSHOT</version>
+	</parent>
+
+	<artifactId>rya.periodic.service.notification</artifactId>
+	
+	<name>Apache Rya Periodic Service Notification</name>
+    <description>Notifications for Rya Periodic Service</description>
+
+	<dependencies>
+
+		<dependency>
+			<groupId>org.apache.twill</groupId>
+			<artifactId>twill-api</artifactId>
+			<version>0.11.0</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.twill</groupId>
+			<artifactId>twill-yarn</artifactId>
+			<version>0.11.0</version>
+			<exclusions>
+				<exclusion>
+					<artifactId>kafka_2.10</artifactId>
+					<groupId>org.apache.kafka</groupId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>com.google.code.gson</groupId>
+			<artifactId>gson</artifactId>
+			<version>2.8.0</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.fluo</groupId>
+			<artifactId>fluo-api</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.fluo</groupId>
+			<artifactId>fluo-core</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.rya</groupId>
+			<artifactId>rya.indexing</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.openrdf.sesame</groupId>
+			<artifactId>sesame-query</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.rya</groupId>
+			<artifactId>rya.indexing.pcj</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.rya</groupId>
+			<artifactId>rya.pcj.fluo.app</artifactId>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<configuration>
+					<encoding>UTF-8</encoding>
+					<source>1.8</source>
+					<target>1.8</target>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-shade-plugin</artifactId>
+				<version>3.0.0</version>
+				<executions>
+					<execution>
+						<phase>package</phase>
+						<goals>
+							<goal>shade</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
+
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BinPruner.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BinPruner.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BinPruner.java
new file mode 100644
index 0000000..571ee1c
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BinPruner.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants;
+import org.openrdf.query.Binding;
+
+/**
+ * Object that cleans up old {@link BindingSet}s corresponding to the specified
+ * {@link NodeBin}. This class deletes all BindingSets with the bin 
+ * indicated by {@link NodeBin#getBin()}.  A BindingSet corresponds to a given
+ * bin if it contains a {@link Binding} with name {@link IncrementalUpdateConstants#PERIODIC_BIN_ID}
+ * and value equal to the given bin.
+ *
+ */
+public interface BinPruner {
+    
+    /**
+     * Cleans up all {@link BindingSet}s associated with the indicated {@link NodeBin}.
+     * @param bin - NodeBin that indicates which BindingSets to delete.
+     */
+    public void pruneBindingSetBin(NodeBin bin);
+    
+}
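
A hypothetical, print-only implementation to make the contract concrete (the
pruner that ships with this patch is exercised by PeriodicNotificationBinPrunerIT
above; the class below is illustrative only):

    import org.apache.rya.periodic.notification.api.BinPruner;
    import org.apache.rya.periodic.notification.api.NodeBin;

    public class LoggingBinPruner implements BinPruner {
        @Override
        public void pruneBindingSetBin(NodeBin bin) {
            // a real implementation deletes every BindingSet whose
            // PERIODIC_BIN_ID binding equals bin.getBin(); this one only logs
            System.out.println("would prune bin " + bin.getBin() + " of query " + bin.getNodeId());
        }
    }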

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BindingSetExporter.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BindingSetExporter.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BindingSetExporter.java
new file mode 100644
index 0000000..500a435
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/BindingSetExporter.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter.ResultExportException;
+import org.apache.rya.periodic.notification.exporter.BindingSetRecord;
+
+/**
+ * An Object that is used to export {@link BindingSet}s to an external repository or queuing system.
+ *
+ */
+public interface BindingSetExporter {
+
+    /**
+     * This method exports the BindingSet to the external repository or queuing system
+     * that this BindingSetExporter is configured to export to.
+     * @param bindingSet - {@link BindingSetRecord} containing the BindingSet to be exported
+     * @throws ResultExportException
+     */
+    public void exportNotification(BindingSetRecord bindingSet) throws ResultExportException;
+
+}
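
As with BinPruner, a hypothetical print-only implementation sketches the
contract; KafkaPeriodicBindingSetExporter is the real exporter added by this
patch:

    import org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter.ResultExportException;
    import org.apache.rya.periodic.notification.api.BindingSetExporter;
    import org.apache.rya.periodic.notification.exporter.BindingSetRecord;

    public class ConsoleBindingSetExporter implements BindingSetExporter {
        @Override
        public void exportNotification(BindingSetRecord record) throws ResultExportException {
            // print instead of exporting to an external system
            System.out.println(record.getBindingSet());
        }
    }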

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/CreatePeriodicQuery.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/CreatePeriodicQuery.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/CreatePeriodicQuery.java
new file mode 100644
index 0000000..7f71b52
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/CreatePeriodicQuery.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import java.util.Optional;
+
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
+import org.apache.rya.indexing.pcj.fluo.app.query.PeriodicQueryNode;
+import org.apache.rya.indexing.pcj.fluo.app.util.PeriodicQueryUtil;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryStorageException;
+import org.apache.rya.periodic.notification.application.PeriodicNotificationApplication;
+import org.apache.rya.periodic.notification.notification.PeriodicNotification;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.algebra.evaluation.function.Function;
+
+/**
+ * Object that creates a Periodic Query.  A Periodic Query is any query
+ * requesting periodic updates about events that occurred within a given
+ * window of time of this instant. This is also known as a rolling window
+ * query.  Periodic Queries can be expressed using SPARQL by including the
+ * {@link Function} indicated by the URI {@link PeriodicQueryUtil#PeriodicQueryURI}
+ * in the query.  The user must provide this Function with the following arguments:
+ * the temporal variable in the query that will be filtered on, the window of time
+ * that events must occur within, the period at which the user wants to receive updates,
+ * and the time unit.  The following query requests all observations that occurred
+ * within the last minute and requests updates every 15 seconds.  It also performs
+ * a count on those observations.
+ * <pre>
+ * prefix function: http://org.apache.rya/function#
+ * prefix time: http://www.w3.org/2006/time#
+ * select (count(?obs) as ?total) where {
+ *   Filter(function:periodic(?time, 1, .25, time:minutes))
+ *   ?obs uri:hasTime ?time.
+ *   ?obs uri:hasId ?id }
+ * </pre>
+ * 
+ * This class is responsible for taking a Periodic Query expressed as a SPARQL query
+ * and adding it to Fluo and Kafka so that it can be processed by the {@link PeriodicNotificationApplication}.
+ */
+public class CreatePeriodicQuery {
+
+    private FluoClient fluoClient;
+    private PeriodicQueryResultStorage periodicStorage;
+    Function function;
+    PeriodicQueryUtil util;
+    
+    
+    public CreatePeriodicQuery(FluoClient fluoClient, PeriodicQueryResultStorage periodicStorage) {
+        this.fluoClient = fluoClient;
+        this.periodicStorage = periodicStorage;
+    }
+    
+    /**
+     * Creates a Periodic Query by adding the query to Fluo and using the resulting
+     * Fluo id to create a {@link PeriodicQueryResultStorage} table.
+     * @param sparql - sparql query registered to Fluo whose results are stored in PeriodicQueryResultStorage table
+     * @return PeriodicNotification that can be used to register this query with the {@link PeriodicNotificationApplication}.
+     */
+    public PeriodicNotification createPeriodicQuery(String sparql) {
+        try {
+            Optional<PeriodicQueryNode> optNode = PeriodicQueryUtil.getPeriodicNode(sparql);
+            if(optNode.isPresent()) {
+                PeriodicQueryNode periodicNode = optNode.get();
+                CreatePcj createPcj = new CreatePcj();
+                String queryId = createPcj.createPcj(sparql, fluoClient);
+                periodicStorage.createPeriodicQuery(queryId, sparql);
+                PeriodicNotification notification = PeriodicNotification.builder().id(queryId).period(periodicNode.getPeriod())
+                        .timeUnit(periodicNode.getUnit()).build();
+                return notification;
+            } else {
+                throw new RuntimeException("Invalid PeriodicQuery.  Query must possess a PeriodicQuery Filter.");
+            }
+        } catch (MalformedQueryException | PeriodicQueryStorageException e) {
+            throw new RuntimeException(e);
+        }
+    }
+    
+    /**
+     * Creates a Periodic Query by adding the query to Fluo and using the resulting
+     * Fluo id to create a {@link PeriodicQueryResultStorage} table.  In addition, this
+     * method registers the PeriodicQuery with the PeriodicNotificationApplication to poll
+     * the PeriodicQueryResultStorage table at regular intervals and export results to Kafka.
+     * The PeriodicNotificationApp queries the result table at a regular interval indicated by the Period of
+     * the PeriodicQuery.
+     * @param sparql - sparql query registered to Fluo whose results are stored in PeriodicQueryResultStorage table
+     * @param periodicClient - client that registers the PeriodicQuery with the {@link PeriodicNotificationApplication}
+     * @return id of the PeriodicQuery and PeriodicQueryResultStorage table (these are the same)
+     */
+    public String createQueryAndRegisterWithKafka(String sparql, PeriodicNotificationClient periodicClient) {
+        PeriodicNotification notification = createPeriodicQuery(sparql);
+        periodicClient.addNotification(notification);
+        return notification.getId();
+    }
+    
+}
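
Typical usage mirrors the pruner integration test above; a condensed sketch,
assuming a FluoClient (fluoClient), a PeriodicQueryResultStorage
(periodicStorage), and a periodic SPARQL query (sparql) already exist:

    CreatePeriodicQuery createPeriodicQuery = new CreatePeriodicQuery(fluoClient, periodicStorage);
    PeriodicNotification notification = createPeriodicQuery.createPeriodicQuery(sparql);
    String queryId = notification.getId(); // same id names the PeriodicQueryResultStorage table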

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/LifeCycle.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/LifeCycle.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/LifeCycle.java
new file mode 100644
index 0000000..b1e8bad
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/LifeCycle.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+/**
+ * Interface providing basic life cycle functionality:
+ * starting and stopping any class implementing this
+ * interface and checking whether it is running.
+ *
+ */
+public interface LifeCycle {
+
+    /**
+     * Starts the application.
+     */
+    public void start();
+
+    /**
+     * Stops a running application.
+     */
+    public void stop();
+    
+    /**
+     * Determines whether the application is currently running.
+     * @return true if application is running and false otherwise.
+     */
+    public boolean currentlyRunning();
+
+}
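
A trivial LifeCycle implementation showing the expected state transitions
(illustrative only; the executors in this patch expose the same
start/stop/currentlyRunning contract):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class NoOpLifeCycle implements LifeCycle {

        private final AtomicBoolean running = new AtomicBoolean(false);

        @Override
        public void start() { running.set(true); }

        @Override
        public void stop() { running.set(false); }

        @Override
        public boolean currentlyRunning() { return running.get(); }
    }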

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NodeBin.java
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NodeBin.java b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NodeBin.java
new file mode 100644
index 0000000..3ed7979
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.notification/src/main/java/org/apache/rya/periodic/notification/api/NodeBin.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.periodic.notification.api;
+
+import java.util.Objects;
+
+/**
+ * Object used to indicate the id of a given Periodic Query
+ * along with a particular bin of results.  This Object is used
+ * by the {@link BinPruner} to clean up old query results after
+ * they have been processed.
+ *
+ */
+public class NodeBin {
+
+    private long bin;
+    private String nodeId;
+
+    public NodeBin(String nodeId, long bin) {
+        this.bin = bin;
+        this.nodeId = nodeId;
+    }
+
+    /**
+     * @return id of Periodic Query
+     */
+    public String getNodeId() {
+        return nodeId;
+    }
+    /**
+     * @return bin id of results for a given Periodic Query
+     */
+    public long getBin() {
+        return bin;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other instanceof NodeBin) {
+            NodeBin nodeBin = (NodeBin) other;
+            return this.bin == nodeBin.bin && this.nodeId.equals(nodeBin.nodeId);
+        }
+
+        return false;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(bin, nodeId);
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder().append("Node Bin \n").append("   QueryId: " + nodeId + "\n").append("   Bin: " + bin + "\n").toString();
+    }
+
+}



[4/9] incubator-rya git commit: RYA-280-Periodic Query Service. Closes #177.

Posted by ca...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
index 05dfd32..f330825 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/InputIT.java
@@ -29,12 +29,12 @@ import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Statement;
 import org.openrdf.model.ValueFactory;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
index 219e079..ab7610d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaExportIT.java
@@ -32,9 +32,9 @@ import java.util.UUID;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.rya.indexing.pcj.fluo.KafkaExportITBase;
 import org.apache.rya.indexing.pcj.storage.accumulo.VariableOrder;
 import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.pcj.fluo.test.base.KafkaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Statement;
 import org.openrdf.model.ValueFactory;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaRyaSubGraphExportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaRyaSubGraphExportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaRyaSubGraphExportIT.java
index c8167c7..7a4ed8d 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaRyaSubGraphExportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/KafkaRyaSubGraphExportIT.java
@@ -46,7 +46,6 @@ import org.apache.rya.api.domain.RyaSubGraph;
 import org.apache.rya.api.domain.RyaURI;
 import org.apache.rya.api.resolver.RdfToRyaConversions;
 import org.apache.rya.indexing.pcj.fluo.ConstructGraphTestUtils;
-import org.apache.rya.indexing.pcj.fluo.KafkaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
 import org.apache.rya.indexing.pcj.fluo.app.export.kafka.RyaSubGraphKafkaSerDe;
@@ -57,6 +56,7 @@ import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
 import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
 import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
 import org.apache.rya.indexing.pcj.fluo.app.query.FluoQuery;
+import org.apache.rya.pcj.fluo.test.base.KafkaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Statement;
 import org.openrdf.model.ValueFactory;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
index f815a55..85c5030 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/QueryIT.java
@@ -22,20 +22,27 @@ import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 
 import java.math.BigDecimal;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
 import java.util.Collection;
 import java.util.HashSet;
+import java.util.Optional;
 import java.util.Set;
 
 import javax.xml.datatype.DatatypeFactory;
 
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.core.client.FluoClientImpl;
 import org.apache.rya.api.client.RyaClient;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
 import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
+import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
+import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
+import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Literal;
 import org.openrdf.model.Statement;
@@ -51,6 +58,7 @@ import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException;
 import org.openrdf.query.algebra.evaluation.function.Function;
 import org.openrdf.query.algebra.evaluation.function.FunctionRegistry;
 import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.repository.RepositoryException;
 import org.openrdf.repository.sail.SailRepositoryConnection;
 
 import com.google.common.collect.Sets;
@@ -60,6 +68,8 @@ import com.google.common.collect.Sets;
  */
 public class QueryIT extends RyaExportITBase {
 
+    private enum ExporterType { Pcj, Periodic }
+    
     @Test
     public void optionalStatements() throws Exception {
         // A query that has optional statement patterns. This query is looking for all
@@ -100,7 +110,7 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
 
     /**
@@ -181,7 +191,7 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
 
     @Test
@@ -241,7 +251,7 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
 
     @Test
@@ -283,7 +293,7 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
 
     @Test
@@ -368,7 +378,7 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
 
     @Test
@@ -430,10 +440,295 @@ public class QueryIT extends RyaExportITBase {
         expectedResults.add(bs);
 
         // Verify the end results of the query match the expected results.
-        runTest(sparql, statements, expectedResults);
+        runTest(sparql, statements, expectedResults, ExporterType.Pcj);
     }
+    
+    
+    @Test
+    public void periodicQueryTestWithoutAggregation() throws Exception {
+        String query = "prefix function: <http://org.apache.rya/function#> " //n
+                + "prefix time: <http://www.w3.org/2006/time#> " //n
+                + "select ?id where {" //n
+                + "Filter(function:periodic(?time, 2, .5, time:hours)) " //n
+                + "?obs <uri:hasTime> ?time. " //n
+                + "?obs <uri:hasId> ?id }"; //n
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        ZonedDateTime time = ZonedDateTime.now();
+        long currentTime = time.toInstant().toEpochMilli();
+        
+        ZonedDateTime zTime1 = time.minusMinutes(30);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime2 = zTime1.minusMinutes(30);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime3 = zTime2.minusMinutes(30);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime4 = zTime3.minusMinutes(30);
+        String time4 = zTime4.format(DateTimeFormatter.ISO_INSTANT);
+        
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasId"), vf.createLiteral("id_4"))
+                );
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        long period = 1800000;
+        long binId = (currentTime/period)*period;
+        
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 3*period));
+        expectedResults.add(bs);
 
-    public void runTest(final String sparql, final Collection<Statement> statements, final Collection<BindingSet> expectedResults) throws Exception {
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("id", vf.createLiteral("id_4", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+
+        // Verify the end results of the query match the expected results.
+        runTest(query, statements, expectedResults, ExporterType.Periodic);
+    }
+    
+    
+    @Test
+    public void periodicQueryTestWithAggregation() throws Exception {
+        String query = "prefix function: <http://org.apache.rya/function#> " //n
+                + "prefix time: <http://www.w3.org/2006/time#> " //n
+                + "select (count(?obs) as ?total) where {" //n
+                + "Filter(function:periodic(?time, 2, .5, time:hours)) " //n
+                + "?obs <uri:hasTime> ?time. " //n
+                + "?obs <uri:hasId> ?id }"; //n
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        ZonedDateTime time = ZonedDateTime.now();
+        long currentTime = time.toInstant().toEpochMilli();
+        
+        ZonedDateTime zTime1 = time.minusMinutes(30);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime2 = zTime1.minusMinutes(30);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime3 = zTime2.minusMinutes(30);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime4 = zTime3.minusMinutes(30);
+        String time4 = zTime4.format(DateTimeFormatter.ISO_INSTANT);
+        
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasId"), vf.createLiteral("id_4"))
+                );
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        long period = 1800000;
+        long binId = (currentTime/period)*period;
+        
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("4", XMLSchema.INTEGER));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("3", XMLSchema.INTEGER));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 3*period));
+        expectedResults.add(bs);
+
+
+        // Verify the end results of the query match the expected results.
+        runTest(query, statements, expectedResults, ExporterType.Periodic);
+    }
+    
+    @Test
+    public void periodicQueryTestWithAggregationAndGroupBy() throws Exception {
+        String query = "prefix function: <http://org.apache.rya/function#> " //n
+                + "prefix time: <http://www.w3.org/2006/time#> " //n
+                + "select ?id (count(?obs) as ?total) where {" //n
+                + "Filter(function:periodic(?time, 2, .5, time:hours)) " //n
+                + "?obs <uri:hasTime> ?time. " //n
+                + "?obs <uri:hasId> ?id } group by ?id"; //n
+
+        // Create the Statements that will be loaded into Rya.
+        final ValueFactory vf = new ValueFactoryImpl();
+        final DatatypeFactory dtf = DatatypeFactory.newInstance();
+        ZonedDateTime time = ZonedDateTime.now();
+        long currentTime = time.toInstant().toEpochMilli();
+        
+        ZonedDateTime zTime1 = time.minusMinutes(30);
+        String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime2 = zTime1.minusMinutes(30);
+        String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime3 = zTime2.minusMinutes(30);
+        String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT);
+        
+        ZonedDateTime zTime4 = zTime3.minusMinutes(30);
+        String time4 = zTime4.format(DateTimeFormatter.ISO_INSTANT);
+        
+        final Collection<Statement> statements = Sets.newHashSet(
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3")),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasId"), vf.createLiteral("id_4")),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time4))),
+                vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))),
+                vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2"))
+                );
+
+        // Create the expected results of the SPARQL query once the PCJ has been computed.
+        final Set<BindingSet> expectedResults = new HashSet<>();
+
+        long period = 1800000;
+        long binId = (currentTime/period)*period;
+        
+        MapBindingSet bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_4", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("2", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_3", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_2", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 2*period));
+        expectedResults.add(bs);
+        
+        bs = new MapBindingSet();
+        bs.addBinding("total", vf.createLiteral("1", XMLSchema.INTEGER));
+        bs.addBinding("id", vf.createLiteral("id_1", XMLSchema.STRING));
+        bs.addBinding("periodicBinId", vf.createLiteral(binId + 3*period));
+        expectedResults.add(bs);
+
+        // Verify the end results of the query match the expected results.
+        runTest(query, statements, expectedResults, ExporterType.Periodic);
+    }
+
+    public void runTest(final String sparql, final Collection<Statement> statements, final Collection<BindingSet> expectedResults, final ExporterType type) throws Exception {
         requireNonNull(sparql);
         requireNonNull(statements);
         requireNonNull(expectedResults);
@@ -443,9 +738,38 @@ public class QueryIT extends RyaExportITBase {
 
         final RyaClient ryaClient = AccumuloRyaClientFactory.build(createConnectionDetails(), accumuloConn);
 
-        ryaClient.getCreatePCJ().createPCJ(getRyaInstanceName(), sparql);
-
-        // Write the data to Rya.
+        switch (type) {
+        case Pcj:
+            ryaClient.getCreatePCJ().createPCJ(getRyaInstanceName(), sparql);
+            addStatementsAndWait(statements);
+            // Fetch the value that is stored within the PCJ table.
+            try (final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, getRyaInstanceName())) {
+                final String pcjId = pcjStorage.listPcjs().get(0);
+                final Set<BindingSet> results = Sets.newHashSet(pcjStorage.listResults(pcjId));
+                // Ensure the result of the query matches the expected result.
+                assertEquals(expectedResults, results);
+            }
+            break;
+        case Periodic:
+            PeriodicQueryResultStorage periodicStorage = new AccumuloPeriodicQueryResultStorage(accumuloConn, getRyaInstanceName());
+            String periodicId = periodicStorage.createPeriodicQuery(sparql);
+            try (FluoClient fluo = new FluoClientImpl(super.getFluoConfiguration())) {
+                new CreatePcj().createPcj(periodicId, sparql, fluo);
+            }
+            addStatementsAndWait(statements);
+            final Set<BindingSet> results = Sets.newHashSet();
+            try (CloseableIterator<BindingSet> resultIter = periodicStorage.listResults(periodicId, Optional.empty())) {
+                while (resultIter.hasNext()) {
+                    results.add(resultIter.next());
+                }
+            }
+            assertEquals(expectedResults, results);
+            break;
+        }
+    }
+    
+    private void addStatementsAndWait(final Collection<Statement> statements) throws RepositoryException, Exception {
+        // Write the data to Rya.
         final SailRepositoryConnection ryaConn = super.getRyaSailRepository().getConnection();
         ryaConn.begin();
         ryaConn.add(statements);
@@ -454,14 +778,5 @@ public class QueryIT extends RyaExportITBase {
 
         // Wait for the Fluo application to finish computing the end result.
         super.getMiniFluo().waitForObservers();
-
-        // Fetch the value that is stored within the PCJ table.
-        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(accumuloConn, getRyaInstanceName())) {
-            final String pcjId = pcjStorage.listPcjs().get(0);
-            final Set<BindingSet> results = Sets.newHashSet( pcjStorage.listResults(pcjId) );
-
-            // Ensure the result of the query matches the expected result.
-            assertEquals(expectedResults, results);
-        }
     }
 }
\ No newline at end of file
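
The expected bindings in the periodic tests above all follow from one piece of arithmetic: function:periodic(?time, 2, .5, time:hours) declares a 2-hour window split into 30-minute (1800000 ms) periods, and binId = (currentTime / period) * period is the most recent period boundary. Below is a minimal sketch of that assignment, assuming (as the expected results do) that an observation only lands in boundaries at or after binId whose window still covers it; it illustrates the test expectations, not the Fluo implementation itself:

    import java.util.ArrayList;
    import java.util.List;

    public class PeriodicBinSketch {
        // All boundaries b = binId, binId + periodMs, ... whose window of
        // windowMs still covers the observation timestamp ts.
        static List<Long> binsFor(final long ts, final long binId, final long periodMs, final long windowMs) {
            final List<Long> bins = new ArrayList<>();
            for (long b = binId; b - ts <= windowMs; b += periodMs) {
                bins.add(b);
            }
            return bins;
        }

        public static void main(final String[] args) {
            final long period = 1800000L;               // 30 minutes
            final long window = 4 * period;             // 2 hours
            final long now = System.currentTimeMillis();
            final long binId = (now / period) * period; // most recent boundary
            // A 30-minute-old observation lands in 4 bins and a 2-hour-old one
            // in a single bin, matching the expected result counts above.
            System.out.println(binsFor(now - period, binId, period, window).size());     // 4
            System.out.println(binsFor(now - 4 * period, binId, period, window).size()); // 1
        }
    }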

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
index 9c21afd..12c69ca 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaExportIT.java
@@ -28,11 +28,11 @@ import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.ValueFactory;
 import org.openrdf.model.impl.ValueFactoryImpl;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
index a8d470f..e6d287e 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/RyaInputIncrementalUpdateIT.java
@@ -28,12 +28,12 @@ import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.rya.accumulo.AccumuloRyaDAO;
 import org.apache.rya.indexing.external.PrecomputedJoinIndexer;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.update.PrecomputedJoinUpdater;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Statement;
 import org.openrdf.model.ValueFactory;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
index 72759bb..3f51311 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/integration/StreamingTestIT.java
@@ -28,11 +28,11 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.fluo.api.client.FluoClient;
 import org.apache.fluo.api.client.FluoFactory;
 import org.apache.log4j.Logger;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Test;
 import org.openrdf.model.Resource;
 import org.openrdf.model.Statement;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
index 150492f..ab42e89 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/HistoricStreamingVisibilityIT.java
@@ -32,10 +32,10 @@ import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.resolver.RdfToRyaConversions;
 import org.apache.rya.indexing.accumulo.ConfigUtils;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.junit.Assert;
 import org.junit.Test;
 import org.openrdf.model.Statement;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
index 46bc7b0..dc2f859 100644
--- a/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
+++ b/extras/rya.pcj.fluo/pcj.fluo.integration/src/test/java/org/apache/rya/indexing/pcj/fluo/visibility/PcjVisibilityIT.java
@@ -44,19 +44,18 @@ import org.apache.hadoop.io.Text;
 import org.apache.rya.accumulo.AccumuloRdfConfiguration;
 import org.apache.rya.api.RdfCloudTripleStoreConfiguration;
 import org.apache.rya.api.client.RyaClient;
-import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
 import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
 import org.apache.rya.api.domain.RyaStatement;
 import org.apache.rya.api.domain.RyaURI;
 import org.apache.rya.indexing.accumulo.ConfigUtils;
 import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
-import org.apache.rya.indexing.pcj.fluo.RyaExportITBase;
 import org.apache.rya.indexing.pcj.fluo.api.CreatePcj;
 import org.apache.rya.indexing.pcj.fluo.api.InsertTriples;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage;
 import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator;
 import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage;
 import org.apache.rya.indexing.pcj.storage.accumulo.PcjTableNameFactory;
+import org.apache.rya.pcj.fluo.test.base.RyaExportITBase;
 import org.apache.rya.rdftriplestore.RyaSailRepository;
 import org.apache.rya.sail.config.RyaSailFactory;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.test.base/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.test.base/pom.xml b/extras/rya.pcj.fluo/pcj.fluo.test.base/pom.xml
new file mode 100644
index 0000000..67bd0f0
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.test.base/pom.xml
@@ -0,0 +1,108 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+  <modelVersion>4.0.0</modelVersion>
+  
+    <parent>
+        <groupId>org.apache.rya</groupId>
+        <artifactId>rya.pcj.fluo.parent</artifactId>
+        <version>3.2.11-incubating-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>rya.pcj.fluo.test.base</artifactId>
+
+    <name>Apache Rya Integration Base</name>
+    <description>Base classes for Integration tests.</description>
+
+    <dependencies>
+        <!-- Rya Runtime Dependencies. -->
+        <dependency>
+            <groupId>org.apache.rya</groupId>
+            <artifactId>rya.api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.rya</groupId>
+            <artifactId>rya.pcj.fluo.api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.rya</groupId>
+            <artifactId>rya.pcj.fluo.client</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.rya</groupId>
+            <artifactId>rya.indexing</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-api</artifactId>
+        </dependency>
+
+        <!-- Testing dependencies. -->
+        <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-mini</artifactId>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>0.10.1.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.11</artifactId>
+            <version>0.10.1.0</version>
+            <exclusions>
+                <exclusion>
+                    <artifactId>slf4j-log4j12</artifactId>
+                    <groupId>org.slf4j</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <!-- Kafka test artifacts. -->
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.11</artifactId>
+            <version>0.10.1.0</version>
+            <classifier>test</classifier>
+            <scope>compile</scope>
+            <exclusions>
+                <exclusion>
+                    <artifactId>slf4j-log4j12</artifactId>
+                    <groupId>org.slf4j</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.fluo</groupId>
+            <artifactId>fluo-recipes-test</artifactId>
+            <scope>compile</scope>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/kafka/base/KafkaITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/kafka/base/KafkaITBase.java b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/kafka/base/KafkaITBase.java
new file mode 100644
index 0000000..b9be828
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/kafka/base/KafkaITBase.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.base;
+
+import java.nio.file.Files;
+import java.util.Properties;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.junit.After;
+import org.junit.Before;
+
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServer;
+import kafka.utils.MockTime;
+import kafka.utils.TestUtils;
+import kafka.utils.Time;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import kafka.zk.EmbeddedZookeeper;
+
+public class KafkaITBase {
+
+    private static final String ZKHOST = "127.0.0.1";
+    private static final String BROKERHOST = "127.0.0.1";
+    private static final String BROKERPORT = "9092";
+    private KafkaServer kafkaServer;
+    private EmbeddedZookeeper zkServer;
+    private ZkClient zkClient;
+    
+    @Before
+    public void setupKafka() throws Exception {
+
+        // Setup Kafka.
+        zkServer = new EmbeddedZookeeper();
+        final String zkConnect = ZKHOST + ":" + zkServer.port();
+        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
+        ZkUtils.apply(zkClient, false);
+
+        // setup Broker
+        final Properties brokerProps = new Properties();
+        brokerProps.setProperty("zookeeper.connect", zkConnect);
+        brokerProps.setProperty("broker.id", "0");
+        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
+        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
+        final KafkaConfig config = new KafkaConfig(brokerProps);
+        final Time mock = new MockTime();
+        kafkaServer = TestUtils.createServer(config, mock);
+    }
+    
+    /**
+     * Shuts down the embedded Kafka server and mini ZooKeeper.
+     *
+     * @see org.apache.rya.indexing.pcj.fluo.ITBase#shutdownMiniResources()
+     */
+    @After
+    public void teardownKafka() {
+        kafkaServer.shutdown();
+        zkClient.close();
+        zkServer.shutdown();
+    }
+    
+}
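
For orientation, a sketch of how a test might publish to the embedded broker that setupKafka() starts. The subclass name, topic, and payload are invented for illustration; the broker address comes from the BROKERHOST and BROKERPORT constants above:

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    // Hypothetical subclass; the topic and message are placeholders.
    public class ExampleKafkaIT extends KafkaITBase {
        public void publishExampleRecord() {
            final Properties props = new Properties();
            props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
            props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("example-topic", "key", "value"));
            }
        }
    }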

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/FluoITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/FluoITBase.java b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/FluoITBase.java
new file mode 100644
index 0000000..32ee962
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/FluoITBase.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.pcj.fluo.test.base;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.net.UnknownHostException;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.client.FluoAdmin;
+import org.apache.fluo.api.client.FluoAdmin.AlreadyInitializedException;
+import org.apache.fluo.api.client.FluoClient;
+import org.apache.fluo.api.client.FluoFactory;
+import org.apache.fluo.api.config.FluoConfiguration;
+import org.apache.fluo.api.mini.MiniFluo;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.accumulo.MiniAccumuloClusterInstance;
+import org.apache.rya.accumulo.MiniAccumuloSingleton;
+import org.apache.rya.accumulo.RyaTestInstanceRule;
+import org.apache.rya.api.client.Install;
+import org.apache.rya.api.client.Install.DuplicateInstanceNameException;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClientException;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloInstall;
+import org.apache.rya.api.instance.RyaDetailsRepository.RyaDetailsRepositoryException;
+import org.apache.rya.api.persist.RyaDAOException;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
+import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.apache.zookeeper.ClientCnxn;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.RepositoryException;
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailException;
+
+/**
+ * Base class for integration tests that ensure the Fluo application
+ * processes PCJ results correctly.
+ * <p>
+ * This abstract class contains no tests of its own, so test runners skip it.
+ */
+public abstract class FluoITBase {
+    private static final Logger log = Logger.getLogger(FluoITBase.class);
+
+    // Mini Accumulo Cluster
+    private static MiniAccumuloClusterInstance clusterInstance = MiniAccumuloSingleton.getInstance();
+    private static MiniAccumuloCluster cluster;
+
+    private static String instanceName = null;
+    private static String zookeepers = null;
+
+    protected static Connector accumuloConn = null;
+
+    // Fluo data store and connections.
+    protected MiniFluo fluo = null;
+    protected FluoConfiguration fluoConfig = null;
+    protected FluoClient fluoClient = null;
+
+    // Rya data store and connections.
+    protected RyaSailRepository ryaRepo = null;
+    protected RepositoryConnection ryaConn = null;
+
+    @Rule
+    public RyaTestInstanceRule testInstance = new RyaTestInstanceRule(false);
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        Logger.getLogger(ClientCnxn.class).setLevel(Level.ERROR);
+
+        // Setup and start the Mini Accumulo.
+        cluster = clusterInstance.getCluster();
+
+        // Store a connector to the Mini Accumulo.
+        instanceName = cluster.getInstanceName();
+        zookeepers = cluster.getZooKeepers();
+
+        final Instance instance = new ZooKeeperInstance(instanceName, zookeepers);
+        accumuloConn = instance.getConnector(clusterInstance.getUsername(), new PasswordToken(clusterInstance.getPassword()));
+    }
+
+    @Before
+    public void setupMiniResources() throws Exception {
+        // Initialize the Mini Fluo that will be used to store created queries.
+        fluoConfig = createFluoConfig();
+        preFluoInitHook();
+        FluoFactory.newAdmin(fluoConfig).initialize(new FluoAdmin.InitializationOptions()
+                .setClearTable(true)
+                .setClearZookeeper(true));
+        postFluoInitHook();
+        fluo = FluoFactory.newMiniFluo(fluoConfig);
+        fluoClient = FluoFactory.newClient(fluo.getClientConfiguration());
+
+        // Initialize the Rya that will be used by the tests.
+        ryaRepo = setupRya();
+        ryaConn = ryaRepo.getConnection();
+    }
+
+    @After
+    public void shutdownMiniResources() {
+        if (ryaConn != null) {
+            try {
+                log.info("Shutting down Rya Connection.");
+                ryaConn.close();
+                log.info("Rya Connection shut down.");
+            } catch (final Exception e) {
+                log.error("Could not shut down the Rya Connection.", e);
+            }
+        }
+
+        if (ryaRepo != null) {
+            try {
+                log.info("Shutting down Rya Repo.");
+                ryaRepo.shutDown();
+                log.info("Rya Repo shut down.");
+            } catch (final Exception e) {
+                log.error("Could not shut down the Rya Repo.", e);
+            }
+        }
+
+        if (fluoClient != null) {
+            try {
+                log.info("Shutting down Fluo Client.");
+                fluoClient.close();
+                log.info("Fluo Client shut down.");
+            } catch (final Exception e) {
+                log.error("Could not shut down the Fluo Client.", e);
+            }
+        }
+
+        if (fluo != null) {
+            try {
+                log.info("Shutting down Mini Fluo.");
+                fluo.close();
+                log.info("Mini Fluo shut down.");
+            } catch (final Exception e) {
+                log.error("Could not shut down the Mini Fluo.", e);
+            }
+        }
+    }
+
+    protected void preFluoInitHook() throws Exception {
+
+    }
+
+    protected void postFluoInitHook() throws Exception {
+
+    }
+
+    protected MiniAccumuloCluster getMiniAccumuloCluster() {
+        return cluster;
+    }
+
+    protected MiniFluo getMiniFluo() {
+        return fluo;
+    }
+
+    public RyaSailRepository getRyaSailRepository() {
+        return ryaRepo;
+    }
+
+    public Connector getAccumuloConnector() {
+        return accumuloConn;
+    }
+
+    public String getRyaInstanceName() {
+        return testInstance.getRyaInstanceName();
+    }
+
+    protected String getUsername() {
+        return clusterInstance.getUsername();
+    }
+
+    protected String getPassword() {
+        return clusterInstance.getPassword();
+    }
+
+    protected FluoConfiguration getFluoConfiguration() {
+        return fluoConfig;
+    }
+
+    public AccumuloConnectionDetails createConnectionDetails() {
+        return new AccumuloConnectionDetails(
+                clusterInstance.getUsername(),
+                clusterInstance.getPassword().toCharArray(),
+                clusterInstance.getInstanceName(),
+                clusterInstance.getZookeepers());
+    }
+
+    private FluoConfiguration createFluoConfig() {
+        // Configure how the mini fluo will run.
+        final FluoConfiguration config = new FluoConfiguration();
+        config.setMiniStartAccumulo(false);
+        config.setAccumuloInstance(instanceName);
+        config.setAccumuloUser(clusterInstance.getUsername());
+        config.setAccumuloPassword(clusterInstance.getPassword());
+        config.setInstanceZookeepers(zookeepers + "/fluo");
+        config.setAccumuloZookeepers(zookeepers);
+
+        config.setApplicationName(getRyaInstanceName());
+        config.setAccumuloTable("fluo" + getRyaInstanceName());
+        return config;
+    }
+
+    /**
+     * Sets up a Rya instance.
+     */
+    protected RyaSailRepository setupRya()
+            throws AccumuloException, AccumuloSecurityException, RepositoryException, RyaDAOException,
+            NumberFormatException, UnknownHostException, InferenceEngineException, AlreadyInitializedException,
+            RyaDetailsRepositoryException, DuplicateInstanceNameException, RyaClientException, SailException {
+        checkNotNull(instanceName);
+        checkNotNull(zookeepers);
+
+        // Setup Rya configuration values.
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(getRyaInstanceName());
+        conf.setDisplayQueryPlan(true);
+        conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, false);
+        conf.set(ConfigUtils.CLOUDBASE_USER, clusterInstance.getUsername());
+        conf.set(ConfigUtils.CLOUDBASE_PASSWORD, clusterInstance.getPassword());
+        conf.set(ConfigUtils.CLOUDBASE_INSTANCE, clusterInstance.getInstanceName());
+        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, clusterInstance.getZookeepers());
+        conf.set(ConfigUtils.USE_PCJ, "true");
+        conf.set(ConfigUtils.FLUO_APP_NAME, getRyaInstanceName());
+        conf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
+        conf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
+        conf.set(ConfigUtils.CLOUDBASE_AUTHS, "");
+
+        // Install the test instance of Rya.
+        final Install install = new AccumuloInstall(createConnectionDetails(), accumuloConn);
+
+        final InstallConfiguration installConfig = InstallConfiguration.builder()
+                .setEnableTableHashPrefix(true)
+                .setEnableEntityCentricIndex(true)
+                .setEnableFreeTextIndex(true)
+                .setEnableTemporalIndex(true)
+                .setEnablePcjIndex(true)
+                .setEnableGeoIndex(true)
+                .setFluoPcjAppName(getRyaInstanceName())
+                .build();
+        install.install(getRyaInstanceName(), installConfig);
+
+        // Connect to the instance of Rya that was just installed.
+        final Sail sail = RyaSailFactory.getInstance(conf);
+        final RyaSailRepository ryaRepo = new RyaSailRepository(sail);
+
+        return ryaRepo;
+    }
+}
\ No newline at end of file
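
A sketch of how the hook methods above are meant to be used: preFluoInitHook() runs after the FluoConfiguration is built but before the Fluo table is initialized, so it is the natural place for a concrete test base to register observers. The subclass and the single observer chosen here are only an example:

    import org.apache.fluo.api.config.ObserverSpecification;
    import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;

    // Hypothetical subclass registering one observer before initialization.
    public class ExampleFluoIT extends FluoITBase {
        @Override
        protected void preFluoInitHook() throws Exception {
            getFluoConfiguration().addObserver(
                    new ObserverSpecification(QueryResultObserver.class.getName()));
        }
    }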

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/KafkaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/KafkaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/KafkaExportITBase.java
new file mode 100644
index 0000000..85da422
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/KafkaExportITBase.java
@@ -0,0 +1,370 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.pcj.fluo.test.base;
+
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.fluo.api.config.ObserverSpecification;
+import org.apache.fluo.recipes.test.AccumuloExportITBase;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.accumulo.AccumuloRyaDAO;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.indexing.accumulo.ConfigUtils;
+import org.apache.rya.indexing.external.PrecomputedJoinIndexerConfig;
+import org.apache.rya.indexing.pcj.fluo.app.export.kafka.KafkaExportParameters;
+import org.apache.rya.indexing.pcj.fluo.app.export.kafka.RyaSubGraphKafkaSerDe;
+import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.ConstructQueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
+import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
+import org.apache.rya.rdftriplestore.RyaSailRepository;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.openrdf.model.Statement;
+import org.openrdf.repository.sail.SailRepositoryConnection;
+import org.openrdf.sail.Sail;
+
+
+import kafka.admin.AdminUtils;
+import kafka.admin.RackAwareMode;
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServer;
+import kafka.utils.MockTime;
+import kafka.utils.TestUtils;
+import kafka.utils.Time;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import kafka.zk.EmbeddedZookeeper;
+
+/**
+ * The base Integration Test class used for Fluo applications that export to a
+ * Kafka topic.
+ */
+public class KafkaExportITBase extends AccumuloExportITBase {
+
+    protected static final String RYA_INSTANCE_NAME = "test_";
+
+    private static final String ZKHOST = "127.0.0.1";
+    private static final String BROKERHOST = "127.0.0.1";
+    private static final String BROKERPORT = "9092";
+    private ZkUtils zkUtils;
+    private KafkaServer kafkaServer;
+    private EmbeddedZookeeper zkServer;
+    private ZkClient zkClient;
+
+    // The Rya instance that statements are written to; those statements are
+    // fed into the Fluo app.
+    private RyaSailRepository ryaSailRepo = null;
+    private AccumuloRyaDAO dao = null;
+
+    /**
+     * Adds the observers and the Kafka export configuration used by the
+     * Fluo PCJ Application.
+     */
+    @Override
+    protected void preFluoInitHook() throws Exception {
+        // Setup the observers that will be used by the Fluo PCJ Application.
+        final List<ObserverSpecification> observers = new ArrayList<>();
+        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
+        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
+        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
+        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
+        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
+
+        // Configure the export observer to export new PCJ results to the
+        // Kafka topic.
+        final HashMap<String, String> exportParams = new HashMap<>();
+
+        final KafkaExportParameters kafkaParams = new KafkaExportParameters(exportParams);
+        kafkaParams.setExportToKafka(true);
+
+        // Configure the Kafka Producer
+        final Properties producerConfig = new Properties();
+        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
+        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
+                "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
+        kafkaParams.addAllProducerConfig(producerConfig);
+
+        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
+        observers.add(exportObserverConfig);
+        
+        // Create the construct query observer and configure it to export
+        // construct query results to Kafka.
+        HashMap<String, String> constructParams = new HashMap<>();
+        final KafkaExportParameters kafkaConstructParams = new KafkaExportParameters(constructParams);
+        kafkaConstructParams.setExportToKafka(true);
+        
+        // Configure the Kafka Producer
+        final Properties constructProducerConfig = new Properties();
+        constructProducerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
+        constructProducerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        constructProducerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, RyaSubGraphKafkaSerDe.class.getName());
+        kafkaConstructParams.addAllProducerConfig(constructProducerConfig);
+
+        final ObserverSpecification constructExportObserverConfig = new ObserverSpecification(ConstructQueryResultObserver.class.getName(),
+                constructParams);
+        observers.add(constructExportObserverConfig);
+
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
+    }
+
+    /**
+     * Installs a Rya instance and sets up an embedded Kafka broker; the
+     * base class handles the mini Accumulo and mini Fluo setup.
+     */
+    @Before
+    public void setupKafka() throws Exception {
+        // Install an instance of Rya on the Accumulo cluster.
+        installRyaInstance();
+
+        // Setup Kafka.
+        zkServer = new EmbeddedZookeeper();
+        final String zkConnect = ZKHOST + ":" + zkServer.port();
+        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
+        zkUtils = ZkUtils.apply(zkClient, false);
+
+        // setup Broker
+        final Properties brokerProps = new Properties();
+        brokerProps.setProperty("zookeeper.connect", zkConnect);
+        brokerProps.setProperty("broker.id", "0");
+        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
+        brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
+        final KafkaConfig config = new KafkaConfig(brokerProps);
+        final Time mock = new MockTime();
+        kafkaServer = TestUtils.createServer(config, mock);
+    }
+
+    @After
+    public void teardownRya() {
+        final MiniAccumuloCluster cluster = getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Uninstall the instance of Rya.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(ACCUMULO_USER, ACCUMULO_PASSWORD.toCharArray(), instanceName, zookeepers),
+                super.getAccumuloConnector());
+
+        try {
+            ryaClient.getUninstall().uninstall(RYA_INSTANCE_NAME);
+            // Shutdown the repo.
+            if(ryaSailRepo != null) {ryaSailRepo.shutDown();}
+            if(dao != null ) {dao.destroy();}
+        } catch (Exception e) {
+            System.out.println("Encountered the following Exception when shutting down Rya: " + e.getMessage());
+        }
+    }
+
+    private void installRyaInstance() throws Exception {
+        final MiniAccumuloCluster cluster = super.getMiniAccumuloCluster();
+        final String instanceName = cluster.getInstanceName();
+        final String zookeepers = cluster.getZooKeepers();
+
+        // Install the Rya instance to the mini accumulo cluster.
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(
+                new AccumuloConnectionDetails(ACCUMULO_USER, ACCUMULO_PASSWORD.toCharArray(), instanceName, zookeepers),
+                super.getAccumuloConnector());
+
+        ryaClient.getInstall().install(RYA_INSTANCE_NAME,
+                InstallConfiguration.builder().setEnableTableHashPrefix(false).setEnableFreeTextIndex(false)
+                        .setEnableEntityCentricIndex(false).setEnableGeoIndex(false).setEnableTemporalIndex(false).setEnablePcjIndex(true)
+                        .setFluoPcjAppName(super.getFluoConfiguration().getApplicationName()).build());
+
+        // Connect to the Rya instance that was just installed.
+        final AccumuloRdfConfiguration conf = makeConfig(instanceName, zookeepers);
+        final Sail sail = RyaSailFactory.getInstance(conf);
+        dao = RyaSailFactory.getAccumuloDAOWithUpdatedConfig(conf);
+        ryaSailRepo = new RyaSailRepository(sail);
+    }
+
+    protected AccumuloRdfConfiguration makeConfig(final String instanceName, final String zookeepers) {
+        final AccumuloRdfConfiguration conf = new AccumuloRdfConfiguration();
+        conf.setTablePrefix(RYA_INSTANCE_NAME);
+
+        // Accumulo connection information.
+        conf.setAccumuloUser(AccumuloExportITBase.ACCUMULO_USER);
+        conf.setAccumuloPassword(AccumuloExportITBase.ACCUMULO_PASSWORD);
+        conf.setAccumuloInstance(super.getAccumuloConnector().getInstance().getInstanceName());
+        conf.setAccumuloZookeepers(super.getAccumuloConnector().getInstance().getZooKeepers());
+        conf.setAuths("");
+
+        // PCJ configuration information.
+        conf.set(ConfigUtils.USE_PCJ, "true");
+        conf.set(ConfigUtils.USE_PCJ_UPDATER_INDEX, "true");
+        conf.set(ConfigUtils.FLUO_APP_NAME, super.getFluoConfiguration().getApplicationName());
+        conf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinStorageType.ACCUMULO.toString());
+        conf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinIndexerConfig.PrecomputedJoinUpdaterType.FLUO.toString());
+
+        conf.setDisplayQueryPlan(true);
+
+        return conf;
+    }
+
+    /**
+     * @return A {@link RyaSailRepository} that is connected to the Rya instance
+     *         into which statements are loaded.
+     */
+    protected RyaSailRepository getRyaSailRepository() throws Exception {
+        return ryaSailRepo;
+    }
+
+    /**
+     * @return An {@link AccumuloRyaDAO} so that RyaStatements with distinct
+     *         visibilities can be added to the Rya instance.
+     */
+    protected AccumuloRyaDAO getRyaDAO() {
+        return dao;
+    }
+
+    /**
+     * Closes the embedded Kafka server, the Zookeeper client, and the
+     * mini Zookeeper server.
+     */
+    @After
+    public void teardownKafka() {
+        if(kafkaServer != null) {kafkaServer.shutdown();}
+        if(zkClient != null) {zkClient.close();}
+        if(zkServer != null) {zkServer.shutdown();}
+    }
+
+    /**
+     * Tests Kafka without any Rya code to make sure Kafka works in this
+     * environment. If this test fails, it is a testing-environment issue,
+     * not a problem with Rya. Source: https://github.com/asmaier/mini-kafka
+     */
+    @Test
+    public void embeddedKafkaTest() throws Exception {
+        // create topic
+        final String topic = "testTopic";
+        AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
+
+        // setup producer
+        final Properties producerProps = new Properties();
+        producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
+        producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
+        producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        final KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps);
+
+        // setup consumer
+        final Properties consumerProps = new Properties();
+        consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
+        consumerProps.setProperty("group.id", "group0");
+        consumerProps.setProperty("client.id", "consumer0");
+        consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
+        consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+
+        // to make sure the consumer starts from the beginning of the topic
+        consumerProps.put("auto.offset.reset", "earliest");
+
+        final KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
+        consumer.subscribe(Arrays.asList(topic));
+
+        // send message
+        final ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, 42, "test-message".getBytes(StandardCharsets.UTF_8));
+        producer.send(data);
+        producer.close();
+
+        // starting consumer
+        final ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
+        assertEquals(1, records.count());
+        final Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
+        final ConsumerRecord<Integer, byte[]> record = recordIterator.next();
+        assertEquals(42, (int) record.key());
+        assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
+        consumer.close();
+    }
+
+    protected KafkaConsumer<Integer, VisibilityBindingSet> makeConsumer(final String topicName) {
+        // setup consumer
+        final Properties consumerProps = new Properties();
+        consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
+        consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group0");
+        consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "consumer0");
+        consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
+                "org.apache.kafka.common.serialization.IntegerDeserializer");
+        consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
+                "org.apache.rya.indexing.pcj.fluo.app.export.kafka.KryoVisibilityBindingSetSerializer");
+
+        // to make sure the consumer starts from the beginning of the topic
+        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+        final KafkaConsumer<Integer, VisibilityBindingSet> consumer = new KafkaConsumer<>(consumerProps);
+        consumer.subscribe(Arrays.asList(topicName));
+        return consumer;
+    }
+
+    protected String loadData(final String sparql, final Collection<Statement> statements) throws Exception {
+        requireNonNull(sparql);
+        requireNonNull(statements);
+
+        // Register the PCJ with Rya.
+        final Instance accInstance = super.getAccumuloConnector().getInstance();
+        final Connector accumuloConn = super.getAccumuloConnector();
+
+        final RyaClient ryaClient = AccumuloRyaClientFactory.build(new AccumuloConnectionDetails(ACCUMULO_USER,
+                ACCUMULO_PASSWORD.toCharArray(), accInstance.getInstanceName(), accInstance.getZooKeepers()), accumuloConn);
+
+        final String pcjId = ryaClient.getCreatePCJ().createPCJ(RYA_INSTANCE_NAME, sparql);
+
+        // Write the data to Rya.
+        final SailRepositoryConnection ryaConn = getRyaSailRepository().getConnection();
+        ryaConn.begin();
+        ryaConn.add(statements);
+        ryaConn.commit();
+        ryaConn.close();
+
+        // Wait for the Fluo application to finish computing the end result.
+        super.getMiniFluo().waitForObservers();
+
+        // The PCJ ID is the name of the Kafka topic the results will be written to.
+        return pcjId;
+    }
+
+}
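
A minimal sketch (not part of the patch) tying loadData(...) and makeConsumer(...) together; the subclass name, SPARQL query, and URIs are illustrative only. loadData(...) registers the PCJ, writes the statements, and blocks until the Fluo observers finish, and the returned PCJ id doubles as the Kafka topic name, so the consumer can read the exported VisibilityBindingSets directly:

    import java.util.Arrays;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.rya.indexing.pcj.storage.accumulo.VisibilityBindingSet;
    import org.junit.Test;
    import org.openrdf.model.Statement;
    import org.openrdf.model.ValueFactory;
    import org.openrdf.model.impl.ValueFactoryImpl;

    // Hypothetical subclass living in the same package as KafkaExportITBase.
    public class ExampleKafkaExportIT extends KafkaExportITBase {

        @Test
        public void resultsArriveOnTheTopic() throws Exception {
            final String sparql = "SELECT ?p WHERE { ?p <urn:talksTo> <urn:Bob> . }";

            final ValueFactory vf = new ValueFactoryImpl();
            final Statement stmt = vf.createStatement(
                    vf.createURI("urn:Alice"),
                    vf.createURI("urn:talksTo"),
                    vf.createURI("urn:Bob"));

            // Register the PCJ, load the data, and wait for the export.
            final String pcjId = loadData(sparql, Arrays.asList(stmt));

            // The PCJ id is the topic name, so poll it for the results.
            try (KafkaConsumer<Integer, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
                final ConsumerRecords<Integer, VisibilityBindingSet> records = consumer.poll(5000);
                for (final ConsumerRecord<Integer, VisibilityBindingSet> record : records) {
                    System.out.println(record.value());
                }
            }
        }
    }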

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/RyaExportITBase.java
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/RyaExportITBase.java b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/RyaExportITBase.java
new file mode 100644
index 0000000..6feadff
--- /dev/null
+++ b/extras/rya.pcj.fluo/pcj.fluo.test.base/src/main/java/org/apache/rya/pcj/fluo/test/base/RyaExportITBase.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.pcj.fluo.test.base;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.fluo.api.config.ObserverSpecification;
+import org.apache.log4j.BasicConfigurator;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.rya.indexing.pcj.fluo.app.batch.BatchObserver;
+import org.apache.rya.indexing.pcj.fluo.app.export.rya.RyaExportParameters;
+import org.apache.rya.indexing.pcj.fluo.app.observers.AggregationObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.FilterObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.JoinObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.PeriodicQueryObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.QueryResultObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.StatementPatternObserver;
+import org.apache.rya.indexing.pcj.fluo.app.observers.TripleObserver;
+import org.junit.BeforeClass;
+
+/**
+ * The base Integration Test class used for Fluo applications that export to a Rya PCJ Index.
+ */
+public class RyaExportITBase extends FluoITBase {
+
+    @BeforeClass
+    public static void setupLogging() {
+        BasicConfigurator.configure();
+        Logger.getRootLogger().setLevel(Level.ERROR);
+    }
+
+    @Override
+    protected void preFluoInitHook() throws Exception {
+        // Setup the observers that will be used by the Fluo PCJ Application.
+        final List<ObserverSpecification> observers = new ArrayList<>();
+        observers.add(new ObserverSpecification(BatchObserver.class.getName()));
+        observers.add(new ObserverSpecification(TripleObserver.class.getName()));
+        observers.add(new ObserverSpecification(StatementPatternObserver.class.getName()));
+        observers.add(new ObserverSpecification(JoinObserver.class.getName()));
+        observers.add(new ObserverSpecification(FilterObserver.class.getName()));
+        observers.add(new ObserverSpecification(AggregationObserver.class.getName()));
+        observers.add(new ObserverSpecification(PeriodicQueryObserver.class.getName()));
+
+        // Configure the export observer to export new PCJ results to the mini accumulo cluster.
+        final HashMap<String, String> exportParams = new HashMap<>();
+        final RyaExportParameters ryaParams = new RyaExportParameters(exportParams);
+        ryaParams.setExportToRya(true);
+        ryaParams.setRyaInstanceName(getRyaInstanceName());
+        ryaParams.setAccumuloInstanceName(super.getMiniAccumuloCluster().getInstanceName());
+        ryaParams.setZookeeperServers(super.getMiniAccumuloCluster().getZooKeepers());
+        ryaParams.setExporterUsername(getUsername());
+        ryaParams.setExporterPassword(getPassword());
+
+        final ObserverSpecification exportObserverConfig = new ObserverSpecification(QueryResultObserver.class.getName(), exportParams);
+        observers.add(exportObserverConfig);
+
+        // Add the observers to the Fluo Configuration.
+        super.getFluoConfiguration().addObservers(observers);
+    }
+
+}
\ No newline at end of file
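
A short sketch (not part of the patch) of how a subclass can extend this wiring: call super.preFluoInitHook() to keep the standard observers and the Rya export configuration, then register any extra observers. The observer class name below is a placeholder:

    import java.util.Collections;

    import org.apache.fluo.api.config.ObserverSpecification;

    // Hypothetical subclass living in the same package as RyaExportITBase.
    public class ExampleRyaExportIT extends RyaExportITBase {

        @Override
        protected void preFluoInitHook() throws Exception {
            // Keep the standard PCJ observers and the Rya export observer.
            super.preFluoInitHook();

            // Register an additional, application-specific observer. The class
            // name is a placeholder and must be on the Fluo classpath.
            super.getFluoConfiguration().addObservers(Collections.singletonList(
                    new ObserverSpecification("com.example.MyCustomObserver")));
        }
    }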

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.pcj.fluo/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.pcj.fluo/pom.xml b/extras/rya.pcj.fluo/pom.xml
index 54a22fc..6979768 100644
--- a/extras/rya.pcj.fluo/pom.xml
+++ b/extras/rya.pcj.fluo/pom.xml
@@ -38,6 +38,7 @@
         <module>pcj.fluo.app</module>
         <module>pcj.fluo.client</module>
         <module>pcj.fluo.integration</module>
+        <module>pcj.fluo.test.base</module>
         <module>pcj.fluo.demo</module>
     </modules>
     <profiles>

http://git-wip-us.apache.org/repos/asf/incubator-rya/blob/2ca85427/extras/rya.periodic.service/periodic.service.integration.tests/pom.xml
----------------------------------------------------------------------
diff --git a/extras/rya.periodic.service/periodic.service.integration.tests/pom.xml b/extras/rya.periodic.service/periodic.service.integration.tests/pom.xml
new file mode 100644
index 0000000..bcd60aa
--- /dev/null
+++ b/extras/rya.periodic.service/periodic.service.integration.tests/pom.xml
@@ -0,0 +1,77 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<!-- Licensed to the Apache Software Foundation (ASF) under one or more 
+		contributor license agreements. See the NOTICE file distributed with this 
+		work for additional information regarding copyright ownership. The ASF licenses 
+		this file to you under the Apache License, Version 2.0 (the "License"); you 
+		may not use this file except in compliance with the License. You may obtain 
+		a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
+		required by applicable law or agreed to in writing, software distributed 
+		under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES 
+		OR CONDITIONS OF ANY KIND, either express or implied. See the License for 
+		the specific language governing permissions and limitations under the License. -->
+	<modelVersion>4.0.0</modelVersion>
+
+	<parent>
+		<groupId>org.apache.rya</groupId>
+		<artifactId>rya.periodic.service</artifactId>
+		<version>3.2.11-incubating-SNAPSHOT</version>
+	</parent>
+
+	<artifactId>rya.periodic.service.integration.tests</artifactId>
+	
+	<name>Apache Rya Periodic Service Integration Tests</name>
+    <description>Integration Tests for Rya Periodic Service</description>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.rya</groupId>
+			<artifactId>rya.pcj.fluo.test.base</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<exclusion>
+					<artifactId>log4j-1.2-api</artifactId>
+					<groupId>org.apache.logging.log4j</groupId>
+				</exclusion>
+				<exclusion>
+					<artifactId>log4j-api</artifactId>
+					<groupId>org.apache.logging.log4j</groupId>
+				</exclusion>
+				<exclusion>
+					<artifactId>log4j-core</artifactId>
+					<groupId>org.apache.logging.log4j</groupId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.rya</groupId>
+			<artifactId>rya.periodic.service.notification</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<exclusion>
+					<artifactId>logback-classic</artifactId>
+					<groupId>ch.qos.logback</groupId>
+				</exclusion>
+				<exclusion>
+					<artifactId>logback-core</artifactId>
+					<groupId>ch.qos.logback</groupId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<configuration>
+					<encoding>UTF-8</encoding>
+					<source>1.8</source>
+					<target>1.8</target>
+				</configuration>
+			</plugin>
+		</plugins>
+	</build>
+
+</project>
\ No newline at end of file
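
With the module wired into the build, the integration tests can be run on their own. Assuming the standard Maven lifecycle and the module path shown in the diff headers, something like the following (from the repository root) should work; -am also builds the modules this one depends on:

    mvn -pl extras/rya.periodic.service/periodic.service.integration.tests -am verify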