Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/01/27 20:23:29 UTC

[28/51] [partial] Initial commit

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java b/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java
new file mode 100644
index 0000000..838bb0b
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.visitor;
+
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+
+
+
+
+/**
+ * 
+ * Implementation of ExpressionVisitor where only KeyValueColumnExpression
+ * nodes are visited
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public abstract class KeyValueExpressionVisitor extends TraverseAllExpressionVisitor<Void> {
+    @Override
+    abstract public Void visit(KeyValueColumnExpression node);
+}
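
A minimal usage sketch (the helper class and method below are hypothetical, not part of this commit): a concrete subclass only has to implement visit(KeyValueColumnExpression), and Expression.accept(...) drives the traversal, so collecting the column families referenced by a WHERE expression looks like this:

import java.util.ArrayList;
import java.util.List;

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.KeyValueColumnExpression;
import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor;

// Hypothetical helper: collect the column families referenced by an expression.
class ColumnFamilyCollector {
    static List<byte[]> referencedFamilies(Expression whereExpression) {
        final List<byte[]> families = new ArrayList<byte[]>();
        whereExpression.accept(new KeyValueExpressionVisitor() {
            @Override
            public Void visit(KeyValueColumnExpression node) {
                families.add(node.getColumnFamily());
                return null;
            }
        });
        return families;
    }
}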

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java b/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java
new file mode 100644
index 0000000..63313a7
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.visitor;
+
+import java.util.Iterator;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.function.SingleAggregateFunction;
+
+
+
+/**
+ * 
+ * Implementation of ExpressionVisitor where only SingleAggregateFunction
+ * instances are visited
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public abstract class SingleAggregateFunctionVisitor extends TraverseAllExpressionVisitor<Void> {
+    @Override
+    abstract public Iterator<Expression> visitEnter(SingleAggregateFunction node);
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java b/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java
new file mode 100644
index 0000000..39e7a3d
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.visitor;
+
+import java.util.*;
+
+import org.apache.phoenix.expression.Expression;
+
+
+
+
+public class TraverseAllExpressionVisitor<E> extends BaseExpressionVisitor<E> {
+
+    @Override
+    public Iterator<Expression> defaultIterator(Expression node) {
+        final List<Expression> children = node.getChildren();
+        return new Iterator<Expression>() {
+            private int position;
+            
+            @Override
+            public final boolean hasNext() {
+                return position < children.size();
+            }
+
+            @Override
+            public final Expression next() {
+                if (!hasNext()) {
+                    throw new NoSuchElementException();
+                }
+                return children.get(position++);
+            }
+
+            @Override
+            public final void remove() {
+                throw new UnsupportedOperationException();
+            }
+        };
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java b/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java
new file mode 100644
index 0000000..fe1d1f5
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.visitor;
+
+import java.util.Iterator;
+
+
+import com.google.common.collect.Iterators;
+import org.apache.phoenix.expression.Expression;
+
+public class TraverseNoExpressionVisitor<E> extends BaseExpressionVisitor<E> {
+
+    @Override
+    public Iterator<Expression> defaultIterator(Expression node) {
+        return Iterators.emptyIterator();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java b/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
new file mode 100644
index 0000000..42a85a6
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.ServerUtil;
+
+
+/**
+ * 
+ * Base class for filters that evaluate a WHERE clause expression.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+abstract public class BooleanExpressionFilter extends FilterBase {
+
+    protected Expression expression;
+    protected boolean evaluateOnCompletion;
+    private ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
+    
+    public BooleanExpressionFilter() {
+    }
+
+    public BooleanExpressionFilter(Expression expression) {
+        this.expression = expression;
+    }
+
+    protected void setEvaluateOnCompletion(boolean evaluateOnCompletion) {
+        this.evaluateOnCompletion = evaluateOnCompletion;
+    }
+    
+    protected boolean evaluateOnCompletion() {
+        return evaluateOnCompletion;
+    }
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + expression.hashCode();
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        BooleanExpressionFilter other = (BooleanExpressionFilter)obj;
+        if (!expression.equals(other.expression)) return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return expression.toString();
+    }
+
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+            value="NP_BOOLEAN_RETURN_NULL",
+            justification="Returns null by design.")
+    protected Boolean evaluate(Tuple input) {
+        try {
+            if (!expression.evaluate(input, tempPtr)) {
+                return null;
+            }
+        } catch (IllegalDataException e) {
+            return Boolean.FALSE;
+        }
+        return (Boolean)expression.getDataType().toObject(tempPtr);
+    }
+
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        try {
+            expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
+            expression.readFields(input);
+        } catch (Throwable t) {
+            ServerUtil.throwIOException("BooleanExpressionFilter failed to read fields. Ensure client and server are compatible", t);
+        }
+    }
+
+    @Override
+    public void write(DataOutput output) throws IOException {
+        try {
+            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
+            expression.write(output);
+        } catch (Throwable t) {
+            ServerUtil.throwIOException("BooleanExpressionFilter failed to write fields. Ensure client and server are compatible", t);
+        }
+    }
+    
+    @Override
+    public void reset() {
+        expression.reset();
+    }
+}
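
The evaluate(Tuple) helper above is deliberately three-valued: null means the expression could not yet be evaluated (a referenced column has not been seen), Boolean.FALSE is returned when the data is illegal, and otherwise the expression's Boolean result is returned. A sketch of how a subclass can interpret that result, mirroring the pattern the concrete filters later in this commit follow (the class name is illustrative only):

import org.apache.phoenix.filter.BooleanExpressionFilter;
import org.apache.phoenix.schema.tuple.Tuple;

// Sketch only: interpret the three-valued result of evaluate(Tuple).
abstract class ExampleBooleanFilter extends BooleanExpressionFilter {
    protected boolean matchesSoFar(Tuple inputTuple) {
        Boolean matched = evaluate(inputTuple);
        if (matched == null) {
            // Not enough information yet: a referenced column has not been seen,
            // so keep consuming KeyValues (or re-evaluate at row completion).
            return false;
        }
        // TRUE means the expression is satisfied; FALSE also covers IllegalDataException.
        return matched.booleanValue();
    }
}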

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java b/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
new file mode 100644
index 0000000..a5b7ed6
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/EvaluateOnCompletionVisitor.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.util.Iterator;
+
+import org.apache.phoenix.expression.CaseExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.IsNullExpression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.expression.visitor.TraverseAllExpressionVisitor;
+
+
+/**
+ * 
+ * Implementation of ExpressionVisitor for the expression used by the
+ * BooleanExpressionFilter that looks for expressions that need to be
+ * evaluated upon completion. Examples include:
+ * - CaseExpression with an else clause, since upon completion, the
+ * else clause would apply if the when clauses could not be evaluated
+ * due to the absence of a value.
+ * - IsNullExpression that's not negated, since upon completion, we
+ * know definitively that a column value was not found.
+ * - row key columns are used, since we may never have encountered a
+ * key value column of interest, but the expression may evaluate to true
+ * just based on the row key columns.
+ * @author jtaylor
+ * @since 0.1
+ */
+public class EvaluateOnCompletionVisitor extends TraverseAllExpressionVisitor<Void> {
+    private boolean evaluateOnCompletion = false;
+    
+    public boolean evaluateOnCompletion() {
+        return evaluateOnCompletion;
+    }
+    
+    @Override
+    public Iterator<Expression> visitEnter(IsNullExpression node) {
+        evaluateOnCompletion |= !node.isNegate();
+        return null;
+    }
+    @Override
+    public Iterator<Expression> visitEnter(CaseExpression node) {
+        evaluateOnCompletion |= node.hasElse();
+        return null;
+    }
+    @Override
+    public Void visit(RowKeyColumnExpression node) {
+        evaluateOnCompletion = true;
+        return null;
+    }
+    @Override
+    public Iterator<Expression> visitEnter(RowValueConstructorExpression node) {
+        evaluateOnCompletion = true;
+        return null;
+    }
+
+}
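
A short usage sketch (the helper below is hypothetical): the filters later in this commit run this visitor over their expression during init() and cache the result:

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.filter.EvaluateOnCompletionVisitor;

// Hypothetical helper: does this expression need re-evaluation once the row is complete?
class CompletionCheck {
    static boolean needsEvaluationOnCompletion(Expression whereExpression) {
        EvaluateOnCompletionVisitor visitor = new EvaluateOnCompletionVisitor();
        whereExpression.accept(visitor);
        return visitor.evaluateOnCompletion();
    }
}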

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
new file mode 100644
index 0000000..27a356b
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.expression.Expression;
+
+
+/**
+ *
+ * Filter that evaluates a WHERE clause expression, used in the case where there
+ * are references to multiple column qualifiers over multiple column families.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class MultiCFCQKeyValueComparisonFilter extends MultiKeyValueComparisonFilter {
+    private final ImmutablePairBytesPtr ptr = new ImmutablePairBytesPtr();
+    private TreeSet<byte[]> cfSet;
+
+    public MultiCFCQKeyValueComparisonFilter() {
+    }
+
+    public MultiCFCQKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+    }
+
+    @Override
+    protected void init() {
+        cfSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+        super.init();
+    }
+
+    @Override
+    protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength,
+            byte[] cq, int cqOffset, int cqLength) {
+        ptr.set(cf, cfOffset, cfLength, cq, cqOffset, cqLength);
+        return ptr;
+    }
+
+    @Override
+    protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength,
+            byte[] cq, int cqOffset, int cqLength) {
+
+        byte[] cfKey;
+        if (cfOffset == 0 && cf.length == cfLength) {
+            cfKey = cf;
+        } else {
+            // Copy bytes here, but figure cf names are typically a few bytes at most,
+            // so this will be better than creating an ImmutableBytesPtr
+            cfKey = new byte[cfLength];
+            System.arraycopy(cf, cfOffset, cfKey, 0, cfLength);
+        }
+        cfSet.add(cfKey);
+        return new ImmutablePairBytesPtr(cf, cfOffset, cfLength, cq, cqOffset, cqLength);
+    }
+
+    private static class ImmutablePairBytesPtr {
+        private byte[] bytes1;
+        private int offset1;
+        private int length1;
+        private byte[] bytes2;
+        private int offset2;
+        private int length2;
+        private int hashCode;
+
+        private ImmutablePairBytesPtr() {
+        }
+
+        private ImmutablePairBytesPtr(byte[] bytes1, int offset1, int length1, byte[] bytes2, int offset2, int length2) {
+            set(bytes1, offset1, length1, bytes2, offset2, length2);
+        }
+
+        @Override
+        public int hashCode() {
+            return hashCode;
+        }
+
+        public void set(byte[] bytes1, int offset1, int length1, byte[] bytes2, int offset2, int length2) {
+            this.bytes1 = bytes1;
+            this.offset1 = offset1;
+            this.length1 = length1;
+            this.bytes2 = bytes2;
+            this.offset2 = offset2;
+            this.length2 = length2;
+            int hash = 1;
+            for (int i = offset1; i < offset1 + length1; i++)
+                hash = (31 * hash) + bytes1[i];
+            for (int i = offset2; i < offset2 + length2; i++)
+                hash = (31 * hash) + bytes2[i];
+            hashCode = hash;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) return true;
+            if (obj == null) return false;
+            if (getClass() != obj.getClass()) return false;
+            ImmutablePairBytesPtr that = (ImmutablePairBytesPtr)obj;
+            if (this.hashCode != that.hashCode) return false;
+            if (Bytes.compareTo(this.bytes2, this.offset2, this.length2, that.bytes2, that.offset2, that.length2) != 0) return false;
+            if (Bytes.compareTo(this.bytes1, this.offset1, this.length1, that.bytes1, that.offset1, that.length1) != 0) return false;
+            return true;
+        }
+    }
+
+
+    @SuppressWarnings("all") // suppressing missing @Override since this doesn't exist for HBase 0.94.4
+    public boolean isFamilyEssential(byte[] name) {
+        // Only the column families involved in the expression are essential.
+        // The others are for columns projected in the select expression.
+        return cfSet.contains(name);
+    }
+}
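
setColumnKey() and newColumnKey() above implement a common byte-handling idiom: newColumnKey() allocates a fresh key object once per registered column, while the per-KeyValue lookup path (setColumnKey()) reuses a single mutable probe key so no garbage is created while scanning. A JDK-only sketch of the same idiom, with illustrative names that are not Phoenix APIs (the sketch copies stored bytes for safety; the filter above does not need to, since its stored keys come from stable expression byte arrays):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Sketch of the reusable-probe-key idiom used by the filter above.
final class ProbeKeyMap<V> {
    private static final class Key {
        byte[] bytes; int offset; int length; int hash;
        void set(byte[] b, int o, int l) {
            bytes = b; offset = o; length = l;
            int h = 1;
            for (int i = o; i < o + l; i++) h = 31 * h + b[i];
            hash = h;
        }
        @Override public int hashCode() { return hash; }
        @Override public boolean equals(Object obj) {
            if (!(obj instanceof Key)) return false;
            Key that = (Key) obj;
            if (this.length != that.length || this.hash != that.hash) return false;
            for (int i = 0; i < length; i++) {
                if (bytes[offset + i] != that.bytes[that.offset + i]) return false;
            }
            return true;
        }
    }

    private final Map<Key, V> map = new HashMap<Key, V>();
    private final Key probe = new Key(); // reused for every lookup, like setColumnKey()

    V get(byte[] b, int o, int l) {
        probe.set(b, o, l);              // no allocation on the per-KeyValue read path
        return map.get(probe);
    }

    void put(byte[] b, int o, int l, V value) {
        Key stored = new Key();          // a fresh key per stored column, like newColumnKey()
        stored.set(Arrays.copyOfRange(b, o, o + l), 0, l);
        map.put(stored, value);
    }
}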

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java
new file mode 100644
index 0000000..16f59ed
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.expression.Expression;
+
+/**
+ *
+ * Filter that evaluates a WHERE clause expression, used in the case where there
+ * are references to multiple column qualifiers over a single column family.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class MultiCQKeyValueComparisonFilter extends MultiKeyValueComparisonFilter {
+    private ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+    private byte[] cf;
+
+    public MultiCQKeyValueComparisonFilter() {
+    }
+
+    public MultiCQKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+    }
+
+    @Override
+    protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset,
+            int cqLength) {
+        ptr.set(cq, cqOffset, cqLength);
+        return ptr;
+    }
+
+    @Override
+    protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset,
+            int cqLength) {
+        if (cfOffset == 0 && cf.length == cfLength) {
+            this.cf = cf;
+        } else {
+            this.cf = new byte[cfLength];
+            System.arraycopy(cf, cfOffset, this.cf, 0, cfLength);
+        }
+        return new ImmutableBytesPtr(cq, cqOffset, cqLength);
+    }
+
+
+    @SuppressWarnings("all") // suppressing missing @Override since this doesn't exist for HBase 0.94.4
+    public boolean isFamilyEssential(byte[] name) {
+        return Bytes.compareTo(cf, name) == 0;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java
new file mode 100644
index 0000000..65aeee1
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.schema.tuple.Tuple;
+
+
+/**
+ * 
+ * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter},
+ * but for general expression evaluation in the case where multiple KeyValue
+ * columns are referenced in the expression.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public abstract class MultiKeyValueComparisonFilter extends BooleanExpressionFilter {
+    private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0];
+
+    private Boolean matchedColumn;
+    protected final IncrementalResultTuple inputTuple = new IncrementalResultTuple();
+
+    public MultiKeyValueComparisonFilter() {
+    }
+
+    public MultiKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+        init();
+    }
+
+    private static final class KeyValueRef {
+        public KeyValue keyValue;
+        
+        @Override
+        public String toString() {
+            if(keyValue != null) {
+                return keyValue.toString() + " value = " + Bytes.toStringBinary(keyValue.getValue());
+            } else {
+                return super.toString();
+            }
+        }
+    }
+    
+    protected abstract Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength);
+    protected abstract Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength);
+    
+    private final class IncrementalResultTuple implements Tuple {
+        private int refCount;
+        private final ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER);
+        private final Map<Object,KeyValueRef> foundColumns = new HashMap<Object,KeyValueRef>(5);
+        
+        public void reset() {
+            refCount = 0;
+            keyPtr.set(UNITIALIZED_KEY_BUFFER);
+            for (KeyValueRef ref : foundColumns.values()) {
+                ref.keyValue = null;
+            }
+        }
+        
+        @Override
+        public boolean isImmutable() {
+            return refCount == foundColumns.size();
+        }
+        
+        public void setImmutable() {
+            refCount = foundColumns.size();
+        }
+        
+        public ReturnCode resolveColumn(KeyValue value) {
+            // Always set key, in case we never find a key value column of interest,
+            // and our expression uses row key columns.
+            setKey(value);
+            byte[] buf = value.getBuffer();
+            Object ptr = setColumnKey(buf, value.getFamilyOffset(), value.getFamilyLength(), buf, value.getQualifierOffset(), value.getQualifierLength());
+            KeyValueRef ref = foundColumns.get(ptr);
+            if (ref == null) {
+                // Return INCLUDE here. Although this filter doesn't need this KV
+                // it should still be projected into the Result
+                return ReturnCode.INCLUDE;
+            }
+            // Since we only look at the latest key value for a given column,
+            // we are not interested in older versions
+            // TODO: test with older versions to confirm this doesn't get tripped
+            // This shouldn't be necessary, because a scan only looks at the latest
+            // version
+            if (ref.keyValue != null) {
+                // Can't do NEXT_ROW, because then we don't match the other columns
+                // SKIP, INCLUDE, and NEXT_COL seem to all act the same
+                return ReturnCode.NEXT_COL;
+            }
+            ref.keyValue = value;
+            refCount++;
+            return null;
+        }
+        
+        public void addColumn(byte[] cf, byte[] cq) {
+            Object ptr = MultiKeyValueComparisonFilter.this.newColumnKey(cf, 0, cf.length, cq, 0, cq.length);
+            foundColumns.put(ptr, new KeyValueRef());
+        }
+        
+        public void setKey(KeyValue value) {
+            keyPtr.set(value.getBuffer(), value.getRowOffset(), value.getRowLength());
+        }
+        
+        @Override
+        public void getKey(ImmutableBytesWritable ptr) {
+            ptr.set(keyPtr.get(),keyPtr.getOffset(),keyPtr.getLength());
+        }
+        
+        @Override
+        public KeyValue getValue(byte[] cf, byte[] cq) {
+            Object ptr = setColumnKey(cf, 0, cf.length, cq, 0, cq.length);
+            KeyValueRef ref = foundColumns.get(ptr);
+            return ref == null ? null : ref.keyValue;
+        }
+        
+        @Override
+        public String toString() {
+            return foundColumns.toString();
+        }
+
+        @Override
+        public int size() {
+            return refCount;
+        }
+
+        @Override
+        public KeyValue getValue(int index) {
+            // This won't perform very well, but it's not
+            // currently used anyway
+            for (KeyValueRef ref : foundColumns.values()) {
+                if (ref.keyValue == null) {
+                    continue;
+                }
+                if (index == 0) {
+                    return ref.keyValue;
+                }
+                index--;
+            }
+            throw new IndexOutOfBoundsException(Integer.toString(index));
+        }
+    }
+    
+    protected void init() {
+        EvaluateOnCompletionVisitor visitor = new EvaluateOnCompletionVisitor() {
+            @Override
+            public Void visit(KeyValueColumnExpression expression) {
+                inputTuple.addColumn(expression.getColumnFamily(), expression.getColumnName());
+                return null;
+            }
+        };
+        expression.accept(visitor);
+        this.evaluateOnCompletion = visitor.evaluateOnCompletion();
+        expression.reset();
+    }
+    
+    @Override
+    public ReturnCode filterKeyValue(KeyValue keyValue) {
+        if (Boolean.TRUE.equals(this.matchedColumn)) {
+          // We already found and matched the single column, all keys now pass
+          return ReturnCode.INCLUDE;
+        }
+        if (Boolean.FALSE.equals(this.matchedColumn)) {
+          // We found all the columns, but did not match the expression, so skip to next row
+          return ReturnCode.NEXT_ROW;
+        }
+        // This is a key value we're not interested in (TODO: why INCLUDE here instead of NEXT_COL?)
+        ReturnCode code = inputTuple.resolveColumn(keyValue);
+        if (code != null) {
+            return code;
+        }
+        
+        // We found a new column, so we can re-evaluate
+        // TODO: if we have row key columns in our expression, should
+        // we always evaluate or just wait until the end?
+        this.matchedColumn = this.evaluate(inputTuple);
+        if (this.matchedColumn == null) {
+            if (inputTuple.isImmutable()) {
+                this.matchedColumn = Boolean.FALSE;
+            } else {
+                return ReturnCode.INCLUDE;
+            }
+        }
+        return this.matchedColumn ? ReturnCode.INCLUDE : ReturnCode.NEXT_ROW;
+    }
+
+    @Override
+    public boolean filterRow() {
+        if (this.matchedColumn == null && !inputTuple.isImmutable() && evaluateOnCompletion()) {
+            inputTuple.setImmutable();
+            this.matchedColumn = this.evaluate(inputTuple);
+        }
+        
+        return ! (Boolean.TRUE.equals(this.matchedColumn));
+    }
+
+    @Override
+    public void reset() {
+        matchedColumn = null;
+        inputTuple.reset();
+        super.reset();
+    }
+
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        super.readFields(input);
+        init();
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
new file mode 100644
index 0000000..32c2692
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.WritableUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.schema.tuple.Tuple;
+
+
+/**
+ *
+ * Filter for use when expressions only reference row key columns
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class RowKeyComparisonFilter extends BooleanExpressionFilter {
+    private static final Logger logger = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
+
+    private boolean evaluate = true;
+    private boolean keepRow = false;
+    private RowKeyTuple inputTuple = new RowKeyTuple();
+    private byte[] essentialCF;
+
+    public RowKeyComparisonFilter() {
+    }
+
+    public RowKeyComparisonFilter(Expression expression, byte[] essentialCF) {
+        super(expression);
+        this.essentialCF = essentialCF;
+    }
+
+    @Override
+    public void reset() {
+        this.keepRow = false;
+        this.evaluate = true;
+        super.reset();
+    }
+
+    /**
+     * Evaluate in filterKeyValue instead of filterRowKey, because HBASE-6562 causes filterRowKey
+     * to be called with deleted or partial row keys.
+     */
+    @Override
+    public ReturnCode filterKeyValue(KeyValue v) {
+        if (evaluate) {
+            inputTuple.setKey(v.getBuffer(), v.getRowOffset(), v.getRowLength());
+            this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
+            if (logger.isDebugEnabled()) {
+                logger.debug("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")  + " row " + inputTuple);
+            }
+            evaluate = false;
+        }
+        return keepRow ? ReturnCode.INCLUDE : ReturnCode.NEXT_ROW;
+    }
+
+    private final class RowKeyTuple implements Tuple {
+        private byte[] buf;
+        private int offset;
+        private int length;
+
+        public void setKey(byte[] buf, int offset, int length) {
+            this.buf = buf;
+            this.offset = offset;
+            this.length = length;
+        }
+
+        @Override
+        public void getKey(ImmutableBytesWritable ptr) {
+            ptr.set(buf, offset, length);
+        }
+
+        @Override
+        public KeyValue getValue(byte[] cf, byte[] cq) {
+            return null;
+        }
+
+        @Override
+        public boolean isImmutable() {
+            return true;
+        }
+
+        @Override
+        public String toString() {
+            return Bytes.toStringBinary(buf, offset, length);
+        }
+
+        @Override
+        public int size() {
+            return 0;
+        }
+
+        @Override
+        public KeyValue getValue(int index) {
+            throw new IndexOutOfBoundsException(Integer.toString(index));
+        }
+    }
+
+    @Override
+    public boolean filterRow() {
+        return !this.keepRow;
+    }
+
+    @SuppressWarnings("all") // suppressing missing @Override since this doesn't exist for HBase 0.94.4
+    public boolean isFamilyEssential(byte[] name) {
+        // We only need our "guaranteed to have a key value" column family,
+        // which we pass in and serialize through. In the case of a VIEW where
+        // we don't have this, we have to say that all families are essential.
+        return this.essentialCF.length == 0 ? true : Bytes.compareTo(this.essentialCF, name) == 0;
+    }
+
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        super.readFields(input);
+        this.essentialCF = WritableUtils.readCompressedByteArray(input);
+    }
+
+    @Override
+    public void write(DataOutput output) throws IOException {
+        super.write(output);
+        WritableUtils.writeCompressedByteArray(output, this.essentialCF);
+    }
+}
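
A minimal usage sketch, assuming a WHERE expression that references only row key columns (the helper class and the wiring are hypothetical, not part of this commit):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.filter.RowKeyComparisonFilter;

// Hypothetical helper: attach a row-key-only WHERE expression to a scan.
// essentialCF should name a family guaranteed to have a KeyValue in every row;
// an empty array marks every family as essential (see isFamilyEssential above).
class RowKeyFilterExample {
    static void applyRowKeyFilter(Scan scan, Expression rowKeyOnlyWhere, byte[] essentialCF) {
        scan.setFilter(new RowKeyComparisonFilter(rowKeyOnlyWhere, essentialCF));
    }
}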

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java
new file mode 100644
index 0000000..5b8a5f0
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.expression.Expression;
+
+
+/**
+ * 
+ * SingleKeyValueComparisonFilter that needs to compare both the column family and
+ * column qualifier parts of the key value to disambiguate it from a similarly
+ * named column qualifier in a different column family.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class SingleCFCQKeyValueComparisonFilter extends SingleKeyValueComparisonFilter {
+    public SingleCFCQKeyValueComparisonFilter() {
+    }
+
+    public SingleCFCQKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+    }
+
+    @Override
+    protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength) {
+        int c = Bytes.compareTo(cf, 0, cf.length, cfBuf, cfOffset, cfLength);
+        if (c != 0) return c;
+        return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java
new file mode 100644
index 0000000..425839a
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.expression.Expression;
+
+
+/**
+ * 
+ * SingleKeyValueComparisonFilter that needs to only compare the column qualifier
+ * part of the key value since the column qualifier is unique across all column
+ * families.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class SingleCQKeyValueComparisonFilter extends SingleKeyValueComparisonFilter {
+    public SingleCQKeyValueComparisonFilter() {
+    }
+
+    public SingleCQKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+    }
+
+    @Override
+    protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength) {
+        return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java b/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java
new file mode 100644
index 0000000..1caa332
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.io.DataInput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
+
+
+
+/**
+ *
+ * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter},
+ * but for general expression evaluation in the case where only a single KeyValue
+ * column is referenced in the expression.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public abstract class SingleKeyValueComparisonFilter extends BooleanExpressionFilter {
+    private final SingleKeyValueTuple inputTuple = new SingleKeyValueTuple();
+    private boolean matchedColumn;
+    protected byte[] cf;
+    protected byte[] cq;
+
+    public SingleKeyValueComparisonFilter() {
+    }
+
+    public SingleKeyValueComparisonFilter(Expression expression) {
+        super(expression);
+        init();
+    }
+
+    protected abstract int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength);
+
+    private void init() {
+        EvaluateOnCompletionVisitor visitor = new EvaluateOnCompletionVisitor() {
+            @Override
+            public Void visit(KeyValueColumnExpression expression) {
+                cf = expression.getColumnFamily();
+                cq = expression.getColumnName();
+                return null;
+            }
+        };
+        expression.accept(visitor);
+        this.evaluateOnCompletion = visitor.evaluateOnCompletion();
+    }
+
+    private boolean foundColumn() {
+        return inputTuple.size() > 0;
+    }
+
+    @Override
+    public ReturnCode filterKeyValue(KeyValue keyValue) {
+        if (this.matchedColumn) {
+          // We already found and matched the single column, all keys now pass
+          // TODO: why won't this cause earlier versions of a kv to be included?
+          return ReturnCode.INCLUDE;
+        }
+        if (this.foundColumn()) {
+          // We found all the columns, but did not match the expression, so skip to next row
+          return ReturnCode.NEXT_ROW;
+        }
+        byte[] buf = keyValue.getBuffer();
+        if (compare(buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength()) != 0) {
+            // Remember the key in case this is the only key value we see.
+            // We'll need it if we have row key columns too.
+            inputTuple.setKey(keyValue);
+            // This is a key value we're not interested in
+            // TODO: use NEXT_COL when bug fix comes through that includes the row still
+            return ReturnCode.INCLUDE;
+        }
+        inputTuple.setKeyValue(keyValue);
+
+        // We have the columns, so evaluate here
+        if (!Boolean.TRUE.equals(evaluate(inputTuple))) {
+            return ReturnCode.NEXT_ROW;
+        }
+        this.matchedColumn = true;
+        return ReturnCode.INCLUDE;
+    }
+
+    @Override
+    public boolean filterRow() {
+        // If column was found, return false if it was matched, true if it was not.
+        if (foundColumn()) {
+            return !this.matchedColumn;
+        }
+        // If column was not found, evaluate the expression here upon completion.
+        // This is required with certain expressions, for example, with IS NULL
+        // expressions where they'll evaluate to TRUE when the column being
+        // tested wasn't found.
+        // Since the filter is called also to position the scan initially, we have
+        // to guard against this by checking whether or not we've filtered in
+        // the key value (i.e. filterKeyValue was called and we found the keyValue
+        // for which we're looking).
+        if (inputTuple.hasKey() && evaluateOnCompletion()) {
+            return !Boolean.TRUE.equals(evaluate(inputTuple));
+        }
+        // Finally, if we have no values, and we're not required to re-evaluate it
+        // just filter the row
+        return true;
+    }
+
+    @Override
+    public void reset() {
+        inputTuple.reset();
+        matchedColumn = false;
+        super.reset();
+    }
+
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        super.readFields(input);
+        init();
+    }
+
+    @SuppressWarnings("all") // suppressing missing @Override since this doesn't exist for HBase 0.94.4
+    public boolean isFamilyEssential(byte[] name) {
+        // Only the column families involved in the expression are essential.
+        // The others are for columns projected in the select expression
+        return Bytes.compareTo(cf, name) == 0;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
new file mode 100644
index 0000000..ac8f174
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.filter;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+import com.google.common.hash.HashFunction;
+import com.google.common.hash.Hasher;
+import com.google.common.hash.Hashing;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.KeyRange.Bound;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.ScanUtil;
+
+
+/**
+ * 
+ * Filter that seeks based on a CNF of AND-ed and OR-ed key ranges
+ * 
+ * TODO: figure out when to reset/not reset position array
+ *
+ * @author ryang, jtaylor
+ * @since 0.1
+ */
+public class SkipScanFilter extends FilterBase {
+    private enum Terminate {AT, AFTER};
+    // Conjunctive normal form of or-ed ranges or point lookups
+    private List<List<KeyRange>> slots;
+    // schema of the row key
+    private RowKeySchema schema;
+    // current position for each slot
+    private int[] position;
+    // buffer used for skip hint
+    private int maxKeyLength;
+    private byte[] startKey;
+    private int startKeyLength;
+    private byte[] endKey; 
+    private int endKeyLength;
+    private boolean isDone;
+
+    private final ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+
+    /**
+     * We know that initially the first row will be positioned at or 
+     * after the first possible key.
+     */
+    public SkipScanFilter() {
+    }
+
+    public SkipScanFilter(List<List<KeyRange>> slots, RowKeySchema schema) {
+        int maxKeyLength = getTerminatorCount(schema);
+        for (List<KeyRange> slot : slots) {
+            int maxSlotLength = 0;
+            for (KeyRange range : slot) {
+                int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length);
+                if (maxSlotLength < maxRangeLength) {
+                    maxSlotLength = maxRangeLength;
+                }
+            }
+            maxKeyLength += maxSlotLength;
+        }
+        init(slots, schema, maxKeyLength);
+    }
+
+    private void init(List<List<KeyRange>> slots, RowKeySchema schema, int maxKeyLength) {
+        for (List<KeyRange> ranges : slots) {
+            if (ranges.isEmpty()) {
+                throw new IllegalStateException();
+            }
+        }
+        this.slots = slots;
+        this.schema = schema;
+        this.maxKeyLength = maxKeyLength;
+        this.position = new int[slots.size()];
+        startKey = new byte[maxKeyLength];
+        endKey = new byte[maxKeyLength];
+        endKeyLength = 0;
+    }
+
+    // Exposed for testing.
+    List<List<KeyRange>> getSlots() {
+        return slots;
+    }
+
+    @Override
+    public boolean filterAllRemaining() {
+        return isDone;
+    }
+
+    @Override
+    public ReturnCode filterKeyValue(KeyValue kv) {
+        return navigate(kv.getBuffer(), kv.getRowOffset(),kv.getRowLength(),Terminate.AFTER);
+    }
+
+    @Override
+    public KeyValue getNextKeyHint(KeyValue kv) {
+        // TODO: don't allocate new key value every time here if possible
+        return isDone ? null : new KeyValue(startKey, 0, startKeyLength,
+                null, 0, 0, null, 0, 0, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0);
+    }
+
+    public boolean hasIntersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) {
+        return intersect(lowerInclusiveKey, upperExclusiveKey, null);
+    }
+    /**
+     * Intersect the ranges of this filter with the range formed by the lowerInclusive and upperExclusive
+     * keys, and filter out the ones that are not included in the region. Return the new intersected
+     * SkipScanFilter or null if there is no intersection.
+     */
+    public SkipScanFilter intersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) {
+        List<List<KeyRange>> newSlots = Lists.newArrayListWithCapacity(slots.size());
+        if (intersect(lowerInclusiveKey, upperExclusiveKey, newSlots)) {
+            return new SkipScanFilter(newSlots, schema);
+        }
+        return null;
+    }
+    
+    private boolean intersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey, List<List<KeyRange>> newSlots) {
+        boolean lowerUnbound = (lowerInclusiveKey.length == 0);
+        Arrays.fill(position, 0);
+        isDone = false;
+        int startPos = 0;
+        int lastSlot = slots.size()-1;
+        if (!lowerUnbound) {
+            // Find the position of the first slot of the lower range
+            schema.next(ptr, 0, schema.iterator(lowerInclusiveKey,ptr));
+            startPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, 0);
+            // Lower range is past last upper range of first slot, so cannot possibly be in range
+            if (startPos >= slots.get(0).size()) {
+                return false;
+            }
+        }
+        boolean upperUnbound = (upperExclusiveKey.length == 0);
+        int endPos = slots.get(0).size()-1;
+        if (!upperUnbound) {
+            // Find the position of the first slot of the upper range
+            schema.next(ptr, 0, schema.iterator(upperExclusiveKey,ptr));
+            endPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, startPos);
+            // Upper range lower than first lower range of first slot, so cannot possibly be in range
+            if (endPos == 0 && Bytes.compareTo(upperExclusiveKey, slots.get(0).get(0).getLowerRange()) <= 0) {
+                return false;
+            }
+            // Past last position, so we can include everything from the start position
+            if (endPos >= slots.get(0).size()) {
+                upperUnbound = true;
+                endPos = slots.get(0).size()-1;
+            }
+        }
+        if (!lowerUnbound) {
+            position[0] = startPos;
+            navigate(lowerInclusiveKey, 0, lowerInclusiveKey.length, Terminate.AFTER);
+            if (filterAllRemaining()) {
+                return false;
+            }
+        }
+        if (upperUnbound) {
+            if (newSlots != null) {
+                newSlots.add(slots.get(0).subList(startPos, endPos+1));
+                newSlots.addAll(slots.subList(1, slots.size()));
+            }
+            return true;
+        }
+        int[] lowerPosition = Arrays.copyOf(position, position.length);
+        // Navigate to the upperExclusiveKey, but not past it
+        ReturnCode endCode = navigate(upperExclusiveKey, 0, upperExclusiveKey.length, Terminate.AT);
+        if (endCode == ReturnCode.INCLUDE) {
+            setStartKey();
+            // If the upperExclusiveKey is equal to the start key, we've gone one position too far, since
+            // our upper key is exclusive. In that case, go to the previous key
+            if (Bytes.compareTo(startKey, 0, startKeyLength, upperExclusiveKey, 0, upperExclusiveKey.length) == 0 &&
+                    (previousPosition(lastSlot) < 0 || position[0] < lowerPosition[0])) {
+                // If by backing up one position we have an empty range, then return
+                return false;
+            }
+        } else if (endCode == ReturnCode.SEEK_NEXT_USING_HINT) {
+            // The upperExclusiveKey is smaller than the ranges at the current position. Check whether
+            // it's at the same position as the one found for lowerInclusiveKey. If so, there is no intersection.
+            if (Arrays.equals(lowerPosition, position)) {
+                return false;
+            }
+        }
+        // Copy all positions, inclusive of the end position
+        for (int i = 0; i <= lastSlot; i++) {
+            List<KeyRange> newRanges = slots.get(i).subList(lowerPosition[i], Math.min(position[i] + 1, slots.get(i).size()));
+            if (newRanges.isEmpty()) {
+                return false;
+            }
+            if (newSlots != null) {
+                newSlots.add(newRanges);
+            }
+            if (position[i] > lowerPosition[i]) {
+                if (newSlots != null) {
+                    newSlots.addAll(slots.subList(i+1, slots.size()));
+                }
+                break;
+            }
+        }
+        return true;
+    }
+
+    private int previousPosition(int i) {
+        while (i >= 0 && --position[i] < 0) {
+            position[i] = slots.get(i).size()-1;
+            i--;
+        }
+        return i;
+    }
+    
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+            value="QBA_QUESTIONABLE_BOOLEAN_ASSIGNMENT", 
+            justification="Assignment designed to work this way.")
+    private ReturnCode navigate(final byte[] currentKey, final int offset, final int length, Terminate terminate) {
+        int nSlots = slots.size();
+        // First check to see if we're in-range until we reach our end key
+        if (endKeyLength > 0) {
+            if (Bytes.compareTo(currentKey, offset, length, endKey, 0, endKeyLength) < 0) {
+                return ReturnCode.INCLUDE;
+            }
+
+            // If key range of last slot is a single key, we can increment our position
+            // since we know we'll be past the current row after including it.
+            if (slots.get(nSlots-1).get(position[nSlots-1]).isSingleKey()) {
+                if (nextPosition(nSlots-1) < 0) {
+                    // Current row will be included, but we have no more
+                    isDone = true;
+                    return ReturnCode.NEXT_ROW;
+                }
+            }
+            else {
+                // Reset the positions to zero from the next slot after the earliest ranged slot, since the
+                // next key could be bigger at this ranged slot, and smaller than the current position of
+                // less significant slots.
+                int earliestRangeIndex = nSlots-1;
+                for (int i = 0; i < nSlots; i++) {
+                    if (!slots.get(i).get(position[i]).isSingleKey()) {
+                        earliestRangeIndex = i;
+                        break;
+                    }
+                }
+                Arrays.fill(position, earliestRangeIndex+1, position.length, 0);
+            }
+        }
+        endKeyLength = 0;
+        
+        // We may have already included the previous row and have nothing more to include
+        if (isDone) {
+            return ReturnCode.NEXT_ROW;
+        }
+
+        int i = 0;
+        boolean seek = false;
+        int earliestRangeIndex = nSlots-1;
+        int minOffset = offset;
+        int maxOffset = schema.iterator(currentKey, minOffset, length, ptr);
+        schema.next(ptr, i, maxOffset);
+        while (true) {
+            // Increment to the next range while the upper bound of our current slot is less than our current key
+            while (position[i] < slots.get(i).size() && slots.get(i).get(position[i]).compareUpperToLowerBound(ptr) < 0) {
+                position[i]++;
+            }
+            Arrays.fill(position, i+1, position.length, 0);
+            if (position[i] >= slots.get(i).size()) {
+                // Our current key is bigger than the last range of the current slot.
+                // If navigating after current key, backtrack and increment the key of the previous slot values.
+                // If navigating to current key, just return
+                if (terminate == Terminate.AT) {
+                    return ReturnCode.SEEK_NEXT_USING_HINT;
+                }
+                if (i == 0) {
+                    isDone = true;
+                    return ReturnCode.NEXT_ROW;
+                }
+                // Increment key and backtrack until in range. We know at this point that we'll be
+                // issuing a seek next hint.
+                seek = true;
+                Arrays.fill(position, i, position.length, 0);
+                int j  = i - 1;
+                // If we're positioned at a single key, there's no need to copy the current key and get the next key.
+                // Instead, just increment to the next key and continue.
+                boolean incremented = false;
+                while (j >= 0 && slots.get(j).get(position[j]).isSingleKey() && (incremented=true) && (position[j] = (position[j] + 1) % slots.get(j).size()) == 0) {
+                    j--;
+                    incremented = false;
+                }
+                if (j < 0) {
+                    isDone = true;
+                    return ReturnCode.NEXT_ROW;
+                }
+                if (incremented) {
+                    // Continue the loop after setting the start key, because our start key may be smaller than
+                    // the current key, so we'll end up incrementing the start key until it's bigger than the
+                    // current key.
+                    setStartKey();
+                    schema.reposition(ptr, i, j, minOffset, maxOffset);
+                } else {
+                    int currentLength = setStartKey(ptr, minOffset, j+1);
+                    // From here on, we use startKey as our buffer (resetting minOffset and maxOffset)
+                    // We've copied the part of the current key above that we need into startKey
+                    // Reinitialize the iterator to be positioned at previous slot position
+                    minOffset = 0;
+                    maxOffset = startKeyLength;
+                    schema.iterator(startKey, minOffset, maxOffset, ptr, j+1);
+                    // Do nextKey after setting the accessor b/c otherwise the null byte may have
+                    // been incremented causing us not to find it
+                    ByteUtil.nextKey(startKey, currentLength);
+                }
+                i = j;
+            } else if (slots.get(i).get(position[i]).compareLowerToUpperBound(ptr) > 0) {
+                // Our current key is less than the lower range of the current position in the current slot.
+                // Seek to the lower range, since it's bigger than the current key
+                setStartKey(ptr, minOffset, i);
+                return ReturnCode.SEEK_NEXT_USING_HINT;
+            } else { // We're in range, check the next slot
+                if (!slots.get(i).get(position[i]).isSingleKey() && i < earliestRangeIndex) {
+                    earliestRangeIndex = i;
+                }
+                // If we're past the last slot, or we know we're seeking to the next key (in
+                // which case the previously updated slot was already verified to be within
+                // range), we don't need to check the rest of the slots. Checking the rest
+                // would get us into trouble, because we may have a null byte that was
+                // incremented, which breaks our schema.next call.
+                if (i == nSlots-1 || seek) {
+                    break;
+                }
+                i++;
+                // If we run out of slots in our key, it means we have a partial key.
+                if (schema.next(ptr, i, maxOffset) == null) {
+                    // If the rest of the slots are checking for IS NULL, then break because
+                    // that's the case (since we don't store trailing nulls).
+                    if (allTrailingNulls(i)) {
+                        break;
+                    }
+                    // Otherwise we seek to the next start key because we're before it now
+                    setStartKey(ptr, minOffset, i);
+                    return ReturnCode.SEEK_NEXT_USING_HINT;
+                }
+            }
+        }
+            
+        if (seek) {
+            return ReturnCode.SEEK_NEXT_USING_HINT;
+        }
+        // Else, we're in range for all slots and can include this row plus all rows 
+        // up to the upper range of our last slot. We do this for ranges and single keys
+        // since we potentially have multiple key values for the same row key.
+        setEndKey(ptr, minOffset, i);
+        return ReturnCode.INCLUDE;
+    }
+
+    private boolean allTrailingNulls(int i) {
+        for (; i < slots.size(); i++) {
+            List<KeyRange> keyRanges = slots.get(i);
+            if (keyRanges.size() != 1) {
+                return false;
+            }
+            KeyRange keyRange = keyRanges.get(0);
+            if (!keyRange.isSingleKey()) {
+                return false;
+            }
+            if (keyRange.getLowerRange().length != 0) {
+                return false;
+            }
+        }
+        return true;
+    }
+    
+    private int nextPosition(int i) {
+        while (i >= 0 && slots.get(i).get(position[i]).isSingleKey() && (position[i] = (position[i] + 1) % slots.get(i).size()) == 0) {
+            i--;
+        }
+        return i;
+    }
+
+    private void setStartKey() {
+        startKeyLength = setKey(Bound.LOWER, startKey, 0, 0);
+    }
+
+    private int setStartKey(ImmutableBytesWritable ptr, int offset, int i) {
+        int length = ptr.getOffset() - offset;
+        startKey = copyKey(startKey, length + this.maxKeyLength, ptr.get(), offset, length);
+        startKeyLength = length;
+        // Add separator byte if we're at the end of the buffer, since trailing separator bytes are stripped
+        if (ptr.getOffset() + ptr.getLength() == offset + length && i-1 > 0 && !schema.getField(i-1).getDataType().isFixedWidth()) {
+            startKey[startKeyLength++] = QueryConstants.SEPARATOR_BYTE;
+        }
+        startKeyLength += setKey(Bound.LOWER, startKey, startKeyLength, i);
+        return length;
+    }
+    
+    private int setEndKey(ImmutableBytesWritable ptr, int offset, int i) {
+        int length = ptr.getOffset() - offset;
+        endKey = copyKey(endKey, length + this.maxKeyLength, ptr.get(), offset, length);
+        endKeyLength = length;
+        endKeyLength += setKey(Bound.UPPER, endKey, length, i);
+        return length;
+    }
+    
+    private int setKey(Bound bound, byte[] key, int keyOffset, int slotStartIndex) {
+        return ScanUtil.setKey(schema, slots, position, bound, key, keyOffset, slotStartIndex, position.length);
+    }
+
+    private static byte[] copyKey(byte[] targetKey, int targetLength, byte[] sourceKey, int offset, int length) {
+        if (targetLength > targetKey.length) {
+            targetKey = new byte[targetLength];
+        }
+        System.arraycopy(sourceKey, offset, targetKey, 0, length);
+        return targetKey;
+    }
+
+    private int getTerminatorCount(RowKeySchema schema) {
+        int nTerminators = 0;
+        for (int i = 0; i < schema.getFieldCount(); i++) {
+            Field field = schema.getField(i);
+            // We won't have a terminator on the last PK column
+            // unless it is variable length and exclusive, but
+            // having the extra byte regardless won't hurt anything
+            if (!field.getDataType().isFixedWidth()) {
+                nTerminators++;
+            }
+        }
+        return nTerminators;
+    }
+
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        RowKeySchema schema = new RowKeySchema();
+        schema.readFields(in);
+        int maxLength = getTerminatorCount(schema);
+        int andLen = in.readInt();
+        List<List<KeyRange>> slots = Lists.newArrayListWithExpectedSize(andLen);
+        for (int i=0; i<andLen; i++) {
+            int orlen = in.readInt();
+            List<KeyRange> orclause = Lists.newArrayListWithExpectedSize(orlen);
+            slots.add(orclause);
+            int maxSlotLength = 0;
+            for (int j=0; j<orlen; j++) {
+                KeyRange range = new KeyRange();
+                range.readFields(in);
+                if (range.getLowerRange().length > maxSlotLength) {
+                    maxSlotLength = range.getLowerRange().length;
+                }
+                if (range.getUpperRange().length > maxSlotLength) {
+                    maxSlotLength = range.getUpperRange().length;
+                }
+                orclause.add(range);
+            }
+            maxLength += maxSlotLength;
+        }
+        this.init(slots, schema, maxLength);
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        schema.write(out);
+        out.writeInt(slots.size());
+        for (List<KeyRange> orclause : slots) {
+            out.writeInt(orclause.size());
+            for (KeyRange range : orclause) {
+                range.write(out);
+            }
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        HashFunction hf = Hashing.goodFastHash(32);
+        Hasher h = hf.newHasher();
+        h.putInt(slots.size());
+        for (int i=0; i<slots.size(); i++) {
+            h.putInt(slots.get(i).size());
+            for (int j=0; j<slots.get(i).size(); j++) {
+                h.putBytes(slots.get(i).get(j).getLowerRange());
+                h.putBytes(slots.get(i).get(j).getUpperRange());
+            }
+        }
+        return h.hash().asInt();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof SkipScanFilter)) return false;
+        SkipScanFilter other = (SkipScanFilter)obj;
+        return Objects.equal(slots, other.slots) && Objects.equal(schema, other.schema);
+    }
+
+    @Override
+    public String toString() {
+        return "SkipScanFilter "+ slots.toString() ;
+    }
+}
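
Usage note (illustrative, not part of the patch): SkipScanFilter is built from the CNF slots,
one List<KeyRange> per row key column, and attached to an HBase Scan so that filterKeyValue
and getNextKeyHint can skip the scanner ahead. A minimal sketch, assuming a KeyRange factory
of the form KeyRange.getKeyRange(lower, lowerInclusive, upper, upperInclusive) and a helper
that builds the RowKeySchema (both are assumptions about APIs not shown in this diff):

    // CNF for: pk1 IN ('a', 'c') AND pk2 BETWEEN 'm' AND 'z'
    List<List<KeyRange>> slots = Lists.newArrayList();
    slots.add(Lists.newArrayList(
        KeyRange.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true),
        KeyRange.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("c"), true)));
    slots.add(Lists.newArrayList(
        KeyRange.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("z"), true)));

    RowKeySchema schema = buildVarcharPkSchema(2); // hypothetical helper for a two-column PK
    Scan scan = new Scan();
    scan.setFilter(new SkipScanFilter(slots, schema));

Per region, intersect(regionStartKey, regionEndKey) can then be used to prune the slots down
to the ranges that overlap that region's key boundaries before the scan is issued.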

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java b/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
new file mode 100644
index 0000000..4e13a0f
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/index/BaseIndexCodec.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.index;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+import org.apache.hbase.index.covered.IndexCodec;
+
+/**
+ * Base implementation of {@link IndexCodec} that provides default behavior for the
+ * optional methods: a no-op initialization, a codec that is always enabled, and no
+ * batch id.
+ */
+public abstract class BaseIndexCodec implements IndexCodec {
+
+  @Override
+  public void initialize(RegionCoprocessorEnvironment env) throws IOException {
+    // noop
+  }
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * By default, the codec is always enabled. Subclasses should override this method if they want
+   * to decide whether to index on a per-mutation basis.
+   * @throws IOException
+   */
+  @Override
+  public boolean isEnabled(Mutation m) throws IOException {
+    return true;
+  }
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Assumes each mutation is not in a batch. Subclasses that have different batching behavior
+   * should override this.
+   */
+  @Override
+  public byte[] getBatchId(Mutation m) {
+    return null;
+  }
+}
\ No newline at end of file
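
Usage note (illustrative, not part of the patch): a concrete codec only needs to override
isEnabled when it wants per-mutation control; everything else here is a safe default. The
subclass and attribute name below are hypothetical, and the class is left abstract because
the index-update generation methods of IndexCodec are not shown in this diff:

    // Hypothetical codec that only indexes mutations carrying a marker attribute.
    public abstract class AttributeGatedIndexCodec extends BaseIndexCodec {
        private static final String INDEX_MARKER = "INDEX_THIS_MUTATION"; // hypothetical attribute name

        @Override
        public boolean isEnabled(Mutation m) {
            // Mutation#getAttribute comes from HBase's OperationWithAttributes
            return m.getAttribute(INDEX_MARKER) != null;
        }
    }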