Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/01/27 20:23:10 UTC

[09/51] [partial] Initial commit

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java b/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java
new file mode 100644
index 0000000..01f51c7
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+
+import org.apache.phoenix.util.ByteUtil;
+
+
+/**
+ * 
+ * Class that encapsulates accessing a value stored in the row key.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class RowKeyValueAccessor implements Writable   {
+    /**
+     * Constructor solely for use during deserialization. Should not
+     * otherwise be used.
+     */
+    public RowKeyValueAccessor() {
+    }
+    
+    /**
+     * Constructor to compile access to the value in the row key formed from
+     * a list of PDatum.
+     * 
+     * @param data the list of data that make up the key
+     * @param index the zero-based index of the data item to access.
+     */
+    public RowKeyValueAccessor(List<? extends PDatum> data, int index) {
+        this.index = index;
+        int[] offsets = new int[data.size()];
+        int nOffsets = 0;
+        Iterator<? extends PDatum> iterator = data.iterator();
+        PDatum datum = iterator.next();
+        int pos = 0;
+        while (pos < index) {
+            int offset = 0;
+            if (datum.getDataType().isFixedWidth()) {
+                do {
+                    offset += datum.getByteSize();
+                    datum = iterator.next();
+                    pos++;
+                } while (pos < index && datum.getDataType().isFixedWidth());
+                offsets[nOffsets++] = offset; // Encode fixed byte offset as positive
+            } else {
+                do {
+                    offset++; // Count the number of variable length columns
+                    datum = iterator.next();
+                    pos++;
+                } while (pos < index && !datum.getDataType().isFixedWidth());
+                offsets[nOffsets++] = -offset; // Encode number of variable length columns as negative
+            }
+        }
+        if (nOffsets < offsets.length) {
+            this.offsets = Arrays.copyOf(offsets, nOffsets);
+        } else {
+            this.offsets = offsets;
+        }
+        // Remember this so that we don't bother looking for the null separator byte in this case
+        this.isFixedLength = datum.getDataType().isFixedWidth();
+        this.hasSeparator = !isFixedLength && (datum != data.get(data.size()-1));
+    }
+    
+    RowKeyValueAccessor(int[] offsets, boolean isFixedLength, boolean hasSeparator) {
+        this.offsets = offsets;
+        this.isFixedLength = isFixedLength;
+        this.hasSeparator = hasSeparator;
+    }
+
+    private int index = -1; // Only available on client side
+    private int[] offsets;
+    private boolean isFixedLength;
+    private boolean hasSeparator;
+
+    public int getIndex() {
+        return index;
+    }
+    
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (hasSeparator ? 1231 : 1237);
+        result = prime * result + (isFixedLength ? 1231 : 1237);
+        result = prime * result + Arrays.hashCode(offsets);
+        return result;
+    }
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        RowKeyValueAccessor other = (RowKeyValueAccessor)obj;
+        if (hasSeparator != other.hasSeparator) return false;
+        if (isFixedLength != other.isFixedLength) return false;
+        if (!Arrays.equals(offsets, other.offsets)) return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "RowKeyValueAccessor [offsets=" + Arrays.toString(offsets) + ", isFixedLength=" + isFixedLength
+                + ", hasSeparator=" + hasSeparator + "]";
+    }
+
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        // Decode hasSeparator and isFixedLength from vint storing offset array length
+        int length = WritableUtils.readVInt(input);
+        hasSeparator = (length & 0x02) != 0;
+        isFixedLength = (length & 0x01) != 0;
+        length >>= 2;
+        offsets = ByteUtil.deserializeVIntArray(input, length);
+    }
+
+    @Override
+    public void write(DataOutput output) throws IOException {
+        // Encode hasSeparator and isFixedLength into vint storing offset array length
+        // (since there's plenty of room)
+        int length = offsets.length << 2;
+        length |= (hasSeparator ? 1 << 1 : 0) | (isFixedLength ? 1 : 0);
+        ByteUtil.serializeVIntArray(output, offsets, length);
+    }
+    
+    /**
+     * Calculate the byte offset in the row key to the start of the PK column value
+     * @param keyBuffer the byte array of the row key
+     * @param keyOffset the offset in the byte array of where the key begins
+     * @return byte offset to the start of the PK column value
+     */
+    public int getOffset(byte[] keyBuffer, int keyOffset) {
+        // Use encoded offsets to navigate through row key buffer
+        for (int offset : offsets) {
+            if (offset >= 0) { // If offset is non negative, it's a byte offset
+                keyOffset += offset;
+            } else { // Else, a negative offset is the number of variable length values to skip
+                while (offset++ < 0) {
+                    // FIXME: keyOffset < keyBuffer.length required because HBase passes bogus keys to filter to position scan (HBASE-6562)
+                    while (keyOffset < keyBuffer.length && keyBuffer[keyOffset++] != SEPARATOR_BYTE) {
+                    }
+                }
+            }
+        }
+        return keyOffset;
+    }
+    
+    /**
+     * Calculate the length of the PK column value
+     * @param keyBuffer the byte array of the row key
+     * @param keyOffset the offset in the byte array of where the key begins
+     * @param maxOffset maximum offset to use while calculating length 
+     * @return the length of the PK column value
+     */
+    public int getLength(byte[] keyBuffer, int keyOffset, int maxOffset) {
+        if (!hasSeparator) {
+            return maxOffset - keyOffset;
+        }
+        int offset = keyOffset;
+        // FIXME: offset < maxOffset required because HBase passes bogus keys to filter to position scan (HBASE-6562)
+        while (offset < maxOffset && keyBuffer[offset] != SEPARATOR_BYTE) {
+            offset++;
+        }
+        return offset - keyOffset;
+    }
+}
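
The offset array built above encodes navigation instructions: a non-negative entry is a count of fixed-width bytes to skip, and a negative entry is the number of variable-length values to skip by scanning for the zero separator byte. A minimal, self-contained sketch of that scheme follows; the class and method names are illustrative only, not part of the Phoenix API:

    public class OffsetNavigationSketch {
        static final byte SEPARATOR_BYTE = 0;

        // Same convention as RowKeyValueAccessor.getOffset(): non-negative means
        // "skip this many fixed-width bytes"; negative means "skip this many
        // variable-length values" by scanning for the separator byte.
        static int navigate(byte[] key, int keyOffset, int[] offsets) {
            for (int offset : offsets) {
                if (offset >= 0) {
                    keyOffset += offset;
                } else {
                    while (offset++ < 0) {
                        while (keyOffset < key.length && key[keyOffset++] != SEPARATOR_BYTE) {
                        }
                    }
                }
            }
            return keyOffset;
        }

        public static void main(String[] args) {
            // Row key: a 4-byte fixed-width INTEGER, a variable-length string, then the target value.
            byte[] key = {1, 2, 3, 4, 'a', 'b', SEPARATOR_BYTE, 'x', 'y'};
            int[] offsets = {4, -1}; // skip 4 fixed bytes, then one variable-length value
            System.out.println(navigate(key, 0, offsets)); // prints 7, the offset of 'x'
        }
    }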

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
new file mode 100644
index 0000000..50ec4b6
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+import com.google.common.collect.Lists;
+import org.apache.phoenix.compile.ScanRanges;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.KeyRange.Bound;
+import org.apache.phoenix.util.ScanUtil;
+
+
+/**
+ * Utility methods related to transparent salting of row keys.
+ */
+public class SaltingUtil {
+    public static final int NUM_SALTING_BYTES = 1;
+    public static final Integer MAX_BUCKET_NUM = 256; // Unsigned byte.
+    public static final String SALTING_COLUMN_NAME = "_SALT";
+    public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY";
+    public static final PColumnImpl SALTING_COLUMN = new PColumnImpl(
+            PNameFactory.newName(SALTING_COLUMN_NAME), null, PDataType.BINARY, 1, 0, false, 0, null);
+
+    public static List<KeyRange> generateAllSaltingRanges(int bucketNum) {
+        List<KeyRange> allRanges = Lists.<KeyRange>newArrayListWithExpectedSize(bucketNum);
+        for (int i=0; i<bucketNum; i++) {
+            byte[] saltByte = new byte[] {(byte) i};
+            allRanges.add(SALTING_COLUMN.getDataType().getKeyRange(
+                    saltByte, true, saltByte, true));
+        }
+        return allRanges;
+    }
+
+    public static byte[][] getSalteByteSplitPoints(int saltBucketNum) {
+        byte[][] splits = new byte[saltBucketNum-1][];
+        for (int i = 1; i < saltBucketNum; i++) {
+            splits[i-1] = new byte[] {(byte) i};
+        }
+        return splits;
+    }
+
+    // Compute the hash of the row key stored in key (whose first byte the caller leaves
+    // empty as a placeholder) and set the first byte of the returned key to the salt byte.
+    public static byte[] getSaltedKey(ImmutableBytesWritable key, int bucketNum) {
+        byte[] keyBytes = new byte[key.getLength()];
+        byte saltByte = getSaltingByte(key.get(), key.getOffset() + 1, key.getLength() - 1, bucketNum);
+        keyBytes[0] = saltByte;
+        System.arraycopy(key.get(), key.getOffset() + 1, keyBytes, 1, key.getLength() - 1);
+        return keyBytes;
+    }
+
+    // Generate the bucket byte given a byte array and the number of buckets.
+    public static byte getSaltingByte(byte[] value, int offset, int length, int bucketNum) {
+        int hash = hashCode(value, offset, length);
+        byte bucketByte = (byte) Math.abs(hash % bucketNum); // abs of the modulo (not modulo of the abs) so Integer.MIN_VALUE cannot yield a negative bucket
+        return bucketByte;
+    }
+
+    private static int hashCode(byte a[], int offset, int length) {
+        if (a == null)
+            return 0;
+        int result = 1;
+        for (int i = offset; i < offset + length; i++) {
+            result = 31 * result + a[i];
+        }
+        return result;
+    }
+
+    public static List<List<KeyRange>> setSaltByte(List<List<KeyRange>> ranges, int bucketNum) {
+        if (ranges == null || ranges.isEmpty()) {
+            return ScanRanges.NOTHING.getRanges();
+        }
+        for (int i = 1; i < ranges.size(); i++) {
+            List<KeyRange> range = ranges.get(i);
+            if (range != null && !range.isEmpty()) {
+                throw new IllegalStateException();
+            }
+        }
+        List<KeyRange> newRanges = Lists.newArrayListWithExpectedSize(ranges.size());
+        for (KeyRange range : ranges.get(0)) {
+            if (!range.isSingleKey()) {
+                throw new IllegalStateException();
+            }
+            byte[] key = range.getLowerRange();
+            byte saltByte = SaltingUtil.getSaltingByte(key, 0, key.length, bucketNum);
+            byte[] saltedKey = new byte[key.length + 1];
+            System.arraycopy(key, 0, saltedKey, 1, key.length);   
+            saltedKey[0] = saltByte;
+            newRanges.add(KeyRange.getKeyRange(saltedKey, true, saltedKey, true));
+        }
+        return Collections.singletonList(newRanges);
+    }
+    
+    public static List<List<KeyRange>> flattenRanges(List<List<KeyRange>> ranges, RowKeySchema schema, int bucketNum) {
+        if (ranges == null || ranges.isEmpty()) {
+            return ScanRanges.NOTHING.getRanges();
+        }
+        int count = 1;
+        // Skip salt byte range in the first position
+        for (int i = 1; i < ranges.size(); i++) {
+            count *= ranges.get(i).size();
+        }
+        KeyRange[] expandedRanges = new KeyRange[count];
+        int[] position = new int[ranges.size()];
+        int estimatedKeyLength = ScanUtil.estimateMaximumKeyLength(schema, 1, ranges);
+        int idx = 0, length;
+        byte saltByte;
+        byte[] key = new byte[estimatedKeyLength];
+        do {
+            length = ScanUtil.setKey(schema, ranges, position, Bound.LOWER, key, 1, 0, ranges.size(), 1);
+            saltByte = SaltingUtil.getSaltingByte(key, 1, length, bucketNum);
+            key[0] = saltByte;
+            byte[] saltedKey = Arrays.copyOf(key, length + 1);
+            KeyRange range = PDataType.VARBINARY.getKeyRange(saltedKey, true, saltedKey, true);
+            expandedRanges[idx++] = range;
+        } while (incrementKey(ranges, position));
+        // The comparator is imperfect, but sufficient for all single keys.
+        Arrays.sort(expandedRanges, KeyRange.COMPARATOR);
+        List<KeyRange> expandedRangesList = Arrays.asList(expandedRanges);
+        return Collections.singletonList(expandedRangesList);
+    }
+
+    private static boolean incrementKey(List<List<KeyRange>> slots, int[] position) {
+        int idx = slots.size() - 1;
+        while (idx >= 0 && (position[idx] = (position[idx] + 1) % slots.get(idx).size()) == 0) {
+            idx--;
+        }
+        return idx >= 0;
+    }
+
+    public static KeyRange addSaltByte(byte[] startKey, KeyRange minMaxRange) {
+        byte saltByte = startKey.length == 0 ? 0 : startKey[0];
+        byte[] lowerRange = minMaxRange.getLowerRange();
+        if(!minMaxRange.lowerUnbound()) {
+            byte[] newLowerRange = new byte[lowerRange.length + 1];
+            newLowerRange[0] = saltByte;
+            System.arraycopy(lowerRange, 0, newLowerRange, 1, lowerRange.length);
+            lowerRange = newLowerRange;
+        }
+        byte[] upperRange = minMaxRange.getUpperRange();
+
+        if(!minMaxRange.upperUnbound()) { 
+            byte[] newUpperRange = new byte[upperRange.length + 1];
+            newUpperRange[0] = saltByte;
+            System.arraycopy(upperRange, 0, newUpperRange, 1, upperRange.length);
+            upperRange = newUpperRange;
+        }
+        return KeyRange.getKeyRange(lowerRange, upperRange);
+    }
+}
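
The salting scheme above reduces a rolling hash of the unsalted key modulo the bucket count and prepends the result as the first key byte. A self-contained sketch under those assumptions (SaltSketch is an illustrative name, not Phoenix API):

    public class SaltSketch {
        static byte saltByte(byte[] key, int bucketNum) {
            int hash = 1;
            for (byte b : key) {
                hash = 31 * hash + b; // same rolling hash as SaltingUtil.hashCode()
            }
            return (byte) Math.abs(hash % bucketNum);
        }

        public static void main(String[] args) {
            byte[] rowKey = "org.apache.phoenix".getBytes();
            byte salt = saltByte(rowKey, 16);
            byte[] salted = new byte[rowKey.length + 1];
            salted[0] = salt; // the salt byte occupies the first position of the key
            System.arraycopy(rowKey, 0, salted, 1, rowKey.length);
            System.out.println("bucket = " + salt);
        }
    }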

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java b/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java
new file mode 100644
index 0000000..9605610
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+
+/**
+ * 
+ * Exception thrown when a table already exists in the schema
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class TableAlreadyExistsException extends SQLException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.TABLE_ALREADY_EXIST;
+    private final String schemaName;
+    private final String tableName;
+    private final PTable table;
+
+    public TableAlreadyExistsException(String schemaName, String tableName, PTable table) {
+        super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).build().toString(),
+                code.getSQLState());
+        this.table = table;
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    public PTable getTable() {
+        return table;
+    }
+
+    public String getTableName() {
+        return tableName;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java b/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java
new file mode 100644
index 0000000..7c8bb63
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+
+/**
+ * 
+ * Exception thrown when a table name could not be found in the schema
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public class TableNotFoundException extends MetaDataEntityNotFoundException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.TABLE_UNDEFINED;
+    private final String schemaName;
+    private final String tableName;
+
+    public TableNotFoundException(String tableName) {
+        this(null, tableName);
+    }
+
+    public TableNotFoundException(String schemaName, String tableName) {
+        super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).build().toString(),
+                code.getSQLState(), code.getErrorCode());
+        this.tableName = tableName;
+        this.schemaName = schemaName;
+    }
+
+    public String getTableName() {
+        return tableName;
+    }
+
+    public String getSchemaName() {
+        return schemaName;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/TableRef.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/TableRef.java b/src/main/java/org/apache/phoenix/schema/TableRef.java
new file mode 100644
index 0000000..80d5caa
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import org.apache.hadoop.hbase.HConstants;
+
+
+
+public final class TableRef {
+    private final PTable table;
+    private final String alias;
+    private final long timeStamp;
+    private final boolean hasDynamicCols;
+
+    public TableRef(TableRef tableRef, long timeStamp) {
+        this(tableRef.alias, tableRef.table, timeStamp, tableRef.hasDynamicCols);
+    }
+    
+    public TableRef(PTable table) {
+        this(null, table, HConstants.LATEST_TIMESTAMP, false);
+    }
+
+    public TableRef(String alias, PTable table, long timeStamp, boolean hasDynamicCols) {
+        this.alias = alias;
+        this.table = table;
+        this.timeStamp = timeStamp;
+        this.hasDynamicCols = hasDynamicCols;
+    }
+
+    public PTable getTable() {
+        return table;
+    }
+
+    public String getTableAlias() {
+        return alias;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + this.table.getName().getString().hashCode();
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        TableRef other = (TableRef)obj;
+        if (!table.getName().getString().equals(other.table.getName().getString())) return false;
+        return true;
+    }
+
+    public long getTimeStamp() {
+        return timeStamp;
+    }
+
+    public boolean hasDynamicCols() {
+        return hasDynamicCols;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java b/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java
new file mode 100644
index 0000000..eb5088a
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+/**
+ * Exception thrown when we try to convert a value from one type to an incompatible type.
+ * 
+ * @author zhuang
+ * @since 1.0
+ */
+public class TypeMismatchException extends SQLException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.TYPE_MISMATCH;
+
+    public TypeMismatchException(PDataType type, String location) {
+        super(new SQLExceptionInfo.Builder(code).setMessage(type + " for " + location).build().toString(), code.getSQLState(), code.getErrorCode());
+    }
+
+    public TypeMismatchException(PDataType lhs, PDataType rhs) {
+        super(new SQLExceptionInfo.Builder(code).setMessage(lhs + " and " + rhs).build().toString(), code.getSQLState(), code.getErrorCode());
+    }
+
+    public TypeMismatchException(PDataType lhs, PDataType rhs, String location) {
+        super(new SQLExceptionInfo.Builder(code).setMessage(lhs + " and " + rhs + " for " + location).build().toString(), code.getSQLState(), code.getErrorCode());
+    }
+
+    public TypeMismatchException(String lhs, String rhs, String location) {
+        super(new SQLExceptionInfo.Builder(code).setMessage(lhs + " and " + rhs + " for " + location).build().toString(), code.getSQLState(), code.getErrorCode());
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/ValueBitSet.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/ValueBitSet.java b/src/main/java/org/apache/phoenix/schema/ValueBitSet.java
new file mode 100644
index 0000000..3ce2506
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/ValueBitSet.java
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.util.SizedUtil;
+
+
+/**
+ * 
+ * Class to track whether or not a value is null.
+ * Each value is identified by its zero-based position in the provided schema.
+ *
+ * @author jtaylor
+ * @since 0.1
+ * 
+ */
+public class ValueBitSet {
+    public final static ValueBitSet EMPTY_VALUE_BITSET = new ValueBitSet();
+    private static final int BITS_PER_LONG = 64;
+    private static final int BITS_PER_SHORT = 16;
+    private final long[] bits;
+    private final ValueSchema schema;
+    
+    private int maxSetBit = -1;
+    
+    public static ValueBitSet newInstance(ValueSchema schema) {
+        if (schema.getFieldCount() == schema.getMinNullable()) {
+            return EMPTY_VALUE_BITSET;
+        }
+        return new ValueBitSet(schema);
+    }
+    
+    private ValueBitSet() {
+        schema = null;
+        bits = new long[0];
+    }
+    
+    private ValueBitSet(ValueSchema schema) {
+        this.schema = schema;
+        bits = new long[Math.max(1,(schema.getFieldCount() - schema.getMinNullable() + BITS_PER_LONG -1) / BITS_PER_LONG)];
+    }
+    
+    public int getMaxSetBit() {
+        return maxSetBit;
+    }
+    
+    private boolean isVarLength() {
+        return schema == null ? false : schema.getFieldCount() - schema.getMinNullable() > BITS_PER_SHORT;
+    }
+    
+    public int getNullCount(int nBit, int nFields) {
+        if (schema == null) {
+            return 0;
+        }
+        int count = 0;
+        int index = nBit/BITS_PER_LONG;
+        // Shift right based on the bit index, because we aren't interested in the bits before this.
+        int shiftRight = nBit % BITS_PER_LONG;
+        int bitsToLeft = BITS_PER_LONG - shiftRight;
+        // Shift left based on the number of fields we're interested in counting.
+        int shiftLeft = Math.max(0, (BITS_PER_LONG - nFields));
+        // Mask off the bits of interest by shifting the bitset.
+        count += Math.min(nFields, bitsToLeft) - (Long.bitCount((bits[index] >>> shiftRight) << shiftLeft));
+        // Subtract from the number of fields the total number of possible fields we looked at
+        nFields -= bitsToLeft;
+        if (nFields > 0) {
+            // If more fields to count, then walk through the successive long bits
+            while (nFields > BITS_PER_LONG) {
+                count += BITS_PER_LONG - Long.bitCount(bits[++index]);
+                nFields -= BITS_PER_LONG;
+            }
+            // Count the final remaining fields
+            if (nFields > 0) {
+                count += nFields - Long.bitCount(bits[++index] << (BITS_PER_LONG - nFields));
+            }
+        }
+        return count;
+    }
+    
+    /**
+     * Serialize the value bit set into a byte array. The byte array
+     * is expected to have enough room (use {@link #getEstimatedLength()}
+     * to ensure enough room exists).
+     * @param b the byte array into which to put the serialized bit set
+     * @param offset the offset into the byte array
+     * @return the incremented offset
+     */
+    public int toBytes(byte[] b, int offset) {
+        if (schema == null) {
+            return offset;
+        }
+        // If the total number of possible values is bigger than 16 bits (the
+        // size of a short), then serialize the long array followed by the
+        // array length.
+        if (isVarLength()) {
+            short nLongs = (short)((maxSetBit + BITS_PER_LONG) / BITS_PER_LONG);
+            for (int i = 0; i < nLongs; i++) {
+                offset = Bytes.putLong(b, offset, bits[i]);
+            }
+            offset = Bytes.putShort(b, offset, nLongs);
+        } else { 
+            // Else if the number of values is less than or equal to 16,
+            // serialize the bits directly into a short.
+            offset = Bytes.putShort(b, offset, (short)bits[0]);            
+        }
+        return offset;
+    }
+    
+    public void clear() {
+        Arrays.fill(bits, 0);
+        maxSetBit = -1;
+    }
+    
+    public boolean get(int nBit) {
+        int lIndex = nBit / BITS_PER_LONG;
+        int bIndex = nBit % BITS_PER_LONG;
+        return (bits[lIndex] & (1L << bIndex)) != 0;
+    }
+    
+    public void set(int nBit) {
+        int lIndex = nBit / BITS_PER_LONG;
+        int bIndex = nBit % BITS_PER_LONG;
+        bits[lIndex] |= (1L << bIndex);
+        maxSetBit = Math.max(maxSetBit, nBit);
+    }
+    
+    public void or(ImmutableBytesWritable ptr) {
+        if (schema == null) {
+            return;
+        }
+        if (isVarLength()) {
+            int offset = ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT;
+            short nLongs = Bytes.toShort(ptr.get(), offset);
+            offset -= nLongs * Bytes.SIZEOF_LONG;
+            for (int i = 0; i < nLongs; i++) {
+                bits[i] |= Bytes.toLong(ptr.get(), offset);
+                offset += Bytes.SIZEOF_LONG;
+            }
+            maxSetBit = Math.max(maxSetBit, nLongs * BITS_PER_LONG - 1); // bit positions, not byte sizes
+        } else {
+            long l = Bytes.toShort(ptr.get(), ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT);
+            bits[0] |= l;
+            maxSetBit = Math.max(maxSetBit, BITS_PER_SHORT - 1);
+        }
+        
+    }
+    
+    /**
+     * @return Max serialization size
+     */
+    public int getEstimatedLength() {
+        if (schema == null) {
+            return 0;
+        }
+        return Bytes.SIZEOF_SHORT + (isVarLength() ? (maxSetBit + BITS_PER_LONG) / BITS_PER_LONG * Bytes.SIZEOF_LONG : 0);
+    }
+    
+    public static int getSize(int nBits) {
+        return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.INT_SIZE + (nBits + BITS_PER_LONG - 1) / BITS_PER_LONG * Bytes.SIZEOF_LONG;
+    }
+    
+    /**
+     * @return Size of object in memory
+     */
+    public int getSize() {
+        if (schema == null) {
+            return 0;
+        }
+        return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.LONG_SIZE * bits.length + SizedUtil.INT_SIZE;
+    }
+
+    public void or(ValueBitSet isSet) {
+        for (int i = 0; i < bits.length; i++) {
+            bits[i] |= isSet.bits[i];
+        }
+        maxSetBit = Math.max(maxSetBit, isSet.maxSetBit);
+    }
+}
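
toBytes() and or() above imply a small wire format: with at most 16 nullable fields the bits are stored directly in a short; otherwise whole longs are written, followed by a trailing short holding the long count so a reader can locate it at the end of the value. A standalone sketch of that format, assuming the big-endian order that Bytes.putLong/putShort use:

    import java.nio.ByteBuffer;

    public class BitSetWireFormatSketch {
        static byte[] serialize(long[] bits, int maxSetBit, boolean varLength) {
            if (!varLength) {
                // Short form: all bits fit directly in a 2-byte short.
                return ByteBuffer.allocate(2).putShort((short) bits[0]).array();
            }
            short nLongs = (short) ((maxSetBit + 64) / 64);
            ByteBuffer buf = ByteBuffer.allocate(nLongs * 8 + 2);
            for (int i = 0; i < nLongs; i++) {
                buf.putLong(bits[i]);
            }
            buf.putShort(nLongs); // count trails the longs, as in ValueBitSet.toBytes()
            return buf.array();
        }

        public static void main(String[] args) {
            long[] bits = {0xBL}; // bits 0, 1, and 3 set
            System.out.println(serialize(bits, 3, false).length); // 2 bytes: short form
            System.out.println(serialize(bits, 3, true).length);  // 10 bytes: one long + trailing short
        }
    }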

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java b/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java
new file mode 100644
index 0000000..ad245b0
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.sql.SQLException;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+
+/**
+ * Exception thrown when an argument falls outside its expected range of values.
+ * 
+ * @author anoopsjohn
+ * @since 1.1.2
+ */
+public class ValueRangeExcpetion extends SQLException {
+    private static final long serialVersionUID = 1L;
+    private static SQLExceptionCode code = SQLExceptionCode.VALUE_OUTSIDE_RANGE;
+    
+    public ValueRangeExcpetion(Object minValue, Object maxValue, Object actualValue, String location){
+        super(new SQLExceptionInfo.Builder(code).setMessage("expected: [" + minValue + " , " + maxValue + "] but was: " + actualValue + " at " + location).build().toString(), code.getSQLState());
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/ValueSchema.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/ValueSchema.java b/src/main/java/org/apache/phoenix/schema/ValueSchema.java
new file mode 100644
index 0000000..c96736a
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/ValueSchema.java
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+/**
+ * 
+ * Simple flat schema over a byte array where fields may be any of {@link PDataType}.
+ * Optimized for positional access by index.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public abstract class ValueSchema implements Writable {
+    public static final int ESTIMATED_VARIABLE_LENGTH_SIZE = 10;
+    private int[] fieldIndexByPosition;
+    private List<Field> fields;
+    private int estimatedLength;
+    private boolean isFixedLength;
+    private boolean isMaxLength;
+    private int minNullable;
+    
+    public ValueSchema() {
+    }
+    
+    protected ValueSchema(int minNullable, List<Field> fields) {
+        init(minNullable, fields);
+    }
+
+    private void init(int minNullable, List<Field> fields) {
+        this.minNullable = minNullable;
+        this.fields = ImmutableList.copyOf(fields);
+        int estimatedLength = 0;
+        boolean isMaxLength = true, isFixedLength = true;
+        int positions = 0;
+        for (Field field : fields) {
+            int fieldEstLength = 0;
+            PDataType type = field.getDataType();
+            Integer byteSize = type.getByteSize();
+            if (type.isFixedWidth()) {
+                fieldEstLength += field.getByteSize();
+            } else {
+                isFixedLength = false;
+                // Account for vint for length if not fixed
+                if (byteSize == null) {
+                    isMaxLength = false;
+                    fieldEstLength += ESTIMATED_VARIABLE_LENGTH_SIZE;
+                } else {
+                    fieldEstLength += WritableUtils.getVIntSize(byteSize);
+                    fieldEstLength += byteSize;
+                }
+            }
+            positions += field.getCount();
+            estimatedLength += fieldEstLength * field.getCount();
+        }
+        fieldIndexByPosition = new int[positions];
+        for (int i = 0, j= 0; i < fields.size(); i++) {
+            Field field = fields.get(i);
+            Arrays.fill(fieldIndexByPosition, j, j + field.getCount(), i);
+            j += field.getCount();
+        }
+        this.isFixedLength = isFixedLength;
+        this.isMaxLength = isMaxLength;
+        this.estimatedLength = estimatedLength;
+    }
+    
+    public int getFieldCount() {
+        return fieldIndexByPosition.length;
+    }
+    
+    public List<Field> getFields() {
+        return fields;
+    }
+    
+    /**
+     * @return true if all types are fixed width
+     */
+    public boolean isFixedLength() {
+        return isFixedLength;
+    }
+    
+    /**
+     * @return true if {@link #getEstimatedValueLength()} returns the maximum length
+     * of a serialized value for this schema
+     */
+    public boolean isMaxLength() {
+        return isMaxLength;
+    }
+    
+    /**
+     * @return estimated size in bytes of a serialized value for this schema
+     */
+    public int getEstimatedValueLength() {
+        return estimatedLength;
+    }
+    
+    /**
+     * Non-nullable fields are packed to the left so that we do not need to store trailing nulls.
+     * Knowing the minimum position of a nullable field enables this.
+     * @return the minimum position of a nullable field
+     */
+    public int getMinNullable() {
+        return minNullable;
+    }
+    
+    public static final class Field implements Writable {
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + byteSize;
+            result = prime * result + type.hashCode();
+            result = prime * result + ((columnModifier == null) ? 0 : columnModifier.hashCode());
+            result = prime * result + (isNullable ? 1231 : 1237);
+            return result;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) return true;
+            if (obj == null) return false;
+            if (getClass() != obj.getClass()) return false;
+            Field other = (Field)obj;
+            if (byteSize != other.byteSize) return false;
+            if (columnModifier != other.columnModifier) return false;
+            if (isNullable != other.isNullable) return false;
+            if (type != other.type) return false;
+            return true;
+        }
+
+        private int count;
+        private PDataType type;
+        private int byteSize = 0;
+        private boolean isNullable;
+        private ColumnModifier columnModifier;
+        
+        public Field() {
+        }
+        
+        private Field(PDatum datum, boolean isNullable, int count, ColumnModifier columnModifier) {
+            this.type = datum.getDataType();
+            this.columnModifier = columnModifier;
+            this.count = count;
+            this.isNullable = isNullable;
+            if (this.type.isFixedWidth() && this.type.getByteSize() == null) {
+                this.byteSize = datum.getByteSize();
+            }
+        }
+        
+        private Field(Field field, int count) {
+            this.type = field.getDataType();
+            this.byteSize = field.byteSize;
+            this.count = count;
+        }
+        
+        public final ColumnModifier getColumnModifier() {
+            return columnModifier;
+        }
+        
+        public final PDataType getDataType() {
+            return type;
+        }
+        
+        public final boolean isNullable() {
+            return isNullable;
+        }
+        
+        public final int getByteSize() {
+            return type.getByteSize() == null ? byteSize : type.getByteSize();
+        }
+        
+        public final int getCount() {
+            return count;
+        }
+
+        @Override
+        public void readFields(DataInput input) throws IOException {
+            // Encode isNullable in sign bit of type ordinal (offset by 1, since ordinal could be 0)
+            int typeOrdinal = WritableUtils.readVInt(input);
+            if (typeOrdinal < 0) {
+                typeOrdinal *= -1;
+                this.isNullable = true;
+            }
+            this.type = PDataType.values()[typeOrdinal-1];
+            this.count = WritableUtils.readVInt(input);
+            if (this.count < 0) {
+                this.count *= -1;
+                this.columnModifier = ColumnModifier.SORT_DESC;
+            }
+            if (this.type.isFixedWidth() && this.type.getByteSize() == null) {
+                this.byteSize = WritableUtils.readVInt(input);
+            }
+        }
+
+        @Override
+        public void write(DataOutput output) throws IOException {
+            WritableUtils.writeVInt(output, (type.ordinal() + 1) * (this.isNullable ? -1 : 1));
+            WritableUtils.writeVInt(output, count * (columnModifier == null ? 1 : -1));
+            if (type.isFixedWidth() && type.getByteSize() == null) {
+                WritableUtils.writeVInt(output, byteSize);
+            }
+        }
+    }
+    
+    public abstract static class ValueSchemaBuilder {
+        protected List<Field> fields = new ArrayList<Field>();
+        protected int nFields = Integer.MAX_VALUE;
+        protected final int minNullable;
+        
+        public ValueSchemaBuilder(int minNullable) {
+            this.minNullable = minNullable;
+        }
+        
+        protected List<Field> buildFields() {
+            List<Field> condensedFields = new ArrayList<Field>(fields.size());
+            for (int i = 0; i < Math.min(nFields,fields.size()); ) {
+                Field field = fields.get(i);
+                int count = 1;
+                while ( ++i < fields.size() && field.equals(fields.get(i))) {
+                    count++;
+                }
+                condensedFields.add(count == 1 ? field : new Field(field,count));
+            }
+            return condensedFields;
+        }
+
+        abstract public ValueSchema build();
+
+        public ValueSchemaBuilder setMaxFields(int nFields) {
+            this.nFields = nFields;
+            return this;
+        }
+        
+        protected ValueSchemaBuilder addField(PDatum datum, boolean isNullable, ColumnModifier columnModifier) {
+            fields.add(new Field(datum, isNullable, 1, columnModifier));
+            return this;
+        }
+    }
+    
+    public int getEstimatedByteSize() {
+        int size = 0;
+        size += WritableUtils.getVIntSize(minNullable);
+        size += WritableUtils.getVIntSize(fields.size());
+        size += fields.size() * 3;
+        return size;
+    }
+    
+    public void serialize(DataOutput output) throws IOException {
+        WritableUtils.writeVInt(output, minNullable);
+        WritableUtils.writeVInt(output, fields.size());
+        for (int i = 0; i < fields.size(); i++) {
+            fields.get(i).write(output);
+        }
+    }
+    
+    public Field getField(int position) {
+        return fields.get(fieldIndexByPosition[position]);
+    }
+    
+    @Override
+    public void readFields(DataInput in) throws IOException {
+        int minNullable = WritableUtils.readVInt(in);
+        int nFields = WritableUtils.readVInt(in);
+        List<Field> fields = Lists.newArrayListWithExpectedSize(nFields);
+        for (int i = 0; i < nFields; i++) {
+            Field field = new Field();
+            field.readFields(in);
+            fields.add(field);
+        }
+        init(minNullable, fields);
+    }
+         
+    @Override
+    public void write(DataOutput out) throws IOException {
+        WritableUtils.writeVInt(out, minNullable);
+        WritableUtils.writeVInt(out, fields.size());
+        for (int i = 0; i < fields.size(); i++) {
+            fields.get(i).write(out);
+        }
+    }
+
+}
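
Field.readFields()/write() above pack two flags into the signs of the serialized vints: nullability rides in the sign of (type ordinal + 1), and a descending column modifier rides in the sign of the repeat count. A small standalone sketch of the encode/decode round trip, with plain ints standing in for Hadoop vints:

    public class FieldEncodingSketch {
        static int encodeType(int typeOrdinal, boolean isNullable) {
            return (typeOrdinal + 1) * (isNullable ? -1 : 1); // +1 so ordinal 0 still carries a sign
        }

        static int encodeCount(int count, boolean sortDesc) {
            return count * (sortDesc ? -1 : 1);
        }

        public static void main(String[] args) {
            int t = encodeType(0, true);  // -1: a nullable field of type ordinal 0
            int c = encodeCount(3, true); // -3: three consecutive descending fields
            boolean nullable = t < 0;
            int ordinal = Math.abs(t) - 1;
            boolean sortDesc = c < 0;
            int count = Math.abs(c);
            System.out.println(nullable + " " + ordinal + " " + sortDesc + " " + count);
        }
    }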

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java b/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
new file mode 100644
index 0000000..5e38a27
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.stat;
+
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+
+
+/**
+ * Interface for Phoenix table statistics. Statistics are collected on the server
+ * side and can be used for various purposes, such as splitting a region for scanning.
+ * 
+ * The statistics object is defined on the client side, but it is populated on the server
+ * side. The client should not write any data into the statistics object.
+ */
+public interface PTableStats {
+
+    /**
+     * Given the region info, returns an array of row keys that is the current estimate of the
+     * key distribution inside that region. The keys should split the region into equal chunks.
+     * 
+     * @param region
+     * @return array of keys
+     */
+    byte[][] getRegionGuidePosts(HRegionInfo region);
+
+    void write(DataOutput output) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java b/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
new file mode 100644
index 0000000..27bb665
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.stat;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.WritableUtils;
+
+import com.google.common.collect.ImmutableMap;
+
+
+/**
+ * Implementation for PTableStats.
+ */
+public class PTableStatsImpl implements PTableStats {
+
+    // The map of guide posts is kept immutable. We simply take the snapshot passed in
+    // by the caller and store an immutable copy of it.
+    private Map<String, byte[][]> regionGuidePosts;
+
+    public PTableStatsImpl() { }
+
+    public PTableStatsImpl(Map<String, byte[][]> stats) {
+        regionGuidePosts = ImmutableMap.copyOf(stats);
+    }
+
+    @Override
+    public byte[][] getRegionGuidePosts(HRegionInfo region) {
+        return regionGuidePosts.get(region.getRegionNameAsString());
+    }
+
+    @Override
+    public void write(DataOutput output) throws IOException {
+        if (regionGuidePosts == null) {
+            WritableUtils.writeVInt(output, 0);
+            return;
+        }
+        WritableUtils.writeVInt(output, regionGuidePosts.size());
+        for (Entry<String, byte[][]> entry : regionGuidePosts.entrySet()) {
+            WritableUtils.writeString(output, entry.getKey());
+            byte[][] value = entry.getValue();
+            WritableUtils.writeVInt(output, value.length);
+            for (int i=0; i<value.length; i++) {
+                Bytes.writeByteArray(output, value[i]);
+            }
+        }
+    }
+}
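
The matching deserializer is not part of this file. A reader for the format produced by write() above might look like the following sketch; the class and method names here are hypothetical, not Phoenix API:

    import java.io.DataInput;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.WritableUtils;

    public class PTableStatsReaderSketch {
        public static Map<String, byte[][]> read(DataInput input) throws IOException {
            Map<String, byte[][]> guidePosts = new HashMap<String, byte[][]>();
            int nRegions = WritableUtils.readVInt(input); // mirrors writeVInt(output, size)
            for (int i = 0; i < nRegions; i++) {
                String regionName = WritableUtils.readString(input);
                int nKeys = WritableUtils.readVInt(input);
                byte[][] keys = new byte[nKeys][];
                for (int j = 0; j < nKeys; j++) {
                    keys[j] = Bytes.readByteArray(input); // pairs with Bytes.writeByteArray()
                }
                guidePosts.put(regionName, keys);
            }
            return guidePosts;
        }
    }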

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
new file mode 100644
index 0000000..bf1944c
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.phoenix.util.KeyValueUtil;
+
+
+public class MultiKeyValueTuple implements Tuple {
+    private List<KeyValue> values;
+    
+    public MultiKeyValueTuple(List<KeyValue> values) {
+        setKeyValues(values);
+    }
+    
+    public MultiKeyValueTuple() {
+    }
+
+    public void setKeyValues(List<KeyValue> values) {
+        this.values = ImmutableList.copyOf(values);
+    }
+    
+    @Override
+    public void getKey(ImmutableBytesWritable ptr) {
+        KeyValue value = values.get(0);
+        ptr.set(value.getBuffer(), value.getRowOffset(), value.getRowLength());
+    }
+
+    @Override
+    public boolean isImmutable() {
+        return true;
+    }
+
+    @Override
+    public KeyValue getValue(byte[] family, byte[] qualifier) {
+        return KeyValueUtil.getColumnLatest(values, family, qualifier);
+    }
+
+    @Override
+    public String toString() {
+        return values.toString();
+    }
+
+    @Override
+    public int size() {
+        return values.size();
+    }
+
+    @Override
+    public KeyValue getValue(int index) {
+        return values.get(index);
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
new file mode 100644
index 0000000..43a94e8
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.phoenix.util.ResultUtil;
+
+
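+/**
+ * Tuple that wraps an HBase client {@link Result}, exposing its KeyValues
+ * through the Tuple interface without copying them.
+ */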
+public class ResultTuple implements Tuple {
+    private Result result;
+    
+    public ResultTuple(Result result) {
+        this.result = result;
+    }
+    
+    public ResultTuple() {
+    }
+    
+    public Result getResult() {
+        return this.result;
+    }
+
+    public void setResult(Result result) {
+        this.result = result;
+    }
+    
+    @Override
+    public void getKey(ImmutableBytesWritable ptr) {
+        ResultUtil.getKey(result, ptr);
+    }
+
+    @Override
+    public boolean isImmutable() {
+        return true;
+    }
+
+    @Override
+    public KeyValue getValue(byte[] family, byte[] qualifier) {
+        return result.getColumnLatest(family, qualifier);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("keyvalues=");
+        if (this.result.isEmpty()) {
+            sb.append("NONE");
+            return sb.toString();
+        }
+        sb.append("{");
+        boolean moreThanOne = false;
+        for (KeyValue kv : this.result.list()) {
+            if (moreThanOne) {
+                sb.append(", \n");
+            } else {
+                moreThanOne = true;
+            }
+            sb.append(kv.toString()).append("/value=").append(Bytes.toString(kv.getValue()));
+        }
+        sb.append("}\n");
+        return sb.toString();
+    }
+
+    @Override
+    public int size() {
+        return result.size();
+    }
+
+    @Override
+    public KeyValue getValue(int index) {
+        return result.raw()[index];
+    }
+}

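A brief usage sketch, assuming a hypothetical HTableInterface named htable: a
client-side Result is adapted to the Tuple interface without copying its
KeyValues.

    Result result = htable.get(new Get(Bytes.toBytes("row1")));
    Tuple tuple = new ResultTuple(result);
    ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
    tuple.getKey(keyPtr); // keyPtr now points into the Result's row key bytes
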
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java b/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java
new file mode 100644
index 0000000..8f7946b
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
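+/**
+ * Tuple containing at most one KeyValue, reusable across cells via
+ * {@link #reset()} and {@link #setKeyValue(KeyValue)}.
+ */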
+public class SingleKeyValueTuple implements Tuple {
+    private static final byte[] UNINITIALIZED_KEY_BUFFER = new byte[0];
+    private KeyValue keyValue;
+    private ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(UNINITIALIZED_KEY_BUFFER);
+    
+    public SingleKeyValueTuple() {
+    }
+    
+    public SingleKeyValueTuple(KeyValue keyValue) {
+        setKeyValue(keyValue); // setKeyValue rejects a null KeyValue
+    }
+    
+    public boolean hasKey() {
+        return keyPtr.get() != UNINITIALIZED_KEY_BUFFER;
+    }
+    
+    public void reset() {
+        this.keyValue = null;
+        keyPtr.set(UNINITIALIZED_KEY_BUFFER);
+    }
+    
+    public void setKeyValue(KeyValue keyValue) {
+        if (keyValue == null) {
+            throw new IllegalArgumentException();
+        }
+        this.keyValue = keyValue;
+        setKey(keyValue);
+    }
+    
+    public void setKey(ImmutableBytesWritable ptr) {
+        keyPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength());
+    }
+    
+    public void setKey(KeyValue keyValue) {
+        if (keyValue == null) {
+            throw new IllegalArgumentException();
+        }
+        keyPtr.set(keyValue.getBuffer(), keyValue.getRowOffset(), keyValue.getRowLength());
+    }
+    
+    @Override
+    public void getKey(ImmutableBytesWritable ptr) {
+        ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength());
+    }
+    
+    @Override
+    public KeyValue getValue(byte[] cf, byte[] cq) {
+        return keyValue;
+    }
+
+    @Override
+    public boolean isImmutable() {
+        return true;
+    }
+    
+    @Override
+    public String toString() {
+        return "SingleKeyValueTuple[" + keyValue == null ? keyPtr.get() == UNITIALIZED_KEY_BUFFER ? "null" : Bytes.toStringBinary(keyPtr.get(),keyPtr.getOffset(),keyPtr.getLength()) : keyValue.toString() + "]";
+    }
+
+    @Override
+    public int size() {
+        return keyValue == null ? 0 : 1;
+    }
+
+    @Override
+    public KeyValue getValue(int index) {
+        if (index != 0 || keyValue == null) {
+            throw new IndexOutOfBoundsException(Integer.toString(index));
+        }
+        return keyValue;
+    }
+}

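Because the tuple can be reset and refilled, one instance can be reused during
the one-KeyValue-at-a-time filter evaluation mentioned in the Tuple javadoc. A
minimal sketch, where kvs is a hypothetical iteration source:

    SingleKeyValueTuple tuple = new SingleKeyValueTuple();
    for (KeyValue kv : kvs) {
        tuple.reset();         // clear the previous key and KeyValue
        tuple.setKeyValue(kv); // also points the key at the new row
        // ... evaluate expressions against tuple ...
    }
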
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java b/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
new file mode 100644
index 0000000..7c36607
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+/**
+ * 
+ * Interface representing an ordered list of KeyValues returned as the
+ * result of a query. Each tuple represents a row (i.e. all its KeyValues
+ * will have the same key), and each KeyValue represents a column value.
+ *
+ * @author jtaylor
+ * @since 0.1
+ */
+public interface Tuple {
+    /**
+     * @return Number of KeyValues contained by the Tuple.
+     */
+    public int size();
+    
+    /**
+     * Determines whether or not the Tuple is immutable (the typical case)
+     * or will potentially have additional KeyValues added to it (the case
+     * during filter evaluation when we see one KeyValue at a time).
+     * @return true if Tuple is immutable and false otherwise.
+     */
+    public boolean isImmutable();
+    
+    /**
+     * Get the row key for the Tuple
+     * @param ptr the bytes pointer that will be updated to point to
+     * the key buffer.
+     */
+    public void getKey(ImmutableBytesWritable ptr);
+    
+    /**
+     * Get the KeyValue at the given index.
+     * @param index the zero-based KeyValue index, from 0 inclusive to {@link #size()} exclusive
+     * @return the KeyValue at the given index
+     * @throws IndexOutOfBoundsException if an invalid index is used
+     */
+    public KeyValue getValue(int index);
+    
+    /**
+     * Get the KeyValue contained by the Tuple with the given family and
+     * qualifier name.
+     * @param family the column family of the KeyValue being retrieved
+     * @param qualifier the column qualifier of the KeyValue being retrieved
+     * @return the KeyValue with the given family and qualifier name or
+     * null if not found.
+     */
+    public KeyValue getValue(byte[] family, byte[] qualifier);
+}

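Callers can walk a Tuple positionally without knowing which implementation
backs it; a minimal sketch:

    for (int i = 0; i < tuple.size(); i++) {
        KeyValue kv = tuple.getValue(i); // every kv shares the same row key
        // ... process kv ...
    }
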
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java b/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java
new file mode 100644
index 0000000..4b49f0f
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * 
+ * @author anoopsjohn
+ * @since 1.2.1
+ */
+public class BigDecimalUtil {
+
+    /**
+     * Calculates the precision and scale of the result of a BigDecimal arithmetic
+     * operation, following the rules described
+     * <a href="http://db.apache.org/derby/docs/10.0/manuals/reference/sqlj124.html#HDRSII-SQLJ-36146">here</a>.
+     * @param lp precision of the left operand
+     * @param ls scale of the left operand
+     * @param rp precision of the right operand
+     * @param rs scale of the right operand
+     * @param op The operation type
+     * @return {@link Pair} comprising the result precision (first) and scale (second).
+     */
+    public static Pair<Integer, Integer> getResultPrecisionScale(int lp, int ls, int rp, int rs, Operation op) {
+        int resultPrec = 0, resultScale = 0;
+        switch (op) {
+        case MULTIPLY:
+            resultPrec = lp + rp;
+            resultScale = ls + rs;
+            break;
+        case DIVIDE:
+            resultPrec = lp - ls + rp + Math.max(ls + rp - rs + 1, 4);
+            resultScale = 31 - lp + ls - rs;
+            break;
+        case ADD:
+            // Per the Derby rules linked above: max(lp - ls, rp - rs) + 1 integer
+            // digits plus max(ls, rs) fractional digits. The previous formula,
+            // 2 * (lp - ls) + ls, ignored the right operand entirely.
+            resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs);
+            resultScale = Math.max(ls, rs);
+            break;
+        case AVG:
+            resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs);
+            resultScale = Math.max(Math.max(ls, rs), 4);
+            break;
+        case OTHERS:
+            resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs);
+            resultScale = Math.max(ls, rs);
+            break;
+        }
+        return new Pair<Integer, Integer>(resultPrec, resultScale);
+    }
+    
+    public enum Operation {
+        MULTIPLY, DIVIDE, ADD, AVG, OTHERS
+    }
+}
\ No newline at end of file

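For example, multiplying a DECIMAL(5,2) by a DECIMAL(4,1) yields precision
5 + 4 = 9 and scale 2 + 1 = 3:

    Pair<Integer, Integer> ps = BigDecimalUtil.getResultPrecisionScale(
            5, 2, 4, 1, BigDecimalUtil.Operation.MULTIPLY);
    int precision = ps.getFirst();  // 9
    int scale = ps.getSecond();     // 3
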
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/c5b80246/src/main/java/org/apache/phoenix/util/BitSet.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/util/BitSet.java b/src/main/java/org/apache/phoenix/util/BitSet.java
new file mode 100644
index 0000000..7cdbe6a
--- /dev/null
+++ b/src/main/java/org/apache/phoenix/util/BitSet.java
@@ -0,0 +1,106 @@
+package org.apache.phoenix.util;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * 
+ * BitSet that can be initialized from primitive types, a capability that
+ * java.util.BitSet only gained (via valueOf) in Java 7.
+ *
+ * @author jtaylor
+ * @since 2.1.0
+ */
+public class BitSet {
+    public static final int BITS_PER_LONG = 64;
+    public static final int BITS_PER_INT = 32;
+    public static final int BITS_PER_SHORT = 16;
+    public static final int BITS_PER_BYTE = 8;
+    private final long[] bits;
+    
+    public static int getByteSize(int capacity) {
+        if (capacity <= BitSet.BITS_PER_BYTE) {
+            return Bytes.SIZEOF_BYTE;
+        } else if (capacity <= BitSet.BITS_PER_SHORT) {
+            return Bytes.SIZEOF_SHORT;
+        } else if (capacity <= BitSet.BITS_PER_INT) {
+            return Bytes.SIZEOF_INT;
+        } else if (capacity <= BitSet.BITS_PER_LONG) {
+            return Bytes.SIZEOF_LONG;
+        } else {
+            int nLongs = (capacity-1) / BitSet.BITS_PER_LONG + 1;
+            return nLongs * Bytes.SIZEOF_LONG;
+        }
+    }
+
+    public static BitSet read(DataInput input, int capacity) throws IOException {
+        if (capacity <= BitSet.BITS_PER_BYTE) {
+            return fromPrimitive(input.readByte());
+        } else if (capacity <= BitSet.BITS_PER_SHORT) {
+            return fromPrimitive(input.readShort());
+        } else if (capacity <= BitSet.BITS_PER_INT) {
+            return fromPrimitive(input.readInt());
+        } else if (capacity <= BitSet.BITS_PER_LONG) {
+            return fromPrimitive(input.readLong());
+        } else {
+            int nLongs = (capacity-1) / BitSet.BITS_PER_LONG + 1;
+            return fromArray(ByteUtil.readFixedLengthLongArray(input, nLongs));
+        }
+    }
+    
+    public static void write(DataOutput output, BitSet bitSet, int capacity) throws IOException {
+        if (capacity <= BitSet.BITS_PER_BYTE) {
+            output.writeByte((byte)bitSet.bits[0]);
+        } else if (capacity <= BitSet.BITS_PER_SHORT) {
+            output.writeShort((short)bitSet.bits[0]);
+        } else if (capacity <= BitSet.BITS_PER_INT) {
+            output.writeInt((int)bitSet.bits[0]);
+        } else if (capacity <= BitSet.BITS_PER_LONG) {
+            output.writeLong(bitSet.bits[0]);
+        } else {
+            ByteUtil.writeFixedLengthLongArray(output, bitSet.bits);
+        }
+    }
+    
+    public static BitSet fromPrimitive(byte bits) {
+        return new BitSet(new long[] { bits });
+    }
+
+    public static BitSet fromPrimitive(short bits) {
+        return new BitSet(new long[] { bits });
+    }
+
+    public static BitSet fromPrimitive(int bits) {
+        return new BitSet(new long[] { bits });
+    }
+
+    public static BitSet fromPrimitive(long bits) {
+        return new BitSet(new long[] { bits });
+    }
+
+    public static BitSet fromArray(long[] bits) {
+        return new BitSet(bits);
+    }
+
+    public static BitSet withCapacity(int maxBits) {
+        int size = Math.max(1, (maxBits + BITS_PER_LONG - 1) / BITS_PER_LONG);
+        return new BitSet(new long[size]);
+    }
+
+    public BitSet(long[] bits) {
+        this.bits = bits;
+    }
+
+    public boolean get(int nBit) {
+        int lIndex = nBit / BITS_PER_LONG;
+        int bIndex = nBit % BITS_PER_LONG;
+        return (bits[lIndex] & (1L << bIndex)) != 0;
+    }
+    
+    public void set(int nBit) {
+        int lIndex = nBit / BITS_PER_LONG;
+        int bIndex = nBit % BITS_PER_LONG;
+        bits[lIndex] |= (1L << bIndex);
+    }
+}
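
A brief round-trip sketch: capacities of 64 bits or fewer serialize as the
smallest primitive that holds them, so a 40-bit set costs a single long
(8 bytes) on the wire.

    BitSet bitSet = BitSet.withCapacity(40);
    bitSet.set(3);
    assert bitSet.get(3);
    assert BitSet.getByteSize(40) == Bytes.SIZEOF_LONG;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BitSet.write(new DataOutputStream(baos), bitSet, 40);
    DataInput in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    BitSet copy = BitSet.read(in, 40);
    assert copy.get(3);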