Posted to commits@hbase.apache.org by en...@apache.org on 2014/06/28 02:30:53 UTC

[07/49] git commit: HBASE-10354 Add an API for defining consistency per request

HBASE-10354 Add an API for defining consistency per request

git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-10070@1565062 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8ea476b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8ea476b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8ea476b

Branch: refs/heads/master
Commit: d8ea476bf1ce0572cf05fc56f5c39dd8df4f99d1
Parents: d4b8222
Author: Enis Soztutar <en...@apache.org>
Authored: Thu Feb 6 03:14:15 2014 +0000
Committer: Enis Soztutar <en...@apache.org>
Committed: Fri Jun 27 16:39:36 2014 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/Consistency.java |  50 ++
 .../org/apache/hadoop/hbase/client/Get.java     |  24 +-
 .../org/apache/hadoop/hbase/client/Result.java  |  54 +-
 .../org/apache/hadoop/hbase/client/Scan.java    |  60 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  64 +-
 .../hbase/protobuf/generated/ClientProtos.java  | 599 ++++++++++++++++---
 hbase-protocol/src/main/protobuf/Client.proto   |  14 +
 hbase-shell/src/main/ruby/hbase.rb              |   1 +
 hbase-shell/src/main/ruby/hbase/table.rb        |   9 +-
 hbase-shell/src/main/ruby/shell/commands/get.rb |   6 +-
 .../src/main/ruby/shell/commands/scan.rb        |   1 +
 11 files changed, 739 insertions(+), 143 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java
new file mode 100644
index 0000000..7685300
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Consistency defines the expected consistency level for an operation.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum Consistency {
+  // developer note: Do not reorder. Client.proto#Consistency depends on this order
+  /**
+   * Strong consistency is the default consistency model in HBase,
+   * where reads and writes go through a single server which serializes
+   * the updates, and returns all data that was written and ack'd.
+   */
+  STRONG,
+
+  /**
+   * Timeline consistent reads may not see the most recent updates.
+   * Write transactions are always performed in the strong consistency
+   * model in HBase, which guarantees that transactions are ordered and
+   * replayed in the same order by all copies of the data. With timeline
+   * consistency, get and scan requests can be answered from data that
+   * may be stale.
+   * <br>
+   * The client may still observe transactions out of order if the
+   * requests are answered by different servers.
+   */
+  TIMELINE,
+}
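
The enum above is the whole client-facing switch: a request that never sets
a level keeps the existing strongly consistent behavior. A minimal usage
sketch follows; the class name, table name "t1" and row key "row1" are
invented for illustration, and it assumes a reachable cluster via the
default configuration:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Consistency;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  public class TimelineGetExample {               // invented class name
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      HTable table = new HTable(conf, "t1");      // invented table name
      try {
        Get get = new Get(Bytes.toBytes("row1"));  // invented row key
        get.setConsistency(Consistency.TIMELINE);  // opt in, per request
        Result result = table.get(get);
        if (result.isStale()) {
          // answered from possibly stale data; see Result#isStale below
        }
      } finally {
        table.close();
      }
    }
  }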

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 81a31a4..faccf5e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -77,6 +77,7 @@ public class Get extends Query
   private boolean closestRowBefore = false;
   private Map<byte [], NavigableSet<byte []>> familyMap =
     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+  private Consistency consistency = null;
 
   /**
    * Create a Get operation for the specified row.
@@ -270,6 +271,7 @@ public class Get extends Query
    * Method for retrieving the get's row
    * @return row
    */
+  @Override
   public byte [] getRow() {
     return this.row;
   }
@@ -341,6 +343,22 @@ public class Get extends Query
   }
 
   /**
+   * Returns the consistency level for this operation
+   * @return the consistency level
+   */
+  public Consistency getConsistency() {
+    return consistency;
+  }
+
+  /**
+   * Sets the consistency level for this operation
+   * @param consistency the consistency level
+   */
+  public void setConsistency(Consistency consistency) {
+    this.consistency = consistency;
+  }
+
+  /**
    * Compile the table and column family (i.e. schema) information
    * into a String. Useful for parsing and aggregation by debugging,
    * logging, and administration tools.
@@ -369,7 +387,7 @@ public class Get extends Query
   public Map<String, Object> toMap(int maxCols) {
     // we start with the fingerprint map and build on top of it.
     Map<String, Object> map = getFingerprint();
-    // replace the fingerprint's simple list of families with a 
+    // replace the fingerprint's simple list of families with a
     // map from column families to lists of qualifiers and kv details
     Map<String, List<String>> columns = new HashMap<String, List<String>>();
     map.put("families", columns);
@@ -402,8 +420,8 @@ public class Get extends Query
           }
           familyList.add(Bytes.toStringBinary(column));
         }
-      }   
-    }   
+      }
+    }
     map.put("totalColumns", colCount);
     if (this.filter != null) {
       map.put("filter", this.filter.toString());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index b3f7076..74faab2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class Result implements CellScannable {
   private Cell[] cells;
   private Boolean exists; // if the query was just to check existence.
+  private boolean stale = false;
   // We're not using java serialization.  Transient here is just a marker to say
   // that this is where we cache row if we're ever asked for it.
   private transient byte [] row = null;
@@ -109,7 +110,7 @@ public class Result implements CellScannable {
   @Deprecated
   public Result(List<KeyValue> kvs) {
     // TODO: Here we presume the passed in Cells are KVs.  One day this won't always be so.
-    this(kvs.toArray(new Cell[kvs.size()]), null);
+    this(kvs.toArray(new Cell[kvs.size()]), null, false);
   }
 
   /**
@@ -118,14 +119,18 @@ public class Result implements CellScannable {
    * @param cells List of cells
    */
   public static Result create(List<Cell> cells) {
-    return new Result(cells.toArray(new Cell[cells.size()]), null);
+    return new Result(cells.toArray(new Cell[cells.size()]), null, false);
   }
 
   public static Result create(List<Cell> cells, Boolean exists) {
+    return create(cells, exists, false);
+  }
+
+  public static Result create(List<Cell> cells, Boolean exists, boolean stale) {
     if (exists != null){
-      return new Result(null, exists);
+      return new Result(null, exists, stale);
     }
-    return new Result(cells.toArray(new Cell[cells.size()]), null);
+    return new Result(cells.toArray(new Cell[cells.size()]), null, stale);
   }
 
   /**
@@ -134,13 +139,21 @@ public class Result implements CellScannable {
    * @param cells array of cells
    */
   public static Result create(Cell[] cells) {
-    return new Result(cells, null);
+    return new Result(cells, null, false);
+  }
+
+  public static Result create(Cell[] cells, Boolean exists, boolean stale) {
+    if (exists != null){
+      return new Result(null, exists, stale);
+    }
+    return new Result(cells, null, stale);
   }
 
   /** Private ctor. Use {@link #create(Cell[])}. */
-  private Result(Cell[] cells, Boolean exists) {
+  private Result(Cell[] cells, Boolean exists, boolean stale) {
     this.cells = cells;
     this.exists = exists;
+    this.stale = stale;
   }
 
   /**
@@ -180,13 +193,13 @@ public class Result implements CellScannable {
   }
 
   /**
-   * Return an cells of a Result as an array of KeyValues 
-   * 
+   * Return the cells of a Result as an array of KeyValues
+   *
    * WARNING do not use, expensive.  This does an arraycopy of the cell[]'s value.
    *
    * Added to ease transition from  0.94 -> 0.96.
-   * 
-   * @deprecated as of 0.96, use {@link #rawCells()}  
+   *
+   * @deprecated as of 0.96, use {@link #rawCells()}
    * @return array of KeyValues, empty array if nothing in result.
    */
   @Deprecated
@@ -208,15 +221,15 @@ public class Result implements CellScannable {
   public List<Cell> listCells() {
     return isEmpty()? null: Arrays.asList(rawCells());
   }
-  
+
   /**
-   * Return an cells of a Result as an array of KeyValues 
-   * 
+   * Return the cells of a Result as an array of KeyValues
+   *
    * WARNING do not use, expensive.  This does  an arraycopy of the cell[]'s value.
    *
    * Added to ease transition from  0.94 -> 0.96.
-   * 
-   * @deprecated as of 0.96, use {@link #listCells()}  
+   *
+   * @deprecated as of 0.96, use {@link #listCells()}
    * @return all sorted List of KeyValues; can be null if no cells in the result
    */
   @Deprecated
@@ -634,6 +647,7 @@ public class Result implements CellScannable {
       NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
       if(versionMap == null) {
         versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {
+          @Override
           public int compare(Long l1, Long l2) {
             return l2.compareTo(l1);
           }
@@ -823,4 +837,14 @@ public class Result implements CellScannable {
   public void setExists(Boolean exists) {
     this.exists = exists;
   }
+
+  /**
+   * Whether or not the results are coming from possibly stale data. Stale results
+   * might be returned if {@link Consistency} is not STRONG for the query.
+   * @return Whether or not the results are coming from possibly stale data.
+   */
+  public boolean isStale() {
+    return stale;
+  }
+
 }
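
The stale flag travels with the Result itself, so callers can branch on it
without knowing how the read was issued, and the pre-existing factories keep
stale = false. A construction fragment (KeyValue as the Cell implementation;
row/family/qualifier/value names are invented; needs java.util.ArrayList/List
and org.apache.hadoop.hbase.{Cell, KeyValue} in addition to the imports above):

  List<Cell> cells = new ArrayList<Cell>();
  cells.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v")));
  Result staleResult = Result.create(cells, null, true);   // new three-arg factory
  assert staleResult.isStale();
  Result freshResult = Result.create(cells);               // old factory: not stale
  assert !freshResult.isStale();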

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index bf968df..a7d587a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -119,12 +119,12 @@ public class Scan extends Query {
   // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
-  
+
   // If an application wants to use multiple scans over different tables each scan must
   // define this attribute with the appropriate table name by calling
   // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
   static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
-  
+
   /*
    * -1 means no caching
    */
@@ -136,22 +136,23 @@ public class Scan extends Query {
   private Map<byte [], NavigableSet<byte []>> familyMap =
     new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
   private Boolean loadColumnFamiliesOnDemand = null;
+  private Consistency consistency = null;
 
   /**
    * Set it true for small scan to get better performance
-   * 
+   *
    * Small scan should use pread and big scan can use seek + read
-   * 
+   *
    * seek + read is fast but can cause two problem (1) resource contention (2)
    * cause too much network io
-   * 
+   *
    * [89-fb] Using pread for non-compaction read request
    * https://issues.apache.org/jira/browse/HBASE-7266
-   * 
+   *
    * On the other hand, if setting it true, we would do
    * openScanner,next,closeScanner in one RPC call. It means the better
    * performance for small scan. [HBASE-9488].
-   * 
+   *
    * Generally, if the scan range is within one data block(64KB), it could be
    * considered as a small scan.
    */
@@ -209,6 +210,7 @@ public class Scan extends Query {
     getScan = scan.isGetScan();
     filter = scan.getFilter(); // clone?
     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
+    consistency = scan.getConsistency();
     TimeRange ctr = scan.getTimeRange();
     tr = new TimeRange(ctr.getMin(), ctr.getMax());
     Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
@@ -243,6 +245,7 @@ public class Scan extends Query {
     this.tr = get.getTimeRange();
     this.familyMap = get.getFamilyMap();
     this.getScan = true;
+    this.consistency = get.getConsistency();
     for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
       setAttribute(attr.getKey(), attr.getValue());
     }
@@ -540,6 +543,7 @@ public class Scan extends Query {
   /**
    * @return RowFilter
    */
+  @Override
   public Filter getFilter() {
     return filter;
   }
@@ -578,7 +582,7 @@ public class Scan extends Query {
    * Set whether this scan is a reversed one
    * <p>
    * This is false by default which means forward(normal) scan.
-   * 
+   *
    * @param reversed if true, scan will be backward order
    * @return this
    */
@@ -631,6 +635,22 @@ public class Scan extends Query {
   }
 
   /**
+   * Returns the consistency level for this operation
+   * @return the consistency level
+   */
+  public Consistency getConsistency() {
+    return consistency;
+  }
+
+  /**
+   * Sets the consistency level for this operation
+   * @param consistency the consistency level
+   */
+  public void setConsistency(Consistency consistency) {
+    this.consistency = consistency;
+  }
+
+  /**
    * Compile the table and column family (i.e. schema) information
    * into a String. Useful for parsing and aggregation by debugging,
    * logging, and administration tools.
@@ -695,15 +715,15 @@ public class Scan extends Query {
         colCount += entry.getValue().size();
         if (maxCols <= 0) {
           continue;
-        } 
+        }
         for (byte [] column : entry.getValue()) {
           if (--maxCols <= 0) {
             continue;
           }
           columns.add(Bytes.toStringBinary(column));
         }
-      } 
-    }       
+      }
+    }
     map.put("totalColumns", colCount);
     if (this.filter != null) {
       map.put("filter", this.filter.toString());
@@ -741,10 +761,10 @@ public class Scan extends Query {
    * Set the isolation level for this scan. If the
    * isolation level is set to READ_UNCOMMITTED, then
    * this scan will return data from committed and
-   * uncommitted transactions. If the isolation level 
-   * is set to READ_COMMITTED, then this scan will return 
+   * uncommitted transactions. If the isolation level
+   * is set to READ_COMMITTED, then this scan will return
    * data from committed transactions only. If a isolation
-   * level is not explicitly set on a Scan, then it 
+   * level is not explicitly set on a Scan, then it
    * is assumed to be READ_COMMITTED.
    * @param level IsolationLevel for this scan
    */
@@ -753,7 +773,7 @@ public class Scan extends Query {
   }
   /*
    * @return The isolation level of this scan.
-   * If no isolation level was set for this scan object, 
+   * If no isolation level was set for this scan object,
    * then it returns READ_COMMITTED.
    * @return The IsolationLevel for this scan
    */
@@ -767,20 +787,20 @@ public class Scan extends Query {
    * Set whether this scan is a small scan
    * <p>
    * Small scan should use pread and big scan can use seek + read
-   * 
+   *
    * seek + read is fast but can cause two problem (1) resource contention (2)
    * cause too much network io
-   * 
+   *
    * [89-fb] Using pread for non-compaction read request
    * https://issues.apache.org/jira/browse/HBASE-7266
-   * 
+   *
    * On the other hand, if setting it true, we would do
    * openScanner,next,closeScanner in one RPC call. It means the better
    * performance for small scan. [HBASE-9488].
-   * 
+   *
    * Generally, if the scan range is within one data block(64KB), it could be
    * considered as a small scan.
-   * 
+   *
    * @param small
    */
   public void setSmall(boolean small) {
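
Since both the Scan(Scan) and Scan(Get) copy constructors now carry the
field across, a Get promoted to a Scan keeps its requested level. A fragment
(row key invented, imports as above):

  Get get = new Get(Bytes.toBytes("row1"));
  get.setConsistency(Consistency.TIMELINE);
  Scan scan = new Scan(get);                  // consistency copied from the Get
  assert scan.getConsistency() == Consistency.TIMELINE;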

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 40ab472..5ec2a3a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -10,7 +10,7 @@
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distr=ibuted on an "AS IS" BASIS,
+ * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
@@ -36,7 +36,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableSet;
 
-import com.google.protobuf.HBaseZeroCopyByteString;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -136,6 +136,7 @@ import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
+import com.google.protobuf.HBaseZeroCopyByteString;
 import com.google.protobuf.InvalidProtocolBufferException;
 import com.google.protobuf.Message;
 import com.google.protobuf.Parser;
@@ -167,10 +168,19 @@ public final class ProtobufUtil {
   private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY);
   private final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true);
   private final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false);
+  private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true);
+  private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE
+    = Result.create((Cell[])null, true, true);
+  private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE
+    = Result.create((Cell[])null, false, true);
 
   private final static ClientProtos.Result EMPTY_RESULT_PB;
   private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE;
   private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE;
+  private final static ClientProtos.Result EMPTY_RESULT_PB_STALE;
+  private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE;
+  private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE;
+
 
   static {
     ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder();
@@ -179,15 +189,21 @@ public final class ProtobufUtil {
     builder.setAssociatedCellCount(0);
     EMPTY_RESULT_PB_EXISTS_TRUE =  builder.build();
 
+    builder.setStale(true);
+    EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build();
     builder.clear();
 
     builder.setExists(false);
     builder.setAssociatedCellCount(0);
     EMPTY_RESULT_PB_EXISTS_FALSE =  builder.build();
+    builder.setStale(true);
+    EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build();
 
     builder.clear();
     builder.setAssociatedCellCount(0);
     EMPTY_RESULT_PB =  builder.build();
+    builder.setStale(true);
+    EMPTY_RESULT_PB_STALE = builder.build();
   }
 
   /**
@@ -461,9 +477,28 @@ public final class ProtobufUtil {
     if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){
       get.setClosestRowBefore(true);
     }
+    if (proto.hasConsistency()) {
+      get.setConsistency(toConsistency(proto.getConsistency()));
+    }
     return get;
   }
 
+  public static Consistency toConsistency(ClientProtos.Consistency consistency) {
+    switch (consistency) {
+      case STRONG : return Consistency.STRONG;
+      case TIMELINE : return Consistency.TIMELINE;
+      default : return Consistency.STRONG;
+    }
+  }
+
+  public static ClientProtos.Consistency toConsistency(Consistency consistency) {
+    switch (consistency) {
+      case STRONG : return ClientProtos.Consistency.STRONG;
+      case TIMELINE : return ClientProtos.Consistency.TIMELINE;
+      default : return ClientProtos.Consistency.STRONG;
+    }
+  }
+
   /**
    * Convert a protocol buffer Mutate to a Put.
    *
@@ -1006,6 +1041,10 @@ public final class ProtobufUtil {
     if (get.isClosestRowBefore()){
       builder.setClosestRowBefore(true);
     }
+    if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
+      builder.setConsistency(toConsistency(get.getConsistency()));
+    }
+
     return builder.build();
   }
 
@@ -1198,7 +1237,7 @@ public final class ProtobufUtil {
 
     Cell[] cells = result.rawCells();
     if (cells == null || cells.length == 0) {
-      return EMPTY_RESULT_PB;
+      return result.isStale() ? EMPTY_RESULT_PB_STALE : EMPTY_RESULT_PB;
     }
 
     ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder();
@@ -1206,6 +1245,8 @@ public final class ProtobufUtil {
       builder.addCell(toCell(c));
     }
 
+    builder.setStale(result.isStale());
+
     return builder.build();
   }
 
@@ -1229,9 +1270,10 @@ public final class ProtobufUtil {
   public static ClientProtos.Result toResultNoData(final Result result) {
     if (result.getExists() != null) return toResult(result.getExists());
     int size = result.size();
-    if (size == 0) return EMPTY_RESULT_PB;
+    if (size == 0) return result.isStale() ? EMPTY_RESULT_PB_STALE : EMPTY_RESULT_PB;
     ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder();
     builder.setAssociatedCellCount(size);
+    builder.setStale(result.isStale());
     return builder.build();
   }
 
@@ -1243,19 +1285,22 @@ public final class ProtobufUtil {
    */
   public static Result toResult(final ClientProtos.Result proto) {
     if (proto.hasExists()) {
+      if (proto.getStale()) {
+        return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE;
+      }
       return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE;
     }
 
     List<CellProtos.Cell> values = proto.getCellList();
     if (values.isEmpty()){
-      return EMPTY_RESULT;
+      return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT;
     }
 
     List<Cell> cells = new ArrayList<Cell>(values.size());
     for (CellProtos.Cell c : values) {
       cells.add(toCell(c));
     }
-    return Result.create(cells, null);
+    return Result.create(cells, null, proto.getStale());
   }
 
   /**
@@ -1275,6 +1320,9 @@ public final class ProtobufUtil {
           (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) {
         throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto);
       }
+      if (proto.getStale()) {
+        return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE;
+      }
       return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE;
     }
 
@@ -1296,7 +1344,9 @@ public final class ProtobufUtil {
       }
     }
 
-    return (cells == null || cells.isEmpty()) ? EMPTY_RESULT : Result.create(cells, null);
+    return (cells == null || cells.isEmpty())
+        ? (proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT)
+        : Result.create(cells, null, proto.getStale());
   }
 
 

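The two toConsistency helpers are symmetric and fall back to STRONG for
anything unrecognized, and the stale bit survives a protobuf round trip even
through the new shared empty-result singletons. A fragment (assumes
ProtobufUtil from org.apache.hadoop.hbase.protobuf and ClientProtos from
org.apache.hadoop.hbase.protobuf.generated, plus the imports above):

  // enum round trip
  ClientProtos.Consistency pb = ProtobufUtil.toConsistency(Consistency.TIMELINE);
  assert ProtobufUtil.toConsistency(pb) == Consistency.TIMELINE;

  // stale bit round trip, including the empty-result fast path
  Result empty = Result.create(new Cell[0], null, true);
  assert ProtobufUtil.toResult(ProtobufUtil.toResult(empty)).isStale();
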
http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index fd81fe3..bd1c216 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -8,6 +8,93 @@ public final class ClientProtos {
   public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
   }
+  /**
+   * Protobuf enum {@code Consistency}
+   *
+   * <pre>
+   **
+   * Consistency defines the expected consistency level for an operation.
+   * </pre>
+   */
+  public enum Consistency
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>STRONG = 0;</code>
+     */
+    STRONG(0, 0),
+    /**
+     * <code>TIMELINE = 1;</code>
+     */
+    TIMELINE(1, 1),
+    ;
+
+    /**
+     * <code>STRONG = 0;</code>
+     */
+    public static final int STRONG_VALUE = 0;
+    /**
+     * <code>TIMELINE = 1;</code>
+     */
+    public static final int TIMELINE_VALUE = 1;
+
+
+    public final int getNumber() { return value; }
+
+    public static Consistency valueOf(int value) {
+      switch (value) {
+        case 0: return STRONG;
+        case 1: return TIMELINE;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<Consistency>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<Consistency>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<Consistency>() {
+            public Consistency findValueByNumber(int number) {
+              return Consistency.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor().getEnumTypes().get(0);
+    }
+
+    private static final Consistency[] VALUES = values();
+
+    public static Consistency valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int index;
+    private final int value;
+
+    private Consistency(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:Consistency)
+  }
+
   public interface AuthorizationsOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -1858,6 +1945,16 @@ public final class ClientProtos {
      * </pre>
      */
     boolean getClosestRowBefore();
+
+    // optional .Consistency consistency = 12 [default = STRONG];
+    /**
+     * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+     */
+    boolean hasConsistency();
+    /**
+     * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency();
   }
   /**
    * Protobuf type {@code Get}
@@ -1995,6 +2092,17 @@ public final class ClientProtos {
               closestRowBefore_ = input.readBool();
               break;
             }
+            case 96: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(12, rawValue);
+              } else {
+                bitField0_ |= 0x00000200;
+                consistency_ = value;
+              }
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2289,6 +2397,22 @@ public final class ClientProtos {
       return closestRowBefore_;
     }
 
+    // optional .Consistency consistency = 12 [default = STRONG];
+    public static final int CONSISTENCY_FIELD_NUMBER = 12;
+    private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
+    /**
+     * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+     */
+    public boolean hasConsistency() {
+      return ((bitField0_ & 0x00000200) == 0x00000200);
+    }
+    /**
+     * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency() {
+      return consistency_;
+    }
+
     private void initFields() {
       row_ = com.google.protobuf.ByteString.EMPTY;
       column_ = java.util.Collections.emptyList();
@@ -2301,6 +2425,7 @@ public final class ClientProtos {
       storeOffset_ = 0;
       existenceOnly_ = false;
       closestRowBefore_ = false;
+      consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2369,6 +2494,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000100) == 0x00000100)) {
         output.writeBool(11, closestRowBefore_);
       }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        output.writeEnum(12, consistency_.getNumber());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -2422,6 +2550,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(11, closestRowBefore_);
       }
+      if (((bitField0_ & 0x00000200) == 0x00000200)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(12, consistency_.getNumber());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -2494,6 +2626,11 @@ public final class ClientProtos {
         result = result && (getClosestRowBefore()
             == other.getClosestRowBefore());
       }
+      result = result && (hasConsistency() == other.hasConsistency());
+      if (hasConsistency()) {
+        result = result &&
+            (getConsistency() == other.getConsistency());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -2551,6 +2688,10 @@ public final class ClientProtos {
         hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getClosestRowBefore());
       }
+      if (hasConsistency()) {
+        hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getConsistency());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -2710,6 +2851,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000200);
         closestRowBefore_ = false;
         bitField0_ = (bitField0_ & ~0x00000400);
+        consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+        bitField0_ = (bitField0_ & ~0x00000800);
         return this;
       }
 
@@ -2800,6 +2943,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000100;
         }
         result.closestRowBefore_ = closestRowBefore_;
+        if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+          to_bitField0_ |= 0x00000200;
+        }
+        result.consistency_ = consistency_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -2895,6 +3042,9 @@ public final class ClientProtos {
         if (other.hasClosestRowBefore()) {
           setClosestRowBefore(other.getClosestRowBefore());
         }
+        if (other.hasConsistency()) {
+          setConsistency(other.getConsistency());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -3932,6 +4082,42 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional .Consistency consistency = 12 [default = STRONG];
+      private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+      /**
+       * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+       */
+      public boolean hasConsistency() {
+        return ((bitField0_ & 0x00000800) == 0x00000800);
+      }
+      /**
+       * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency() {
+        return consistency_;
+      }
+      /**
+       * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+       */
+      public Builder setConsistency(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000800;
+        consistency_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional .Consistency consistency = 12 [default = STRONG];</code>
+       */
+      public Builder clearConsistency() {
+        bitField0_ = (bitField0_ & ~0x00000800);
+        consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:Get)
     }
 
@@ -4043,6 +4229,24 @@ public final class ClientProtos {
      * </pre>
      */
     boolean getExists();
+
+    // optional bool stale = 4 [default = false];
+    /**
+     * <code>optional bool stale = 4 [default = false];</code>
+     *
+     * <pre>
+     * Whether or not the results are coming from possibly stale data 
+     * </pre>
+     */
+    boolean hasStale();
+    /**
+     * <code>optional bool stale = 4 [default = false];</code>
+     *
+     * <pre>
+     * Whether or not the results are coming from possibly stale data 
+     * </pre>
+     */
+    boolean getStale();
   }
   /**
    * Protobuf type {@code Result}
@@ -4113,6 +4317,11 @@ public final class ClientProtos {
               exists_ = input.readBool();
               break;
             }
+            case 32: {
+              bitField0_ |= 0x00000004;
+              stale_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4277,10 +4486,35 @@ public final class ClientProtos {
       return exists_;
     }
 
+    // optional bool stale = 4 [default = false];
+    public static final int STALE_FIELD_NUMBER = 4;
+    private boolean stale_;
+    /**
+     * <code>optional bool stale = 4 [default = false];</code>
+     *
+     * <pre>
+     * Whether or not the results are coming from possibly stale data 
+     * </pre>
+     */
+    public boolean hasStale() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional bool stale = 4 [default = false];</code>
+     *
+     * <pre>
+     * Whether or not the results are coming from possibly stale data 
+     * </pre>
+     */
+    public boolean getStale() {
+      return stale_;
+    }
+
     private void initFields() {
       cell_ = java.util.Collections.emptyList();
       associatedCellCount_ = 0;
       exists_ = false;
+      stale_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -4303,6 +4537,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeBool(3, exists_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBool(4, stale_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -4324,6 +4561,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(3, exists_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(4, stale_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -4359,6 +4600,11 @@ public final class ClientProtos {
         result = result && (getExists()
             == other.getExists());
       }
+      result = result && (hasStale() == other.hasStale());
+      if (hasStale()) {
+        result = result && (getStale()
+            == other.getStale());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -4384,6 +4630,10 @@ public final class ClientProtos {
         hash = (37 * hash) + EXISTS_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getExists());
       }
+      if (hasStale()) {
+        hash = (37 * hash) + STALE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getStale());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -4504,6 +4754,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000002);
         exists_ = false;
         bitField0_ = (bitField0_ & ~0x00000004);
+        stale_ = false;
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
@@ -4549,6 +4801,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000002;
         }
         result.exists_ = exists_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.stale_ = stale_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -4597,6 +4853,9 @@ public final class ClientProtos {
         if (other.hasExists()) {
           setExists(other.getExists());
         }
+        if (other.hasStale()) {
+          setStale(other.getStale());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -5076,6 +5335,55 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional bool stale = 4 [default = false];
+      private boolean stale_ ;
+      /**
+       * <code>optional bool stale = 4 [default = false];</code>
+       *
+       * <pre>
+       * Whether or not the results are coming from possibly stale data 
+       * </pre>
+       */
+      public boolean hasStale() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional bool stale = 4 [default = false];</code>
+       *
+       * <pre>
+       * Whether or not the results are coming from possibly stale data 
+       * </pre>
+       */
+      public boolean getStale() {
+        return stale_;
+      }
+      /**
+       * <code>optional bool stale = 4 [default = false];</code>
+       *
+       * <pre>
+       * Whether or not the results are coming from possibly stale data 
+       * </pre>
+       */
+      public Builder setStale(boolean value) {
+        bitField0_ |= 0x00000008;
+        stale_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool stale = 4 [default = false];</code>
+       *
+       * <pre>
+       * Whether or not the results are coming from possibly stale data 
+       * </pre>
+       */
+      public Builder clearStale() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        stale_ = false;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:Result)
     }
 
@@ -13340,6 +13648,16 @@ public final class ClientProtos {
      * <code>optional bool reversed = 15 [default = false];</code>
      */
     boolean getReversed();
+
+    // optional .Consistency consistency = 16 [default = STRONG];
+    /**
+     * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+     */
+    boolean hasConsistency();
+    /**
+     * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency();
   }
   /**
    * Protobuf type {@code Scan}
@@ -13500,6 +13818,17 @@ public final class ClientProtos {
               reversed_ = input.readBool();
               break;
             }
+            case 128: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(16, rawValue);
+              } else {
+                bitField0_ |= 0x00002000;
+                consistency_ = value;
+              }
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -13846,6 +14175,22 @@ public final class ClientProtos {
       return reversed_;
     }
 
+    // optional .Consistency consistency = 16 [default = STRONG];
+    public static final int CONSISTENCY_FIELD_NUMBER = 16;
+    private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
+    /**
+     * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+     */
+    public boolean hasConsistency() {
+      return ((bitField0_ & 0x00002000) == 0x00002000);
+    }
+    /**
+     * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency() {
+      return consistency_;
+    }
+
     private void initFields() {
       column_ = java.util.Collections.emptyList();
       attribute_ = java.util.Collections.emptyList();
@@ -13862,6 +14207,7 @@ public final class ClientProtos {
       loadColumnFamiliesOnDemand_ = false;
       small_ = false;
       reversed_ = false;
+      consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -13938,6 +14284,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00001000) == 0x00001000)) {
         output.writeBool(15, reversed_);
       }
+      if (((bitField0_ & 0x00002000) == 0x00002000)) {
+        output.writeEnum(16, consistency_.getNumber());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -14007,6 +14356,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBoolSize(15, reversed_);
       }
+      if (((bitField0_ & 0x00002000) == 0x00002000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(16, consistency_.getNumber());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -14099,6 +14452,11 @@ public final class ClientProtos {
         result = result && (getReversed()
             == other.getReversed());
       }
+      result = result && (hasConsistency() == other.hasConsistency());
+      if (hasConsistency()) {
+        result = result &&
+            (getConsistency() == other.getConsistency());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -14172,6 +14530,10 @@ public final class ClientProtos {
         hash = (37 * hash) + REVERSED_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getReversed());
       }
+      if (hasConsistency()) {
+        hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getConsistency());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -14342,6 +14704,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00002000);
         reversed_ = false;
         bitField0_ = (bitField0_ & ~0x00004000);
+        consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+        bitField0_ = (bitField0_ & ~0x00008000);
         return this;
       }
 
@@ -14448,6 +14812,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00001000;
         }
         result.reversed_ = reversed_;
+        if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
+          to_bitField0_ |= 0x00002000;
+        }
+        result.consistency_ = consistency_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -14555,6 +14923,9 @@ public final class ClientProtos {
         if (other.hasReversed()) {
           setReversed(other.getReversed());
         }
+        if (other.hasConsistency()) {
+          setConsistency(other.getConsistency());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -15699,6 +16070,42 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional .Consistency consistency = 16 [default = STRONG];
+      private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+      /**
+       * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+       */
+      public boolean hasConsistency() {
+        return ((bitField0_ & 0x00008000) == 0x00008000);
+      }
+      /**
+       * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency() {
+        return consistency_;
+      }
+      /**
+       * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+       */
+      public Builder setConsistency(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00008000;
+        consistency_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional .Consistency consistency = 16 [default = STRONG];</code>
+       */
+      public Builder clearConsistency() {
+        bitField0_ = (bitField0_ & ~0x00008000);
+        consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:Scan)
     }
 
@@ -30103,7 +30510,7 @@ public final class ClientProtos {
       "o\032\nCell.proto\032\020Comparator.proto\"\037\n\016Autho" +
       "rizations\022\r\n\005label\030\001 \003(\t\"$\n\016CellVisibili" +
       "ty\022\022\n\nexpression\030\001 \002(\t\"+\n\006Column\022\016\n\006fami" +
-      "ly\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\251\002\n\003Get\022\013\n\003r" +
+      "ly\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\324\002\n\003Get\022\013\n\003r" +
       "ow\030\001 \002(\014\022\027\n\006column\030\002 \003(\0132\007.Column\022!\n\tatt" +
       "ribute\030\003 \003(\0132\016.NameBytesPair\022\027\n\006filter\030\004" +
       " \001(\0132\007.Filter\022\036\n\ntime_range\030\005 \001(\0132\n.Time" +
@@ -30111,96 +30518,100 @@ public final class ClientProtos {
       "blocks\030\007 \001(\010:\004true\022\023\n\013store_limit\030\010 \001(\r\022",
       "\024\n\014store_offset\030\t \001(\r\022\035\n\016existence_only\030" +
       "\n \001(\010:\005false\022!\n\022closest_row_before\030\013 \001(\010" +
-      ":\005false\"L\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cell\022\035" +
-      "\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exists\030" +
-      "\003 \001(\010\"A\n\nGetRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
-      "gionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Get\"&\n\013GetR" +
-      "esponse\022\027\n\006result\030\001 \001(\0132\007.Result\"\200\001\n\tCon" +
-      "dition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tq" +
-      "ualifier\030\003 \002(\014\022\"\n\014compare_type\030\004 \002(\0162\014.C" +
-      "ompareType\022\037\n\ncomparator\030\005 \002(\0132\013.Compara",
-      "tor\"\265\006\n\rMutationProto\022\013\n\003row\030\001 \001(\014\0220\n\013mu" +
-      "tate_type\030\002 \001(\0162\033.MutationProto.Mutation" +
-      "Type\0220\n\014column_value\030\003 \003(\0132\032.MutationPro" +
-      "to.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022!\n\tatt" +
-      "ribute\030\005 \003(\0132\016.NameBytesPair\022:\n\ndurabili" +
-      "ty\030\006 \001(\0162\031.MutationProto.Durability:\013USE" +
-      "_DEFAULT\022\036\n\ntime_range\030\007 \001(\0132\n.TimeRange" +
-      "\022\035\n\025associated_cell_count\030\010 \001(\005\022\r\n\005nonce" +
-      "\030\t \001(\004\032\347\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014\022B" +
-      "\n\017qualifier_value\030\002 \003(\0132).MutationProto.",
-      "ColumnValue.QualifierValue\032\203\001\n\016Qualifier" +
-      "Value\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022" +
-      "\021\n\ttimestamp\030\003 \001(\004\022.\n\013delete_type\030\004 \001(\0162" +
-      "\031.MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014" +
-      "\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_" +
-      "WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFS" +
-      "YNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n" +
-      "\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDel" +
-      "eteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELET" +
-      "E_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002",
-      "\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\207\001\n\rMutateRe" +
-      "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" +
-      " \n\010mutation\030\002 \002(\0132\016.MutationProto\022\035\n\tcon" +
-      "dition\030\003 \001(\0132\n.Condition\022\023\n\013nonce_group\030" +
-      "\004 \001(\004\"<\n\016MutateResponse\022\027\n\006result\030\001 \001(\0132" +
-      "\007.Result\022\021\n\tprocessed\030\002 \001(\010\"\375\002\n\004Scan\022\027\n\006" +
-      "column\030\001 \003(\0132\007.Column\022!\n\tattribute\030\002 \003(\013" +
-      "2\016.NameBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010s" +
-      "top_row\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007.Filter\022\036" +
-      "\n\ntime_range\030\006 \001(\0132\n.TimeRange\022\027\n\014max_ve",
-      "rsions\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004t" +
-      "rue\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_si" +
-      "ze\030\n \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_o" +
-      "ffset\030\014 \001(\r\022&\n\036load_column_families_on_d" +
-      "emand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017" +
-      " \001(\010:\005false\"\236\001\n\013ScanRequest\022 \n\006region\030\001 " +
-      "\001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 \001(\0132\005.Sc" +
-      "an\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows" +
-      "\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_ca" +
-      "ll_seq\030\006 \001(\004\"y\n\014ScanResponse\022\030\n\020cells_pe",
-      "r_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014mo" +
-      "re_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results" +
-      "\030\005 \003(\0132\007.Result\"\263\001\n\024BulkLoadHFileRequest" +
-      "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225\n\013fa" +
-      "mily_path\030\002 \003(\0132 .BulkLoadHFileRequest.F" +
-      "amilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFam" +
-      "ilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n" +
-      "\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a" +
-      "\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n" +
-      "\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t",
-      "\022\017\n\007request\030\004 \002(\014\"9\n\030CoprocessorServiceR" +
-      "esult\022\035\n\005value\030\001 \001(\0132\016.NameBytesPair\"d\n\031" +
-      "CoprocessorServiceRequest\022 \n\006region\030\001 \002(" +
-      "\0132\020.RegionSpecifier\022%\n\004call\030\002 \002(\0132\027.Copr" +
-      "ocessorServiceCall\"]\n\032CoprocessorService" +
-      "Response\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
-      "er\022\035\n\005value\030\002 \002(\0132\016.NameBytesPair\"{\n\006Act" +
-      "ion\022\r\n\005index\030\001 \001(\r\022 \n\010mutation\030\002 \001(\0132\016.M" +
-      "utationProto\022\021\n\003get\030\003 \001(\0132\004.Get\022-\n\014servi" +
-      "ce_call\030\004 \001(\0132\027.CoprocessorServiceCall\"Y",
-      "\n\014RegionAction\022 \n\006region\030\001 \002(\0132\020.RegionS" +
-      "pecifier\022\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\013" +
-      "2\007.Action\"\221\001\n\021ResultOrException\022\r\n\005index" +
-      "\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Result\022!\n\texcep" +
-      "tion\030\003 \001(\0132\016.NameBytesPair\0221\n\016service_re" +
-      "sult\030\004 \001(\0132\031.CoprocessorServiceResult\"f\n" +
-      "\022RegionActionResult\022-\n\021resultOrException" +
-      "\030\001 \003(\0132\022.ResultOrException\022!\n\texception\030" +
-      "\002 \001(\0132\016.NameBytesPair\"G\n\014MultiRequest\022#\n" +
-      "\014regionAction\030\001 \003(\0132\r.RegionAction\022\022\n\nno",
-      "nceGroup\030\002 \001(\004\"@\n\rMultiResponse\022/\n\022regio" +
-      "nActionResult\030\001 \003(\0132\023.RegionActionResult" +
-      "2\261\002\n\rClientService\022 \n\003Get\022\013.GetRequest\032\014" +
-      ".GetResponse\022)\n\006Mutate\022\016.MutateRequest\032\017" +
-      ".MutateResponse\022#\n\004Scan\022\014.ScanRequest\032\r." +
-      "ScanResponse\022>\n\rBulkLoadHFile\022\025.BulkLoad" +
-      "HFileRequest\032\026.BulkLoadHFileResponse\022F\n\013" +
-      "ExecService\022\032.CoprocessorServiceRequest\032" +
-      "\033.CoprocessorServiceResponse\022&\n\005Multi\022\r." +
-      "MultiRequest\032\016.MultiResponseBB\n*org.apac",
-      "he.hadoop.hbase.protobuf.generatedB\014Clie" +
-      "ntProtosH\001\210\001\001\240\001\001"
+      ":\005false\022)\n\013consistency\030\014 \001(\0162\014.Consisten" +
+      "cy:\006STRONG\"b\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cel" +
+      "l\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exis" +
+      "ts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\"A\n\nGetReq" +
+      "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\021" +
+      "\n\003get\030\002 \002(\0132\004.Get\"&\n\013GetResponse\022\027\n\006resu" +
+      "lt\030\001 \001(\0132\007.Result\"\200\001\n\tCondition\022\013\n\003row\030\001" +
+      " \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022",
+      "\"\n\014compare_type\030\004 \002(\0162\014.CompareType\022\037\n\nc" +
+      "omparator\030\005 \002(\0132\013.Comparator\"\265\006\n\rMutatio" +
+      "nProto\022\013\n\003row\030\001 \001(\014\0220\n\013mutate_type\030\002 \001(\016" +
+      "2\033.MutationProto.MutationType\0220\n\014column_" +
+      "value\030\003 \003(\0132\032.MutationProto.ColumnValue\022" +
+      "\021\n\ttimestamp\030\004 \001(\004\022!\n\tattribute\030\005 \003(\0132\016." +
+      "NameBytesPair\022:\n\ndurability\030\006 \001(\0162\031.Muta" +
+      "tionProto.Durability:\013USE_DEFAULT\022\036\n\ntim" +
+      "e_range\030\007 \001(\0132\n.TimeRange\022\035\n\025associated_" +
+      "cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\347\001\n\013Colu",
+      "mnValue\022\016\n\006family\030\001 \002(\014\022B\n\017qualifier_val" +
+      "ue\030\002 \003(\0132).MutationProto.ColumnValue.Qua" +
+      "lifierValue\032\203\001\n\016QualifierValue\022\021\n\tqualif" +
+      "ier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimestamp\030\003 " +
+      "\001(\004\022.\n\013delete_type\030\004 \001(\0162\031.MutationProto" +
+      ".DeleteType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022" +
+      "\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_" +
+      "WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mu" +
+      "tationType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n" +
+      "\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELE",
+      "TE_ONE_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERS" +
+      "IONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMI" +
+      "LY_VERSION\020\003\"\207\001\n\rMutateRequest\022 \n\006region" +
+      "\030\001 \002(\0132\020.RegionSpecifier\022 \n\010mutation\030\002 \002" +
+      "(\0132\016.MutationProto\022\035\n\tcondition\030\003 \001(\0132\n." +
+      "Condition\022\023\n\013nonce_group\030\004 \001(\004\"<\n\016Mutate" +
+      "Response\022\027\n\006result\030\001 \001(\0132\007.Result\022\021\n\tpro" +
+      "cessed\030\002 \001(\010\"\250\003\n\004Scan\022\027\n\006column\030\001 \003(\0132\007." +
+      "Column\022!\n\tattribute\030\002 \003(\0132\016.NameBytesPai" +
+      "r\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022\027",
+      "\n\006filter\030\005 \001(\0132\007.Filter\022\036\n\ntime_range\030\006 " +
+      "\001(\0132\n.TimeRange\022\027\n\014max_versions\030\007 \001(\r:\0011" +
+      "\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nbatch_si" +
+      "ze\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022\023\n\013sto" +
+      "re_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036" +
+      "load_column_families_on_demand\030\r \001(\010\022\r\n\005" +
+      "small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005false\022)\n\013" +
+      "consistency\030\020 \001(\0162\014.Consistency:\006STRONG\"" +
+      "\236\001\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.Region" +
+      "Specifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\022\n\nscanne",
+      "r_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rcl" +
+      "ose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004" +
+      "\"y\n\014ScanResponse\022\030\n\020cells_per_result\030\001 \003" +
+      "(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003" +
+      " \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Res" +
+      "ult\"\263\001\n\024BulkLoadHFileRequest\022 \n\006region\030\001" +
+      " \002(\0132\020.RegionSpecifier\0225\n\013family_path\030\002 " +
+      "\003(\0132 .BulkLoadHFileRequest.FamilyPath\022\026\n" +
+      "\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006f" +
+      "amily\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFi",
+      "leResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Coprocesso" +
+      "rServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_nam" +
+      "e\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030" +
+      "\004 \002(\014\"9\n\030CoprocessorServiceResult\022\035\n\005val" +
+      "ue\030\001 \001(\0132\016.NameBytesPair\"d\n\031CoprocessorS" +
+      "erviceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" +
+      "ecifier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServi" +
+      "ceCall\"]\n\032CoprocessorServiceResponse\022 \n\006" +
+      "region\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030" +
+      "\002 \002(\0132\016.NameBytesPair\"{\n\006Action\022\r\n\005index",
+      "\030\001 \001(\r\022 \n\010mutation\030\002 \001(\0132\016.MutationProto" +
+      "\022\021\n\003get\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004 \001(" +
+      "\0132\027.CoprocessorServiceCall\"Y\n\014RegionActi" +
+      "on\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006" +
+      "atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"\221\001" +
+      "\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022\027\n\006re" +
+      "sult\030\002 \001(\0132\007.Result\022!\n\texception\030\003 \001(\0132\016" +
+      ".NameBytesPair\0221\n\016service_result\030\004 \001(\0132\031" +
+      ".CoprocessorServiceResult\"f\n\022RegionActio" +
+      "nResult\022-\n\021resultOrException\030\001 \003(\0132\022.Res",
+      "ultOrException\022!\n\texception\030\002 \001(\0132\016.Name" +
+      "BytesPair\"G\n\014MultiRequest\022#\n\014regionActio" +
+      "n\030\001 \003(\0132\r.RegionAction\022\022\n\nnonceGroup\030\002 \001" +
+      "(\004\"@\n\rMultiResponse\022/\n\022regionActionResul" +
+      "t\030\001 \003(\0132\023.RegionActionResult*\'\n\013Consiste" +
+      "ncy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\261\002\n\rClient" +
+      "Service\022 \n\003Get\022\013.GetRequest\032\014.GetRespons" +
+      "e\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateResp" +
+      "onse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespons" +
+      "e\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileReques",
+      "t\032\026.BulkLoadHFileResponse\022F\n\013ExecService" +
+      "\022\032.CoprocessorServiceRequest\032\033.Coprocess" +
+      "orServiceResponse\022&\n\005Multi\022\r.MultiReques" +
+      "t\032\016.MultiResponseBB\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\014ClientProtosH\001\210" +
+      "\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -30230,13 +30641,13 @@ public final class ClientProtos {
           internal_static_Get_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_Get_descriptor,
-              new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", });
+              new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", });
           internal_static_Result_descriptor =
             getDescriptor().getMessageTypes().get(4);
           internal_static_Result_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_Result_descriptor,
-              new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", });
+              new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", "Stale", });
           internal_static_GetRequest_descriptor =
             getDescriptor().getMessageTypes().get(5);
           internal_static_GetRequest_fieldAccessorTable = new
@@ -30290,7 +30701,7 @@ public final class ClientProtos {
           internal_static_Scan_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_Scan_descriptor,
-              new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", });
+              new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", });
           internal_static_ScanRequest_descriptor =
             getDescriptor().getMessageTypes().get(12);
           internal_static_ScanRequest_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 631d0cd..b8ad8d0 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -52,6 +52,14 @@ message Column {
 }
 
 /**
+ * Consistency defines the expected consistency level for an operation.
+ */
+enum Consistency {
+  STRONG   = 0;
+  TIMELINE = 1;
+}
+
+/**
  * The protocol buffer version of Get.
  * Unless existence_only is specified, return all the requested data
  * for the row that matches exactly, or the one that immediately
@@ -75,6 +83,8 @@ message Get {
   // If the row to get doesn't exist, return the
   // closest row before.
   optional bool closest_row_before = 11 [default = false];
+
+  optional Consistency consistency = 12 [default = STRONG];
 }
 
 message Result {
@@ -92,6 +102,9 @@ message Result {
   // used for Get to check existence only. Not set if existence_only was not set to true
   //  in the query.
   optional bool exists = 3;
+
+  // Whether or not the results are from possibly stale data.
+  optional bool stale = 4 [default = false];
 }
 
 /**
@@ -233,6 +246,7 @@ message Scan {
   optional bool load_column_families_on_demand = 13; /* DO NOT add defaults to load_column_families_on_demand. */
   optional bool small = 14;
   optional bool reversed = 15 [default = false];
+  optional Consistency consistency = 16 [default = STRONG];
 }
 
 /**

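The proto additions above are surfaced through the Java client API changed in
this commit. Below is a minimal sketch of a per-request TIMELINE read. The
Get#setConsistency setter is confirmed by the shell plumbing further down in
this diff; the isStale() accessor for the new Result#stale field is an assumed
name, and the configuration, table name, and row key are illustrative only:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Consistency;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  public class TimelineGetExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Table and row names are hypothetical.
      HTable table = new HTable(conf, TableName.valueOf("t1"));
      try {
        Get get = new Get(Bytes.toBytes("r1"));
        // Override the STRONG default for this request only.
        get.setConsistency(Consistency.TIMELINE);
        Result result = table.get(get);
        // The new stale flag marks results that may come from stale data;
        // the accessor name is an assumption here.
        if (result.isStale()) {
          // Fall back, retry with STRONG, or accept the stale read.
        }
      } finally {
        table.close();
      }
    }
  }
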
http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-shell/src/main/ruby/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb
index fcd11fc..7ac5e0d 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -62,6 +62,7 @@ module HBaseConstants
   VISIBILITY="VISIBILITY"
   AUTHORIZATIONS = "AUTHORIZATIONS"
   SKIP_FLUSH = 'SKIP_FLUSH'
+  CONSISTENCY = "CONSISTENCY"
 
   # Load constants from hbase java API
   def self.promote_constants(constants)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-shell/src/main/ruby/hbase/table.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index 1932043..5f71070 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -270,6 +270,7 @@ EOF
       filter = args.delete(FILTER) if args[FILTER]
       attributes = args[ATTRIBUTES]
       authorizations = args[AUTHORIZATIONS]
+      consistency = args.delete(CONSISTENCY) if args[CONSISTENCY]
       unless args.empty?
         columns = args[COLUMN] || args[COLUMNS]
         if args[VERSIONS]
@@ -317,8 +318,8 @@ EOF
           get.setTimeStamp(ts.to_i) if args[TIMESTAMP]
           get.setTimeRange(args[TIMERANGE][0], args[TIMERANGE][1]) if args[TIMERANGE]
         end
-          set_attributes(get, attributes) if attributes
-          set_authorizations(get, authorizations) if authorizations  
+        set_attributes(get, attributes) if attributes
+        set_authorizations(get, authorizations) if authorizations
       end
 
       unless filter.class == String
@@ -327,6 +328,8 @@ EOF
         get.setFilter(org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter))
       end
 
+      get.setConsistency(org.apache.hadoop.hbase.client.Consistency.valueOf(consistency)) if consistency
+
       # Call hbase for the results
       result = @table.get(get)
       return nil if result.isEmpty
@@ -385,6 +388,7 @@ EOF
         raw = args["RAW"] || false
         attributes = args[ATTRIBUTES]
         authorizations = args[AUTHORIZATIONS]
+        consistency = args[CONSISTENCY]
         # Normalize column names
         columns = [columns] if columns.class == String
         unless columns.kind_of?(Array)
@@ -421,6 +425,7 @@ EOF
         scan.setRaw(raw)
         set_attributes(scan, attributes) if attributes
         set_authorizations(scan, authorizations) if authorizations
+        scan.setConsistency(org.apache.hadoop.hbase.client.Consistency.valueOf(consistency)) if consistency
       else
         scan = org.apache.hadoop.hbase.client.Scan.new
       end

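The CONSISTENCY option wired through the shell above is a thin wrapper over
the same Java setter on Scan. A rough Java equivalent of
scan 't1', {CONSISTENCY => 'TIMELINE'} follows, reusing the connection setup
from the earlier sketch (with org.apache.hadoop.hbase.client.Scan and
ResultScanner additionally imported; names remain illustrative):

  Scan scan = new Scan();
  // Per-request consistency; under TIMELINE, each Result may be
  // individually marked stale.
  scan.setConsistency(Consistency.TIMELINE);
  ResultScanner scanner = table.getScanner(scan);
  try {
    for (Result r : scanner) {
      // Process r; check r.isStale() if staleness matters
      // (accessor name assumed, as above).
    }
  } finally {
    scanner.close();
  }
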
http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-shell/src/main/ruby/shell/commands/get.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get.rb b/hbase-shell/src/main/ruby/shell/commands/get.rb
index 817abc5..0035310 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get.rb
@@ -37,8 +37,9 @@ a dictionary of column(s), timestamp, timerange and versions. Examples:
   hbase> get 't1', 'r1', 'c1'
   hbase> get 't1', 'r1', 'c1', 'c2'
   hbase> get 't1', 'r1', ['c1', 'c2']
-  hbsase> get 't1','r1', {COLUMN => 'c1', ATTRIBUTES => {'mykey'=>'myvalue'}}
-  hbsase> get 't1','r1', {COLUMN => 'c1', AUTHORIZATIONS => ['PRIVATE','SECRET']}
+  hbase> get 't1', 'r1', {COLUMN => 'c1', ATTRIBUTES => {'mykey'=>'myvalue'}}
+  hbase> get 't1', 'r1', {COLUMN => 'c1', AUTHORIZATIONS => ['PRIVATE','SECRET']}
+  hbase> get 't1', 'r1', {CONSISTENCY => 'TIMELINE'}
 
 Besides the default 'toStringBinary' format, 'get' also supports custom formatting by
 column.  A user can define a FORMATTER by adding it to the column name in the get
@@ -69,6 +70,7 @@ would be:
   hbase> t.get 'r1', 'c1'
   hbase> t.get 'r1', 'c1', 'c2'
   hbase> t.get 'r1', ['c1', 'c2']
+  hbase> t.get 'r1', {CONSISTENCY => 'TIMELINE'}
 EOF
       end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ea476b/hbase-shell/src/main/ruby/shell/commands/scan.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/scan.rb b/hbase-shell/src/main/ruby/shell/commands/scan.rb
index 4c6f1c1..8ce8d46 100644
--- a/hbase-shell/src/main/ruby/shell/commands/scan.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/scan.rb
@@ -48,6 +48,7 @@ Some examples:
     (QualifierFilter (>=, 'binary:xyz'))) AND (TimestampsFilter ( 123, 456))"}
   hbase> scan 't1', {FILTER =>
     org.apache.hadoop.hbase.filter.ColumnPaginationFilter.new(1, 0)}
+  hbase> scan 't1', {CONSISTENCY => 'TIMELINE'}
 For setting the Operation Attributes 
   hbase> scan 't1', { COLUMNS => ['c1', 'c2'], ATTRIBUTES => {'mykey' => 'myvalue'}}
   hbase> scan 't1', { COLUMNS => ['c1', 'c2'], AUTHORIZATIONS => ['PRIVATE','SECRET']}