Posted to commits@hbase.apache.org by st...@apache.org on 2017/07/06 20:32:22 UTC

[2/3] hbase git commit: HBASE-14070 - Core HLC (Sai Teja Ranuva) Rebased by Amit Patel

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 11301d8..4dd62f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -31,14 +31,9 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -94,6 +89,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
   private final int minVersions;
   private final long maxRowSize;
   private final long cellsPerHeartbeatCheck;
+  private final TimestampType timestampType;
 
   // 1) Collects all the KVHeap that are eagerly getting closed during the
   //    course of a scan
@@ -176,8 +172,17 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     int numCol = columns == null ? 0 : columns.size();
     explicitColumnQuery = numCol > 0;
     this.scan = scan;
-    this.now = EnvironmentEdgeManager.currentTime();
-    this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl();
+
+    this.now = this.store != null ? this.store.getClock().now() :
+        new Clock.System().now();
+    this.timestampType = this.store != null ?
+        this.store.getClock().getTimestampType() : TimestampType.PHYSICAL;
+    // Convert to milliseconds before subtracting time
+    long diff = this.timestampType.toEpochTimeMillisFromTimestamp(now) - scanInfo.getTtl();
+    // Prevent overflow if diff is negative and timestampType is HYBRID
+    diff = diff > 0 ? timestampType.fromEpochTimeMillisToTimestamp(diff) : 0L;
+    this.oldestUnexpiredTS = scan.isRaw() ? 0L : diff;
+
     this.minVersions = scanInfo.getMinVersions();
 
      // We look up row-column Bloom filters for multi-column queries as part of
@@ -205,6 +210,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       // readType is default if the scan keeps running for a long time.
       this.scanUsePread = this.readType != Scan.ReadType.STREAM;
     }
+
     this.preadMaxBytes = scanInfo.getPreadMaxBytes();
     this.cellsPerHeartbeatCheck = scanInfo.getCellsPerTimeoutCheck();
     // Parallel seeking is on if the config allows and there is more than one store file.
@@ -237,8 +243,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     if (columns != null && scan.isRaw()) {
       throw new DoNotRetryIOException("Cannot specify any column for a raw scan");
     }
-    matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now,
-      store.getCoprocessorHost());
+    matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, store
+        .getCoprocessorHost());
 
     this.store.addChangedReaderObserver(this);
 
@@ -314,12 +320,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       // use legacy query matcher since we do not consider the scan object in our code. Only used to
       // keep compatibility for coprocessor.
       matcher = LegacyScanQueryMatcher.create(scan, scanInfo, null, scanType, smallestReadPoint,
-        earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow,
-        store.getCoprocessorHost());
+        earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow,
+          dropDeletesToRow, store.getCoprocessorHost());
     } else {
       matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint,
-        earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow,
-        store.getCoprocessorHost());
+        earliestPutTs, oldestUnexpiredTS, now, dropDeletesFromRow,
+          dropDeletesToRow, store.getCoprocessorHost());
     }
 
     // Filter the list of scanners using Bloom filters, time range, TTL, etc.
@@ -343,6 +349,26 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
   }
 
   @VisibleForTesting
+  StoreScanner(final Store store, final Scan scan, ScanInfo scanInfo,
+      ScanType scanType, final NavigableSet<byte[]> columns,
+      final List<KeyValueScanner> scanners) throws IOException {
+    this(store, scan, scanInfo, scanType, columns, scanners,
+        HConstants.LATEST_TIMESTAMP,
+        // 0 is passed as readpoint because the test bypasses Store
+        0);
+  }
+
+  @VisibleForTesting
+  StoreScanner(final Store store, final Scan scan, ScanInfo scanInfo,
+      ScanType scanType, final NavigableSet<byte[]> columns,
+      final List<KeyValueScanner> scanners, long earliestPutTs)
+      throws IOException {
+    this(store, scan, scanInfo, scanType, columns, scanners, earliestPutTs,
+        // 0 is passed as readpoint because the test bypasses Store
+        0);
+  }
+
+  @VisibleForTesting
   StoreScanner(final Scan scan, ScanInfo scanInfo,
     ScanType scanType, final NavigableSet<byte[]> columns,
     final List<? extends KeyValueScanner> scanners, long earliestPutTs)
@@ -352,14 +378,41 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       0);
   }
 
+  public StoreScanner(final Store store, final Scan scan, ScanInfo scanInfo, ScanType scanType,
+      final NavigableSet<byte[]> columns, final List<KeyValueScanner> scanners, long earliestPutTs,
+      long readPt) throws IOException {
+    this(store, scan, scanInfo, columns, readPt, scan.getCacheBlocks(), scanType);
+    if (scanType == ScanType.USER_SCAN) {
+      this.matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null);
+    } else {
+      if (scan.hasFilter() || (scan.getStartRow() != null && scan.getStartRow().length > 0)
+          || (scan.getStopRow() != null && scan.getStopRow().length > 0)
+          || !scan.getTimeRange().isAllTime() || columns != null) {
+        // Use the legacy query matcher, since our code does not consider the scan object here.
+        // Only used to keep compatibility for coprocessors.
+        matcher = LegacyScanQueryMatcher.create(scan, scanInfo, columns, scanType, Long.MAX_VALUE,
+            earliestPutTs, oldestUnexpiredTS, now, null, null,
+            store.getCoprocessorHost());
+      } else {
+        this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE,
+            earliestPutTs, oldestUnexpiredTS, now, null, null,
+            null);
+      }
+    }
+
+    // Seek all scanners to the initial key
+    seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
+    addCurrentScanners(scanners);
+    resetKVHeap(scanners, scanInfo.getComparator());
+  }
+
   public StoreScanner(final Scan scan, ScanInfo scanInfo, ScanType scanType,
       final NavigableSet<byte[]> columns, final List<? extends KeyValueScanner> scanners, long earliestPutTs,
       long readPt) throws IOException {
     this(null, scan, scanInfo, columns, readPt,
         scanType == ScanType.USER_SCAN ? scan.getCacheBlocks() : false, scanType);
     if (scanType == ScanType.USER_SCAN) {
-      this.matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now,
-        null);
+      this.matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now, null);
     } else {
       if (scan.hasFilter() || (scan.getStartRow() != null && scan.getStartRow().length > 0)
           || (scan.getStopRow() != null && scan.getStopRow().length > 0)
@@ -367,10 +420,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         // use legacy query matcher since we do not consider the scan object in our code. Only used
         // to keep compatibility for coprocessor.
         matcher = LegacyScanQueryMatcher.create(scan, scanInfo, columns, scanType, Long.MAX_VALUE,
-          earliestPutTs, oldestUnexpiredTS, now, null, null, store.getCoprocessorHost());
+          earliestPutTs, oldestUnexpiredTS, now, null, null,
+            store.getCoprocessorHost());
       } else {
         this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE,
-          earliestPutTs, oldestUnexpiredTS, now, null, null, null);
+          earliestPutTs, oldestUnexpiredTS, now, null, null,
+            null);
       }
     }
 

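A note on the TTL hunk above: the scanner can no longer subtract a millisecond TTL directly from now, because under an HLC the clock reading is a hybrid timestamp rather than epoch millis. A minimal sketch of the derivation, reusing the TimestampType conversions this patch introduces; the helper itself is hypothetical and not part of the patch:

    // Hypothetical helper restating how oldestUnexpiredTS is computed above.
    static long oldestUnexpiredTs(TimestampType type, long now, long ttlMillis,
        boolean rawScan) {
      if (rawScan) {
        return 0L; // raw scans also return expired cells
      }
      // Convert the clock reading to epoch millis before subtracting the TTL.
      long diff = type.toEpochTimeMillisFromTimestamp(now) - ttlMillis;
      // Clamp at zero: re-encoding a negative difference as a HYBRID
      // timestamp would overflow into a huge positive value.
      return diff > 0 ? type.fromEpochTimeMillisToTimestamp(diff) : 0L;
    }
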
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java
index 89725fe..5aa041e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver.querymatcher;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
@@ -62,9 +63,21 @@ public abstract class DropDeletesCompactionScanQueryMatcher extends CompactionSc
   protected final MatchCode tryDropDelete(Cell cell) {
     long timestamp = cell.getTimestamp();
     // If it is not the time to drop the delete marker, just return
-    if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) {
-      return MatchCode.INCLUDE;
+    if (timeToPurgeDeletes > 0) {
+      // Assumes now and timestamp are of the same type, which should always be the case;
+      // if they differ, something is wrong, and tests that trigger this should be rewritten.
+      if (TimestampType.HYBRID.isLikelyOfType(now, true)) {
+        if (TimestampType.HYBRID.toEpochTimeMillisFromTimestamp(now) - TimestampType.HYBRID
+          .toEpochTimeMillisFromTimestamp(timestamp) <= timeToPurgeDeletes) {
+          return MatchCode.INCLUDE;
+        }
+      } else {
+        if (now - timestamp <= timeToPurgeDeletes) {
+          return MatchCode.INCLUDE;
+        }
+      }
     }
+
     if (keepDeletedCells == KeepDeletedCells.TRUE
         || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) {
       // If keepDeletedCell is true, or the delete marker is not expired yet, we should include it

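The branching added to tryDropDelete above exists because the delete-marker age must be measured in milliseconds whichever way the timestamp is encoded. A hedged sketch with the type dispatch factored out; toEpochMillis is an invented name, not part of the patch, and it assumes (as the patch's comment does) that now and timestamp share a type:

    // Invented helper: project a value into epoch millis only when it
    // plausibly encodes a HYBRID timestamp; otherwise it is already physical.
    static long toEpochMillis(long ts) {
      return TimestampType.HYBRID.isLikelyOfType(ts, true)
          ? TimestampType.HYBRID.toEpochTimeMillisFromTimestamp(ts)
          : ts;
    }

    // The purge-delay check then collapses to a single comparison:
    if (timeToPurgeDeletes > 0
        && toEpochMillis(now) - toEpochMillis(timestamp) <= timeToPurgeDeletes) {
      return MatchCode.INCLUDE;
    }
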
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
index e508a9a..2e59482 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
@@ -153,8 +154,23 @@ public abstract class ScanQueryMatcher implements ShipperListener {
         long ts = cell.getTimestamp();
         assert t.getValueLength() == Bytes.SIZEOF_LONG;
         long ttl = TagUtil.getValueAsLong(t);
-        if (ts + ttl < now) {
-          return true;
+        if (TimestampType.HYBRID.isLikelyOfType(ts, true)) {
+          if (TimestampType.HYBRID.isLikelyOfType(now, true)) {
+            if (TimestampType.HYBRID.toEpochTimeMillisFromTimestamp(ts) + ttl < TimestampType.HYBRID
+                .toEpochTimeMillisFromTimestamp(now)) {
+              return true;
+            }
+          } else {
+            if (TimestampType.HYBRID.toEpochTimeMillisFromTimestamp(ts) + ttl < now) {
+              return true;
+            }
+          }
+        } else {
+          if (ts + ttl < now) {
+            return true;
+          }
         }
         // Per cell TTLs cannot extend lifetime beyond family settings, so
         // fall through to check that

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index aa0c094..1c7b39c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -777,7 +778,14 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     // any cells found there inclusively.
     long latestTs = Math.max(opTs, latestCellTs);
     if (latestTs == 0 || latestTs == HConstants.LATEST_TIMESTAMP) {
-      latestTs = EnvironmentEdgeManager.currentTime();
+      if (latestCellTs == HConstants.LATEST_TIMESTAMP || latestCellTs == 0) {
+        latestTs = HConstants.LATEST_TIMESTAMP - 1;
+      } else if (TimestampType.HYBRID.isLikelyOfType(latestCellTs, true)) {
+        latestTs = TimestampType.HYBRID.fromEpochTimeMillisToTimestamp(EnvironmentEdgeManager
+          .currentTime());
+      } else {
+        latestTs = EnvironmentEdgeManager.currentTime();
+      }
     }
     get.setTimeRange(0, latestTs + 1);
     // In case of Put operation we set to read all versions. This was done to consider the case

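The AccessController change above chooses a "now" in the same domain as the cell timestamps it will bound. A hedged restatement of that selection; effectiveLatestTs is an invented name:

    static long effectiveLatestTs(long latestCellTs) {
      if (latestCellTs == HConstants.LATEST_TIMESTAMP || latestCellTs == 0) {
        // No usable cell timestamp: take the widest finite bound rather than
        // stamping a wall-clock time that may not match the table's clock.
        return HConstants.LATEST_TIMESTAMP - 1;
      }
      if (TimestampType.HYBRID.isLikelyOfType(latestCellTs, true)) {
        // Cells carry hybrid timestamps, so wall-clock millis must be
        // re-encoded before they can bound the Get's time range.
        return TimestampType.HYBRID.fromEpochTimeMillisToTimestamp(
            EnvironmentEdgeManager.currentTime());
      }
      return EnvironmentEdgeManager.currentTime(); // plain physical time
    }
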
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 5c8b29b..75a5aba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -1807,6 +1808,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return htd;
   }
 
+  public HTableDescriptor createTableDescriptor(final TableName name,
+      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted,
+      ClockType clockType) {
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
+    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
+      builder.addColumnFamily(new HColumnDescriptor(cfName)
+        .setMinVersions(minVersions)
+        .setMaxVersions(versions)
+        .setKeepDeletedCells(keepDeleted)
+        .setBlockCacheEnabled(false)
+        .setTimeToLive(ttl));
+    }
+    builder.setClockType(clockType);
+    return new HTableDescriptor(builder.build());
+  }
+
   /**
    * Create a table of name <code>name</code>.
    * @param name Name to give table.
@@ -1817,6 +1834,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
   }
 
+  /**
+   * Create a table of name <code>name</code>.
+   * @param name Name to give table.
+   * @param clockType clock type of the table
+   * @return Table descriptor.
+   */
+  public HTableDescriptor createTableDescriptor(final TableName name, ClockType clockType) {
+    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
+        MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED, clockType);
+  }
+
   public HTableDescriptor createTableDescriptor(final TableName tableName,
       byte[] family) {
     return createTableDescriptor(tableName, new byte[][] {family}, 1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index a99345b..7312000 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -252,6 +252,16 @@ public class MockRegionServerServices implements RegionServerServices {
     return null;
   }
 
+  @Override
+  public Clock getRegionServerClock(ClockType clockType) {
+    if (clockType.equals(ClockType.HLC)) {
+      return new Clock.HLC();
+    } else if (clockType.equals(ClockType.SYSTEM_MONOTONIC)) {
+      return new Clock.SystemMonotonic();
+    } else {
+      return new Clock.System();
+    }
+  }
+
   @Override
   public ExecutorService getExecutorService() {
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
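Illustrative use of the accessor above (not from the patch; assumes the existing no-arg constructor of MockRegionServerServices):

    MockRegionServerServices services = new MockRegionServerServices();
    Clock hlc = services.getRegionServerClock(ClockType.HLC);
    long ts = hlc.now(); // a hybrid timestamp, not epoch millis
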
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
new file mode 100644
index 0000000..b885a79
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.TimestampType;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+
+@Category({MediumTests.class})
+public class TestClockWithCluster {
+  private static final Log LOG = LogFactory.getLog(TestClockWithCluster.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static Connection connection;
+  private byte[] columnFamily = Bytes.toBytes("testCF");
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    UTIL.startMiniCluster(1);
+    connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
+  }
+
+  @AfterClass
+  public static void tearDownClass() throws Exception {
+    connection.close();
+    UTIL.shutdownMiniCluster();
+  }
+
+  private void verifyTimestamps(Table table, final byte[] f, int startRow, int endRow,
+      TimestampType timestamp, boolean isMonotonic) throws IOException {
+    for (int i = startRow; i < endRow; i++) {
+      String failMsg = "Failed verification of row :" + i;
+      byte[] data = Bytes.toBytes(String.valueOf(i));
+      Get get = new Get(data);
+      Result result = table.get(get);
+      Cell cell = result.getColumnLatestCell(f, null);
+      assertTrue(failMsg, timestamp.isLikelyOfType(cell.getTimestamp(), isMonotonic));
+    }
+  }
+
+  @Test
+  public void testNewTablesAreCreatedWithSystemClock() throws IOException {
+    try {
+      Admin admin = connection.getAdmin();
+      TableName tableName = TableName.valueOf("TestNewTablesAreSystemByDefault");
+      admin.createTable(new HTableDescriptor(tableName)
+          .addFamily(new HColumnDescriptor(columnFamily)));
+
+      Table table = connection.getTable(tableName);
+
+      ClockType clockType = admin.getTableDescriptor(tableName).getClockType();
+      assertEquals(ClockType.SYSTEM, clockType);
+      // write
+      UTIL.loadNumericRows(table, columnFamily, 0, 1000);
+      // read back and check that the values are the same
+      UTIL.verifyNumericRows(table, Bytes.toBytes("testCF"), 0, 1000, 0);
+
+      // This check becomes useful when the clock type is system monotonic or HLC.
+      verifyTimestamps(table, columnFamily, 0, 1000, TimestampType.PHYSICAL, false);
+    } catch (Exception e) {
+      // Fail loudly rather than swallowing the exception and passing vacuously.
+      fail("Test should not have thrown: " + e);
+    }
+  }
+
+  @Test
+  public void testMetaTableClockTypeIsSystem() {
+    try {
+      Admin admin = connection.getAdmin();
+      Table table = connection.getTable(TableName.META_TABLE_NAME);
+      ClockType clockType = admin.getTableDescriptor(TableName.META_TABLE_NAME).getClockType();
+      assertEquals(ClockType.SYSTEM, clockType);
+    } catch (IOException ioE) {
+      fail("Should not have thrown an IOException: " + ioE);
+    }
+  }
+
+  @Test
+  public void testMetaTableTimestampsAreSystem() {
+    // Checks timestamps of whatever is present in meta table currently.
+    // TODO: Include a complete meta table sample with all column families to check all paths of
+    // meta table modification.
+    try {
+      Table table = connection.getTable(TableName.META_TABLE_NAME);
+      Result result = table.getScanner(new Scan()).next();
+      for (Cell cell : result.rawCells()) {
+        assertTrue(TimestampType.PHYSICAL.isLikelyOfType(cell.getTimestamp(), false));
+      }
+    } catch (IOException ioE) {
+      fail("Should not have thrown an IOException: " + ioE);
+    }
+  }
+}
\ No newline at end of file

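The assertions in this new test lean on TimestampType.isLikelyOfType(ts, monotonic), a heuristic asking whether a raw long plausibly encodes a timestamp of the given type. A hedged example of the kind of check being made; the exact semantics of the boolean flag are assumed from the usages above:

    // Wall-clock millis should look PHYSICAL; an HLC reading should look HYBRID.
    assertTrue(TimestampType.PHYSICAL.isLikelyOfType(System.currentTimeMillis(), false));
    assertTrue(TimestampType.HYBRID.isLikelyOfType(new Clock.HLC().now(), true));
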
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
index 8805337..c349500 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
@@ -19,19 +19,24 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ClockType;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
@@ -45,12 +50,17 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+import org.apache.hadoop.hbase.util.TestTableName;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * This test runs batch mutation with Increments which have custom TimeRange.
@@ -59,14 +69,16 @@ import org.junit.experimental.categories.Category;
  * See HBASE-15698
  */
 @Category({CoprocessorTests.class, MediumTests.class})
+@RunWith(Parameterized.class)
 public class TestIncrementTimeRange {
 
   private static final HBaseTestingUtility util = new HBaseTestingUtility();
   private static ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
 
-  private static final TableName TEST_TABLE = TableName.valueOf("test");
+  @Rule
+  public TestTableName TEST_TABLE = new TestTableName();
   private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");
-
+  private static final byte[][] TEST_FAMILIES = new byte[][]{TEST_FAMILY};
   private static final byte[] ROW_A = Bytes.toBytes("aaa");
   private static final byte[] ROW_B = Bytes.toBytes("bbb");
   private static final byte[] ROW_C = Bytes.toBytes("ccc");
@@ -80,6 +92,18 @@ public class TestIncrementTimeRange {
   private Table hTableInterface;
   private Table table;
 
+  private ClockType clockType;
+
+  @Parameters(name = "{0}")
+  public static Iterable<Object> data() {
+    return Arrays.asList(new Object[] {ClockType.HLC, ClockType.SYSTEM_MONOTONIC,
+        ClockType.SYSTEM});
+  }
+
+  public TestIncrementTimeRange(ClockType clockType) {
+    this.clockType = clockType;
+  }
+
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
@@ -98,7 +122,8 @@ public class TestIncrementTimeRange {
 
   @Before
   public void before() throws Exception {
-    table = util.createTable(TEST_TABLE, TEST_FAMILY);
+    HTableDescriptor htd = util.createTableDescriptor(TEST_TABLE.getTableName(), clockType);
+    table = util.createTable(htd, TEST_FAMILIES, new Configuration(HBaseConfiguration.create()));
 
     Put puta = new Put(ROW_A);
     puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
@@ -121,7 +146,7 @@ public class TestIncrementTimeRange {
       }
     } finally {
       try {
-        util.deleteTable(TEST_TABLE);
+        util.deleteTable(TEST_TABLE.getTableName());
       } catch (IOException ioe) {
       }
     }
@@ -150,7 +175,7 @@ public class TestIncrementTimeRange {
 
   @Test
   public void testHTableInterfaceMethods() throws Exception {
-    hTableInterface = util.getConnection().getTable(TEST_TABLE);
+    hTableInterface = util.getConnection().getTable(TEST_TABLE.getTableName());
     checkHTableInterfaceMethods();
   }
 
@@ -162,7 +187,7 @@ public class TestIncrementTimeRange {
 
     time = EnvironmentEdgeManager.currentTime();
     mee.setValue(time);
-    TimeRange range10 = new TimeRange(1, time+10);
+    TimeRange range10 = new TimeRange(1, time + 10);
     hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L)
         .setTimeRange(range10.getMin(), range10.getMax()));
     checkRowValue(ROW_A, Bytes.toBytes(11L));
@@ -171,7 +196,7 @@ public class TestIncrementTimeRange {
 
     time = EnvironmentEdgeManager.currentTime();
     mee.setValue(time);
-    TimeRange range2 = new TimeRange(1, time+20);
+    TimeRange range2 = new TimeRange(1, time + 20);
     List<Row> actions =
         Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
             .setTimeRange(range2.getMin(), range2.getMax()),

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
index 0bec03b..2630869 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -28,12 +29,15 @@ import java.io.PrintStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -189,13 +193,27 @@ public class TestCopyTable {
    */
   @Test
   public void testRenameFamily() throws Exception {
-    final TableName sourceTable = TableName.valueOf(name.getMethodName() + "source");
-    final TableName targetTable = TableName.valueOf(name.getMethodName() + "-target");
+    testRenameFamily(ClockType.SYSTEM);
+    testRenameFamily(ClockType.SYSTEM_MONOTONIC);
+    testRenameFamily(ClockType.HLC);
+  }
 
+  public void testRenameFamily(ClockType clockType) throws Exception {
+    TableName sourceTable = TableName.valueOf("sourceTable");
+    TableName targetTable = TableName.valueOf("targetTable");
+    HTableDescriptor sourceTableDesc = new HTableDescriptor(TableDescriptorBuilder
+      .newBuilder(sourceTable)
+      .setClockType(clockType)
+      .build());
+    HTableDescriptor targetTableDesc = new HTableDescriptor(TableDescriptorBuilder
+      .newBuilder(targetTable)
+      .setClockType(clockType)
+      .build());
     byte[][] families = { FAMILY_A, FAMILY_B };
-
-    Table t = TEST_UTIL.createTable(sourceTable, families);
-    Table t2 = TEST_UTIL.createTable(targetTable, families);
+    Table t = TEST_UTIL.createTable(sourceTableDesc, families, (byte[][]) null,
+        new Configuration(TEST_UTIL.getConfiguration()));
+    Table t2 = TEST_UTIL.createTable(targetTableDesc, families, (byte[][]) null,
+        new Configuration(TEST_UTIL.getConfiguration()));
     Put p = new Put(ROW1);
     p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11"));
     p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12"));
@@ -225,6 +243,9 @@ public class TestCopyTable {
     // Data from the family of B is not copied
     assertNull(b1);
 
+    TEST_UTIL.deleteTable(sourceTable);
+    TEST_UTIL.deleteTable(targetTable);
+
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 87522b6..cda2509 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 7ac7571..8ec05ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClockType;
+import org.apache.hadoop.hbase.Clock;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -579,6 +581,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public Clock getRegionServerClock(ClockType clockType) {
+    Clock systemClock = new Clock.System();
+    return systemClock;
+  }
+
+  @Override
   public ExecutorService getExecutorService() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index b0eadb5..40ba82d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -42,8 +42,12 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hadoop.hbase.Clock;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
  * compacted memstore test case
@@ -169,24 +173,29 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   /** Test getNextRow from memstore
    * @throws InterruptedException
    */
-  @Override
   @Test
   public void testGetNextRow() throws Exception {
-    addRows(this.memstore);
+    testGetNextRow(new Clock.HLC());
+    testGetNextRow(new Clock.SystemMonotonic());
+    testGetNextRow(new Clock.System());
+  }
+
+  public void testGetNextRow(Clock clock) throws Exception {
+    addRows(this.memstore, clock);
     // Add more versions to make it a little more interesting.
     Thread.sleep(1);
-    addRows(this.memstore);
+    addRows(this.memstore, clock);
     Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty,
-        new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
+        new KeyValue(Bytes.toBytes(0), clock.now())) == 0);
     for (int i = 0; i < ROW_COUNT; i++) {
       Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
-          System.currentTimeMillis()));
+          clock.now()));
       if (i + 1 == ROW_COUNT) {
         assertEquals(nr, null);
       } else {
         assertTrue(CellComparator.COMPARATOR.compareRows(nr,
-            new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
+            new KeyValue(Bytes.toBytes(i + 1), clock.now())) == 0);
       }
     }
     //starting from each row, validate results should contain the starting row
@@ -195,9 +204,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE,
           KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator());
       ScanType scanType = ScanType.USER_SCAN;
-      InternalScanner scanner = new StoreScanner(new Scan(
-          Bytes.toBytes(startRowId)), scanInfo, scanType, null,
-          memstore.getScanners(0));
+      Store mockStore = mock(HStore.class);
+      when(mockStore.getClock()).thenReturn(Clock.getDummyClockOfGivenClockType(clock.clockType));
+      InternalScanner scanner = new StoreScanner(mockStore, new Scan(Bytes.toBytes(startRowId)),
+          scanInfo, scanType, null, memstore.getScanners(0));
       List<Cell> results = new ArrayList<>();
       for (int i = 0; scanner.next(results); i++) {
         int rowId = startRowId + i;

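Because StoreScanner now pulls its clock from the Store (see the StoreScanner hunk at the top of this patch), memstore tests must hand the scanner a store whose getClock() is stubbed. The Mockito pattern used above, shown in isolation:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    // Stub only what StoreScanner reads from the store: its clock.
    Store mockStore = mock(HStore.class);
    when(mockStore.getClock())
        .thenReturn(Clock.getDummyClockOfGivenClockType(ClockType.HLC));
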
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 439f3d4..a748487 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Clock;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -60,6 +61,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
@@ -580,16 +583,22 @@ public class TestDefaultMemStore {
    */
   @Test
   public void testGetNextRow() throws Exception {
-    addRows(this.memstore);
+    testGetNextRow(new Clock.HLC());
+    testGetNextRow(new Clock.SystemMonotonic());
+    testGetNextRow(new Clock.System());
+  }
+
+  public void testGetNextRow(Clock clock) throws Exception {
+    addRows(this.memstore, clock);
     // Add more versions to make it a little more interesting.
     Thread.sleep(1);
-    addRows(this.memstore);
+    addRows(this.memstore, clock);
     Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty,
         new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
     for (int i = 0; i < ROW_COUNT; i++) {
       Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
-          System.currentTimeMillis()));
+          clock.now()));
       if (i + 1 == ROW_COUNT) {
         assertEquals(nr, null);
       } else {
@@ -603,9 +612,11 @@ public class TestDefaultMemStore {
       ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE,
           KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator());
       ScanType scanType = ScanType.USER_SCAN;
-      try (InternalScanner scanner = new StoreScanner(new Scan(
-          Bytes.toBytes(startRowId)), scanInfo, scanType, null,
-          memstore.getScanners(0))) {
+      Store store = mock(HStore.class);
+      when(store.getClock()).thenReturn(Clock.getDummyClockOfGivenClockType(clock.clockType));
+
+      try (InternalScanner scanner = new StoreScanner(store, new Scan(Bytes.toBytes(startRowId)),
+          scanInfo, scanType, null, memstore.getScanners(0))) {
         List<Cell> results = new ArrayList<>();
         for (int i = 0; scanner.next(results); i++) {
           int rowId = startRowId + i;
@@ -1023,6 +1034,24 @@ public class TestDefaultMemStore {
     return ROW_COUNT;
   }
 
+  /**
+   * Adds {@link #ROW_COUNT} rows with {@link #QUALIFIER_COUNT} columns each,
+   * drawing every cell timestamp from the given clock.
+   * @param hmc Instance to add rows to.
+   * @param clock Clock to draw cell timestamps from.
+   * @return How many rows we added.
+   */
+  protected int addRows(final MemStore hmc, Clock clock) {
+    for (int i = 0; i < ROW_COUNT; i++) {
+      long timestamp = clock.now();
+      for (int ii = 0; ii < QUALIFIER_COUNT; ii++) {
+        byte [] row = Bytes.toBytes(i);
+        byte [] qf = makeQualifier(i, ii);
+        hmc.add(new KeyValue(row, FAMILY, qf, timestamp, qf), null);
+      }
+    }
+    return ROW_COUNT;
+  }
+
   private long runSnapshot(final AbstractMemStore hmc) throws UnexpectedStateException {
     // Save off old state.
     int oldHistorySize = hmc.getSnapshot().getCellsCount();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4f46c88..88aed56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -43,6 +43,7 @@ import static org.mockito.Mockito.when;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.security.PrivilegedExceptionAction;
@@ -77,6 +78,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
@@ -111,6 +115,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
@@ -6007,19 +6012,25 @@ public class TestHRegion {
 
   @Test
   public void testCellTTLs() throws IOException {
-    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
-    EnvironmentEdgeManager.injectEdge(edge);
+    testCellTTLs(ClockType.SYSTEM);
+    testCellTTLs(ClockType.SYSTEM_MONOTONIC);
+    testCellTTLs(ClockType.HLC);
+  }
 
+  public void testCellTTLs(ClockType clockType) throws IOException {
     final byte[] row = Bytes.toBytes("testRow");
     final byte[] q1 = Bytes.toBytes("q1");
     final byte[] q2 = Bytes.toBytes("q2");
     final byte[] q3 = Bytes.toBytes("q3");
     final byte[] q4 = Bytes.toBytes("q4");
 
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
-    hcd.setTimeToLive(10); // 10 seconds
-    htd.addFamily(hcd);
+    HTableDescriptor htd = new HTableDescriptor(TableDescriptorBuilder
+      .newBuilder(TableName.valueOf(name.getMethodName()))
+      .addColumnFamily(new HColumnDescriptor(fam1)
+        .setTimeToLive(10)) // 10 seconds
+      .setClockType(clockType)
+      .build());
+    TimestampType timestampType = clockType.timestampType();
 
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
@@ -6028,22 +6039,32 @@ public class TestHRegion {
             HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY),
         TEST_UTIL.getDataTestDir(), conf, htd);
     assertNotNull(region);
+
+    region.setClock(Clock.getDummyClockOfGivenClockType(clockType));
+    long now = timestampType.toEpochTimeMillisFromTimestamp(region.getClock().now());
+    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
+    EnvironmentEdgeManager.injectEdge(mee);
+    mee.setValue(now);
+
     try {
-      long now = EnvironmentEdgeManager.currentTime();
       // Add a cell that will expire in 5 seconds via cell TTL
-      region.put(new Put(row).add(new KeyValue(row, fam1, q1, now,
-        HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
+      region.put(new Put(row).add(new KeyValue(row, fam1, q1, timestampType
+          .fromEpochTimeMillisToTimestamp(now),
+          HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
           // TTL tags specify ts in milliseconds
           new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire after 10 seconds via family setting
-      region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
+      region.put(new Put(row).addColumn(fam1, q2, timestampType
+          .fromEpochTimeMillisToTimestamp(now), HConstants.EMPTY_BYTE_ARRAY));
       // Add a cell that will expire in 15 seconds via cell TTL
-      region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
-        HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
+      region.put(new Put(row).add(new KeyValue(row, fam1, q3, timestampType
+          .fromEpochTimeMillisToTimestamp(now + 10000 - 1),
+          HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
           // TTL tags specify ts in milliseconds
           new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire in 20 seconds via family setting
-      region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
+      region.put(new Put(row).addColumn(fam1, q4,
+          timestampType.fromEpochTimeMillisToTimestamp(now + 10000 - 1),
+          HConstants.EMPTY_BYTE_ARRAY));
 
       // Flush so we are sure store scanning gets this right
       region.flush(true);
@@ -6056,7 +6077,7 @@ public class TestHRegion {
       assertNotNull(r.getValue(fam1, q4));
 
       // Increment time to T+5 seconds
-      edge.incrementTime(5000);
+      mee.setValue(now + 5001);
 
       r = region.get(new Get(row));
       assertNull(r.getValue(fam1, q1));
@@ -6065,7 +6086,7 @@ public class TestHRegion {
       assertNotNull(r.getValue(fam1, q4));
 
       // Increment time to T+10 seconds
-      edge.incrementTime(5000);
+      mee.setValue(now + 10001);
 
       r = region.get(new Get(row));
       assertNull(r.getValue(fam1, q1));
@@ -6074,7 +6095,7 @@ public class TestHRegion {
       assertNotNull(r.getValue(fam1, q4));
 
       // Increment time to T+15 seconds
-      edge.incrementTime(5000);
+      mee.setValue(now + 15000);
 
       r = region.get(new Get(row));
       assertNull(r.getValue(fam1, q1));
@@ -6083,7 +6104,7 @@ public class TestHRegion {
       assertNotNull(r.getValue(fam1, q4));
 
       // Increment time to T+20 seconds
-      edge.incrementTime(10000);
+      mee.setValue(now + 20000);
 
       r = region.get(new Get(row));
       assertNull(r.getValue(fam1, q1));
@@ -6112,7 +6133,12 @@ public class TestHRegion {
       assertEquals(Bytes.toLong(val), 2L);
 
       // Increment time to T+25 seconds
-      edge.incrementTime(5000);
+
+      if (clockType == ClockType.SYSTEM) {
+        mee.setValue(now + 25002);
+      } else {
+        mee.setValue(now + 25001);
+      }
 
       // Value should be back to 1
       r = region.get(new Get(row));
@@ -6121,7 +6147,7 @@ public class TestHRegion {
       assertEquals(Bytes.toLong(val), 1L);
 
       // Increment time to T+30 seconds
-      edge.incrementTime(5000);
+      mee.setValue(now + 30001);
 
       // Original value written at T+20 should be gone now via family TTL
       r = region.get(new Get(row));

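The rewritten TTL test above pins time with a ManualEnvironmentEdge and absolute offsets from the region clock's initial reading, instead of incrementing an edge, so the physical clock and the table clock stay in step. The pattern in isolation, with offsets taken from the test:

    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(mee);
    long now = timestampType.toEpochTimeMillisFromTimestamp(region.getClock().now());
    mee.setValue(now);           // T+0
    mee.setValue(now + 5001);    // just past T+5s: the 5-second cell TTL expires
    mee.setValue(now + 30001);   // just past T+30s: the family TTL expires
    EnvironmentEdgeManager.reset(); // restore the default edge afterwards
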
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index f115b34..95024ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -60,6 +60,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -99,6 +101,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 /**
  * Tests of HRegion methods for replaying flush, compaction, region open, etc events for secondary
@@ -171,6 +175,7 @@ public class TestHRegionReplayEvents {
     when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
     when(rss.getConfiguration()).thenReturn(CONF);
     when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(CONF));
+    when(rss.getRegionServerClock((ClockType)any())).thenReturn(new Clock.System());
     String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER
         .toString();
     ExecutorService es = new ExecutorService(string);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index 89f7589..e07f042 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -30,6 +31,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -106,6 +109,7 @@ public class TestRegionSplitPolicy {
     final List<Region> regions = new ArrayList<>();
     Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
     Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
+    Mockito.when(rss.getRegionServerClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
     // Set max size for this 'table'.
     long maxSplitSize = 1024L;
     htd.setMaxFileSize(maxSplitSize);
@@ -167,6 +171,7 @@ public class TestRegionSplitPolicy {
     Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
     Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L);
     Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(0L);
+    Mockito.when(rss.getRegionServerClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
 
 
     BusyRegionSplitPolicy policy =

http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index 10f00a6..4a9415b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import static org.apache.hadoop.hbase.regionserver.KeyValueScanFixture.scanFixture;
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -38,11 +40,14 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
@@ -816,16 +821,31 @@ public class TestStoreScanner {
    */
   @Test
   public void testWildCardTtlScan() throws IOException {
+    testWildCardTtlScan(ClockType.SYSTEM);
+    testWildCardTtlScan(ClockType.SYSTEM_MONOTONIC);
+    testWildCardTtlScan(ClockType.HLC);
+  }
+
+  public void testWildCardTtlScan(ClockType clockType) throws IOException {
     long now = System.currentTimeMillis();
+    TimestampType timestampType = clockType.timestampType();
     KeyValue [] kvs = new KeyValue[] {
-        KeyValueTestUtil.create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R1", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R1", "cf", "d", now-10000, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R2", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R2", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R2", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
-        KeyValueTestUtil.create("R2", "cf", "c", now-1000, KeyValue.Type.Put, "dont-care")
+        KeyValueTestUtil.create("R1", "cf", "a",
+          timestampType.fromEpochTimeMillisToTimestamp(now-1000), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R1", "cf", "b",
+          timestampType.fromEpochTimeMillisToTimestamp(now-10), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R1", "cf", "c",
+          timestampType.fromEpochTimeMillisToTimestamp(now-200), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R1", "cf", "d",
+          timestampType.fromEpochTimeMillisToTimestamp(now-10000), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R2", "cf", "a",
+          timestampType.fromEpochTimeMillisToTimestamp(now), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R2", "cf", "b",
+          timestampType.fromEpochTimeMillisToTimestamp(now-10), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R2", "cf", "c",
+          timestampType.fromEpochTimeMillisToTimestamp(now-200), KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R2", "cf", "c",
+          timestampType.fromEpochTimeMillisToTimestamp(now-1000), KeyValue.Type.Put, "dont-care")
     };
     List<KeyValueScanner> scanners = scanFixture(kvs);
     Scan scan = new Scan();
@@ -833,7 +853,9 @@ public class TestStoreScanner {
     ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
         HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.COMPARATOR);
     ScanType scanType = ScanType.USER_SCAN;
-    try (StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners)) {
+    Store store = mock(HStore.class);
+    when(store.getClock()).thenReturn(Clock.getDummyClockOfGivenClockType(clockType));
+    try (StoreScanner scanner = new StoreScanner(store, scan, scanInfo, scanType, null, scanners)) {
       List<Cell> results = new ArrayList<>();
       Assert.assertEquals(true, scanner.next(results));
       Assert.assertEquals(2, results.size());
@@ -892,12 +914,19 @@ public class TestStoreScanner {
    */
   @Test
   public void testExpiredDeleteFamily() throws Exception {
+    testExpiredDeleteFamily(new Clock.HLC());
+    testExpiredDeleteFamily(new Clock.SystemMonotonic());
+    testExpiredDeleteFamily(new Clock.System());
+  }
+
+  public void testExpiredDeleteFamily(Clock clock) throws Exception {
     long now = System.currentTimeMillis();
+    TimestampType timestampType = clock.getTimestampType();
     KeyValue [] kvs = new KeyValue[] {
-        new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
-            KeyValue.Type.DeleteFamily),
-        KeyValueTestUtil.create("R1", "cf", "a", now-10, KeyValue.Type.Put,
-            "dont-care"),
+        new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
+          timestampType.fromEpochTimeMillisToTimestamp(now-1000), KeyValue.Type.DeleteFamily),
+        KeyValueTestUtil.create("R1", "cf", "a",
+          timestampType.fromEpochTimeMillisToTimestamp(now-10), KeyValue.Type.Put, "dont-care"),
     };
     List<KeyValueScanner> scanners = scanFixture(kvs);
     Scan scan = new Scan();
@@ -906,9 +935,10 @@ public class TestStoreScanner {
     ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
         HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.COMPARATOR);
     ScanType scanType = ScanType.USER_SCAN;
+    Store store = mock(HStore.class);
+    when(store.getClock()).thenReturn(clock);
     try (StoreScanner scanner =
-        new StoreScanner(scan, scanInfo, scanType, null, scanners)) {
-
+        new StoreScanner(store, scan, scanInfo, scanType, null, scanners)) {
       List<Cell> results = new ArrayList<>();
       Assert.assertEquals(true, scanner.next(results));
       Assert.assertEquals(1, results.size());
@@ -921,8 +951,15 @@ public class TestStoreScanner {
 
   @Test
   public void testDeleteMarkerLongevity() throws Exception {
+    testDeleteMarkerLongevity(new Clock.HLC());
+    testDeleteMarkerLongevity(new Clock.SystemMonotonic());
+    testDeleteMarkerLongevity(new Clock.System());
+  }
+
+  public void testDeleteMarkerLongevity(Clock clock) throws Exception {
     try {
       final long now = System.currentTimeMillis();
+      TimestampType timestampType = clock.getTimestampType();
       EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
         public long currentTime() {
           return now;
@@ -930,37 +967,40 @@ public class TestStoreScanner {
       });
       KeyValue[] kvs = new KeyValue[]{
         /*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
-        now - 100, KeyValue.Type.DeleteFamily), // live
+          timestampType.fromEpochTimeMillisToTimestamp(now - 100), KeyValue.Type.DeleteFamily),
+          // live
         /*1*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
-        now - 1000, KeyValue.Type.DeleteFamily), // expired
-        /*2*/ KeyValueTestUtil.create("R1", "cf", "a", now - 50,
-        KeyValue.Type.Put, "v3"), // live
-        /*3*/ KeyValueTestUtil.create("R1", "cf", "a", now - 55,
-        KeyValue.Type.Delete, "dontcare"), // live
-        /*4*/ KeyValueTestUtil.create("R1", "cf", "a", now - 55,
-        KeyValue.Type.Put, "deleted-version v2"), // deleted
-        /*5*/ KeyValueTestUtil.create("R1", "cf", "a", now - 60,
-        KeyValue.Type.Put, "v1"), // live
-        /*6*/ KeyValueTestUtil.create("R1", "cf", "a", now - 65,
-        KeyValue.Type.Put, "v0"), // max-version reached
+          timestampType.fromEpochTimeMillisToTimestamp(now - 1000), KeyValue.Type.DeleteFamily),
+          // expired
+        /*2*/ KeyValueTestUtil.create("R1", "cf", "a", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 50), KeyValue.Type.Put, "v3"), // live
+        /*3*/ KeyValueTestUtil.create("R1", "cf", "a", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 55), KeyValue.Type.Delete, "dontcare"), // live
+        /*4*/ KeyValueTestUtil.create("R1", "cf", "a", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 55), KeyValue.Type.Put, "deleted-version v2"), // deleted
+        /*5*/ KeyValueTestUtil.create("R1", "cf", "a", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 60), KeyValue.Type.Put, "v1"), // live
+        /*6*/ KeyValueTestUtil.create("R1", "cf", "a", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 65), KeyValue.Type.Put, "v0"), // max-version reached
         /*7*/ KeyValueTestUtil.create("R1", "cf", "a",
-        now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version
-        /*8*/ KeyValueTestUtil.create("R1", "cf", "b", now - 600,
-        KeyValue.Type.DeleteColumn, "dont-care"), //expired
-        /*9*/ KeyValueTestUtil.create("R1", "cf", "b", now - 70,
-        KeyValue.Type.Put, "v2"), //live
-        /*10*/ KeyValueTestUtil.create("R1", "cf", "b", now - 750,
-        KeyValue.Type.Put, "v1"), //expired
-        /*11*/ KeyValueTestUtil.create("R1", "cf", "c", now - 500,
-        KeyValue.Type.Delete, "dontcare"), //expired
-        /*12*/ KeyValueTestUtil.create("R1", "cf", "c", now - 600,
-        KeyValue.Type.Put, "v1"), //expired
-        /*13*/ KeyValueTestUtil.create("R1", "cf", "c", now - 1000,
-        KeyValue.Type.Delete, "dontcare"), //expired
-        /*14*/ KeyValueTestUtil.create("R1", "cf", "d", now - 60,
-        KeyValue.Type.Put, "expired put"), //live
-        /*15*/ KeyValueTestUtil.create("R1", "cf", "d", now - 100,
-        KeyValue.Type.Delete, "not-expired delete"), //live
+          timestampType.fromEpochTimeMillisToTimestamp(now - 100), KeyValue.Type.DeleteColumn,
+          "dont-care"), // max-version
+        /*8*/ KeyValueTestUtil.create("R1", "cf", "b", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 600), KeyValue.Type.DeleteColumn, "dont-care"), //expired
+        /*9*/ KeyValueTestUtil.create("R1", "cf", "b", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 70), KeyValue.Type.Put, "v2"), //live
+        /*10*/ KeyValueTestUtil.create("R1", "cf", "b", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 750), KeyValue.Type.Put, "v1"), //expired
+        /*11*/ KeyValueTestUtil.create("R1", "cf", "c", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 500), KeyValue.Type.Delete, "dontcare"), //expired
+        /*12*/ KeyValueTestUtil.create("R1", "cf", "c", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 600), KeyValue.Type.Put, "v1"), //expired
+        /*13*/ KeyValueTestUtil.create("R1", "cf", "c", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 1000), KeyValue.Type.Delete, "dontcare"), //expired
+        /*14*/ KeyValueTestUtil.create("R1", "cf", "d", timestampType
+              .fromEpochTimeMillisToTimestamp(now - 60), KeyValue.Type.Put, "expired put"), //live
+        /*15*/ KeyValueTestUtil.create("R1", "cf", "d", timestampType
+          .fromEpochTimeMillisToTimestamp(now - 100), KeyValue.Type.Delete, "not-expired delete"), //live
       };
       List<KeyValueScanner> scanners = scanFixture(kvs);
       Scan scan = new Scan();
@@ -972,10 +1012,10 @@ public class TestStoreScanner {
         HConstants.DEFAULT_BLOCKSIZE /* block size */,
         200, /* timeToPurgeDeletes */
         CellComparator.COMPARATOR);
-      try (StoreScanner scanner =
-        new StoreScanner(scan, scanInfo,
-          ScanType.COMPACT_DROP_DELETES, null, scanners,
-          HConstants.OLDEST_TIMESTAMP)) {
+      Store store = mock(HStore.class);
+      when(store.getClock()).thenReturn(clock);
+      try (StoreScanner scanner = new StoreScanner(store, scan, scanInfo,
+          ScanType.COMPACT_DROP_DELETES, null, scanners, HConstants.OLDEST_TIMESTAMP)) {
         List<Cell> results = new ArrayList<>();
         results = new ArrayList<>();
         Assert.assertEquals(true, scanner.next(results));

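Aside: the TTL assertions above lean on fromEpochTimeMillisToTimestamp and toEpochTimeMillisFromTimestamp being inverses on the physical component. A hedged sketch of that round-trip; the bit split below is an illustrative assumption, the real layout is owned by TimestampType:

    // Assumed layout: physical milliseconds in the high bits, a logical
    // counter in the low LOGICAL_BITS. The width here is illustrative only.
    static final int LOGICAL_BITS = 20;

    static long fromEpochMillis(long epochMillis) {
      return epochMillis << LOGICAL_BITS;   // logical component starts at zero
    }

    static long toEpochMillis(long timestamp) {
      return timestamp >>> LOGICAL_BITS;    // drop the logical component
    }

For the physical part the round-trip is exact, toEpochMillis(fromEpochMillis(t)) == t, which is what lets these tests express TTL windows in wall-clock milliseconds for all three clock types.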
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 51260a6..b5c7335 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 
 import java.io.IOException;
 import java.util.NavigableMap;
@@ -32,6 +33,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -203,6 +206,7 @@ public class TestWALLockup {
     Mockito.when(server.isStopped()).thenReturn(false);
     Mockito.when(server.isAborted()).thenReturn(false);
     RegionServerServices services = Mockito.mock(RegionServerServices.class);
+    Mockito.when(services.getRegionServerClock((ClockType) any())).thenReturn(new Clock.System());
 
     // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL, go ahead with test.
     FileSystem fs = FileSystem.get(CONF);

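Aside: the clock-parameterized test pattern used in the files above, gathered in one place; a minimal sketch, assuming the three concrete Clock implementations shown in this patch:

    for (Clock clock : new Clock[] {
        new Clock.System(), new Clock.SystemMonotonic(), new Clock.HLC() }) {
      TimestampType timestampType = clock.getTimestampType();
      // ... run the clock-sensitive assertions against this clock ...
    }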
http://git-wip-us.apache.org/repos/asf/hbase/blob/f66976ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index be725fe..8fe6be1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -52,6 +52,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -59,6 +61,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
@@ -657,13 +660,23 @@
    * @throws IOException
    */
   @Test
-  public void testReplayEditsAfterAbortingFlush() throws IOException {
+  public void testReplayEditsAfterAbortingFlush() throws Exception {
+    testReplayEditsAfterAbortingFlush(new Clock.System());
+    tearDown();
+    setUp();
+    testReplayEditsAfterAbortingFlush(new Clock.SystemMonotonic());
+    tearDown();
+    setUp();
+    testReplayEditsAfterAbortingFlush(new Clock.HLC());
+  }
+
+  public void testReplayEditsAfterAbortingFlush(Clock clock) throws IOException {
     final TableName tableName =
         TableName.valueOf("testReplayEditsAfterAbortingFlush");
     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
     final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
     deleteDir(basedir);
-    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
+    final HTableDescriptor htd = createBasic3FamilyHTD(tableName, clock.clockType);
     HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
     HBaseTestingUtility.closeRegionAndWAL(region3);
     // Write countPerFamily edits into the three families. Do a flush on one
@@ -672,6 +684,7 @@ public abstract class AbstractTestWALReplay {
     WAL wal = createWAL(this.conf, hbaseRootDir, logName);
     RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
     Mockito.doReturn(false).when(rsServices).isAborted();
+    when(rsServices.getRegionServerClock(clock.clockType)).thenReturn(clock);
     when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
     Configuration customConf = new Configuration(this.conf);
     customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
@@ -1211,14 +1224,21 @@ public abstract class AbstractTestWALReplay {
   }
 
   private HTableDescriptor createBasic3FamilyHTD(final TableName tableName) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-    htd.addFamily(a);
-    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
-    htd.addFamily(b);
-    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
-    htd.addFamily(c);
-    return htd;
+    return new HTableDescriptor(TableDescriptorBuilder.newBuilder(tableName)
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("a")))
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("b")))
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("c")))
+      .build());
+  }
+
+  private HTableDescriptor createBasic3FamilyHTD(final TableName tableName,
+      final ClockType clockType) {
+    return new HTableDescriptor(TableDescriptorBuilder.newBuilder(tableName)
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("a")))
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("b")))
+      .addColumnFamily(new HColumnDescriptor(Bytes.toBytes("c")))
+      .setClockType(clockType)
+      .build());
   }
 
   private void writerWALFile(Path file, List<FSWALEntry> entries) throws IOException {