Posted to commits@omid.apache.org by fp...@apache.org on 2018/01/30 23:39:35 UTC

[1/4] incubator-omid git commit: Revert to the state of the project at 0c371361781957c96b20295290a167f7be3b33e2

Repository: incubator-omid
Updated Branches:
  refs/heads/master fab2cfebb -> 7a9d7d6fd


http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/client/TestIntegrationOfTSOClientServerBasicFunctionality.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/client/TestIntegrationOfTSOClientServerBasicFunctionality.java b/tso-server/src/test/java/org/apache/omid/tso/client/TestIntegrationOfTSOClientServerBasicFunctionality.java
index c4c9c61..535d1db 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/client/TestIntegrationOfTSOClientServerBasicFunctionality.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/client/TestIntegrationOfTSOClientServerBasicFunctionality.java
@@ -21,10 +21,8 @@ import com.google.common.collect.Sets;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.Module;
-
 import org.apache.omid.TestUtils;
 import org.apache.omid.committable.CommitTable;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.apache.omid.tso.TSOMockModule;
 import org.apache.omid.tso.TSOServer;
 import org.apache.omid.tso.TSOServerConfig;
@@ -125,21 +123,17 @@ public class TestIntegrationOfTSOClientServerBasicFunctionality {
         referenceTimestamp = startTsTx1;
 
         long startTsTx2 = tsoClient.getNewStartTimestamp().get();
-        referenceTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-        assertEquals(startTsTx2, referenceTimestamp, "Should grow monotonically");
+        assertEquals(startTsTx2, ++referenceTimestamp, "Should grow monotonically");
         assertTrue(startTsTx2 > startTsTx1, "Two timestamps obtained consecutively should grow");
 
         long commitTsTx2 = tsoClient.commit(startTsTx2, Sets.newHashSet(c1)).get();
-        referenceTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-        assertEquals(commitTsTx2, referenceTimestamp, "Should grow monotonically");
+        assertEquals(commitTsTx2, ++referenceTimestamp, "Should grow monotonically");
 
         long commitTsTx1 = tsoClient.commit(startTsTx1, Sets.newHashSet(c2)).get();
-        referenceTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-        assertEquals(commitTsTx1, referenceTimestamp, "Should grow monotonically");
+        assertEquals(commitTsTx1, ++referenceTimestamp, "Should grow monotonically");
 
         long startTsTx3 = tsoClient.getNewStartTimestamp().get();
-        referenceTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-        assertEquals(startTsTx3, referenceTimestamp, "Should grow monotonically");
+        assertEquals(startTsTx3, ++referenceTimestamp, "Should grow monotonically");
     }
 
     @Test(timeOut = 30_000)
@@ -216,50 +210,6 @@ public class TestIntegrationOfTSOClientServerBasicFunctionality {
     }
 
     @Test(timeOut = 30_000)
-    public void testTransactionStartedBeforeFenceAborts() throws Exception {
-
-        long startTsTx1 = tsoClient.getNewStartTimestamp().get();
-
-        long fenceID = tsoClient.getFence(c1.getTableId()).get();
-
-        assertTrue(fenceID > startTsTx1, "Fence ID should be higher thank Tx1ID");
-
-        try {
-            tsoClient.commit(startTsTx1, Sets.newHashSet(c1, c2)).get();
-            Assert.fail("TX should fail on commit");
-        } catch (ExecutionException ee) {
-            assertEquals(AbortException.class, ee.getCause().getClass(), "Should have aborted");
-        }
-    }
-
-    @Test(timeOut = 30_000)
-    public void testTransactionStartedBeforeNonOverlapFenceCommits() throws Exception {
-
-        long startTsTx1 = tsoClient.getNewStartTimestamp().get();
-
-        tsoClient.getFence(7).get();
-
-        try {
-            tsoClient.commit(startTsTx1, Sets.newHashSet(c1, c2)).get();
-        } catch (ExecutionException ee) {
-            Assert.fail("TX should successfully commit");        }
-    }
-
-    @Test(timeOut = 30_000)
-    public void testTransactionStartedAfterFenceCommits() throws Exception {
-
-        tsoClient.getFence(c1.getTableId()).get();
-
-        long startTsTx1 = tsoClient.getNewStartTimestamp().get();
-
-        try {
-            tsoClient.commit(startTsTx1, Sets.newHashSet(c1, c2)).get();
-        } catch (ExecutionException ee) {
-            Assert.fail("TX should successfully commit");
-        }
-    }
-
-    @Test(timeOut = 30_000)
     public void testConflictsAndMonotonicallyTimestampGrowthWithTwoDifferentTSOClients() throws Exception {
         long startTsTx1Client1 = tsoClient.getNewStartTimestamp().get();
         long startTsTx2Client1 = tsoClient.getNewStartTimestamp().get();
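
For context on the assertion changes above: the code being reverted had the TSO advance its timestamp counter by MAX_CHECKPOINTS_PER_TXN (50) per request, leaving room for intra-transaction checkpoints, whereas the restored tests expect plain one-by-one allocation. Below is a minimal sketch contrasting the two schemes, assuming nothing more than an atomic counter; it is illustrative only and not the project's TimestampOracle.

    import java.util.concurrent.atomic.AtomicLong;

    // Contrasts block allocation (the reverted checkpoint-aware scheme) with the
    // single-increment allocation the restored assertions expect.
    public class TimestampAllocationSketch {

        static final int MAX_CHECKPOINTS_PER_TXN = 50; // constant removed by this revert

        static final AtomicLong counter = new AtomicLong();

        // Reverted scheme: each request consumes a block of 50 ids, so consecutive
        // start/commit timestamps differ by MAX_CHECKPOINTS_PER_TXN.
        static long nextTimestampBlock() {
            return counter.addAndGet(MAX_CHECKPOINTS_PER_TXN);
        }

        // Restored scheme: one id per request, so timestamps grow by 1.
        static long nextTimestamp() {
            return counter.incrementAndGet();
        }

        public static void main(String[] args) {
            System.out.println(nextTimestamp());      // 1
            System.out.println(nextTimestamp());      // 2   -> tests use ++referenceTimestamp
            counter.set(0);
            System.out.println(nextTimestampBlock()); // 50
            System.out.println(nextTimestampBlock()); // 100 -> tests added MAX_CHECKPOINTS_PER_TXN
        }
    }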

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientConnectionToTSO.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientConnectionToTSO.java b/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientConnectionToTSO.java
index 26030b9..2650e0e 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientConnectionToTSO.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientConnectionToTSO.java
@@ -19,12 +19,10 @@ package org.apache.omid.tso.client;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
-
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.test.TestingServer;
 import org.apache.curator.utils.CloseableUtils;
 import org.apache.omid.TestUtils;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.apache.omid.tso.HALeaseManagementModule;
 import org.apache.omid.tso.TSOMockModule;
 import org.apache.omid.tso.TSOServer;
@@ -139,7 +137,7 @@ public class TestTSOClientConnectionToTSO {
         // ... so we should get responses from the methods
         Long startTS = tsoClient.getNewStartTimestamp().get();
         LOG.info("Start TS {} ", startTS);
-        assertEquals(startTS.longValue(), AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
+        assertEquals(startTS.longValue(), 1);
 
         // Close the tsoClient connection and stop the TSO Server
         tsoClient.close().get();
@@ -177,7 +175,7 @@ public class TestTSOClientConnectionToTSO {
         // ... so we should get responses from the methods
         Long startTS = tsoClient.getNewStartTimestamp().get();
         LOG.info("Start TS {} ", startTS);
-        assertEquals(startTS.longValue(), AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
+        assertEquals(startTS.longValue(), 1);
 
         // Close the tsoClient connection and stop the TSO Server
         tsoClient.close().get();
@@ -215,7 +213,7 @@ public class TestTSOClientConnectionToTSO {
         // ... and check that initially we get responses from the methods
         Long startTS = tsoClient.getNewStartTimestamp().get();
         LOG.info("Start TS {} ", startTS);
-        assertEquals(startTS.longValue(), AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
+        assertEquals(startTS.longValue(), 1);
 
         // Then stop the server...
         tsoServer.stopAndWait();

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRequestAndResponseBehaviours.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRequestAndResponseBehaviours.java b/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRequestAndResponseBehaviours.java
index a2da056..1b5dce8 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRequestAndResponseBehaviours.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRequestAndResponseBehaviours.java
@@ -21,11 +21,9 @@ import com.google.common.collect.Sets;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.Module;
-
 import org.apache.omid.TestUtils;
 import org.apache.omid.committable.CommitTable;
 import org.apache.omid.proto.TSOProto;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.apache.omid.tso.PausableTimestampOracle;
 import org.apache.omid.tso.TSOMockModule;
 import org.apache.omid.tso.TSOServer;
@@ -350,7 +348,7 @@ public class TestTSOClientRequestAndResponseBehaviours {
         clientOneShot.makeRequest(createRetryCommitRequest(tx1ST));
         TSOProto.Response response = clientOneShot.makeRequest(createRetryCommitRequest(tx1ST));
         assertFalse(response.getCommitResponse().getAborted(), "Transaction should be committed");
-        assertEquals(response.getCommitResponse().getCommitTimestamp(), tx1ST + AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
+        assertEquals(response.getCommitResponse().getCommitTimestamp(), tx1ST + 1);
     }
 
     @Test(timeOut = 30_000)

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRowAndCellLevelConflict.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRowAndCellLevelConflict.java b/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRowAndCellLevelConflict.java
deleted file mode 100644
index da655a3..0000000
--- a/tso-server/src/test/java/org/apache/omid/tso/client/TestTSOClientRowAndCellLevelConflict.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.tso.client;
-
-import com.google.common.collect.Sets;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-
-import org.apache.omid.TestUtils;
-import org.apache.omid.tso.TSOMockModule;
-import org.apache.omid.tso.TSOServer;
-import org.apache.omid.tso.TSOServerConfig;
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
-import org.apache.omid.tso.util.DummyCellIdImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-
-import static org.testng.Assert.assertFalse;
-import static org.testng.Assert.assertTrue;
-
-public class TestTSOClientRowAndCellLevelConflict {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TestTSOClientRowAndCellLevelConflict.class);
-
-    private static final String TSO_SERVER_HOST = "localhost";
-    private static final int TSO_SERVER_PORT = 5678;
-
-    private OmidClientConfiguration tsoClientConf;
-
-    // Required infrastructure for TSOClient test
-    private TSOServer tsoServer;
-
-    @BeforeMethod
-    public void beforeMethod() throws Exception {
-
-        TSOServerConfig tsoConfig = new TSOServerConfig();
-        tsoConfig.setConflictMapSize(1000);
-        tsoConfig.setPort(TSO_SERVER_PORT);
-        tsoConfig.setNumConcurrentCTWriters(2);
-        Module tsoServerMockModule = new TSOMockModule(tsoConfig);
-        Injector injector = Guice.createInjector(tsoServerMockModule);
-
-        LOG.info("==================================================================================================");
-        LOG.info("======================================= Init TSO Server ==========================================");
-        LOG.info("==================================================================================================");
-
-        tsoServer = injector.getInstance(TSOServer.class);
-        tsoServer.startAndWait();
-        TestUtils.waitForSocketListening(TSO_SERVER_HOST, TSO_SERVER_PORT, 100);
-
-        LOG.info("==================================================================================================");
-        LOG.info("===================================== TSO Server Initialized =====================================");
-        LOG.info("==================================================================================================");
-
-        OmidClientConfiguration tsoClientConf = new OmidClientConfiguration();
-        tsoClientConf.setConnectionString(TSO_SERVER_HOST + ":" + TSO_SERVER_PORT);
-
-        this.tsoClientConf = tsoClientConf;
-
-    }
-
-    @AfterMethod
-    public void afterMethod() throws Exception {
-        tsoServer.stopAndWait();
-        tsoServer = null;
-        TestUtils.waitForSocketNotListening(TSO_SERVER_HOST, TSO_SERVER_PORT, 1000);
-    }
-
-    @Test(timeOut = 30_000)
-    public void testRowLevelConflictAnalysisConflict() throws Exception {
-
-        tsoClientConf.setConflictAnalysisLevel(ConflictDetectionLevel.ROW);
-
-        TSOClient client = TSOClient.newInstance(tsoClientConf);
-
-        CellId c1 = new DummyCellIdImpl(0xdeadbeefL, 0xdeadbeeeL);
-        CellId c2 = new DummyCellIdImpl(0xfeedcafeL, 0xdeadbeeeL);
-
-        Set<CellId> testWriteSet1 = Sets.newHashSet(c1);
-        Set<CellId> testWriteSet2 = Sets.newHashSet(c2);
-        
-        long ts1 = client.getNewStartTimestamp().get();
-        long ts2 = client.getNewStartTimestamp().get();
-        
-        client.commit(ts1, testWriteSet1).get();
-
-        try {
-            client.commit(ts2, testWriteSet2).get();
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof AbortException, "Transaction should be aborted");
-            return;
-        }
-
-        assertTrue(false, "Transaction should be aborted");
-    }
-
-    @Test(timeOut = 30_000)
-    public void testRowLevelConflictAnalysisCommit() throws Exception {
-
-        tsoClientConf.setConflictAnalysisLevel(ConflictDetectionLevel.ROW);
-
-        TSOClient client = TSOClient.newInstance(tsoClientConf);
-
-        CellId c1 = new DummyCellIdImpl(0xdeadbeefL, 0xdeadbeeeL);
-        CellId c2 = new DummyCellIdImpl(0xfeedcafeL, 0xdeadbeefL);
-
-        Set<CellId> testWriteSet1 = Sets.newHashSet(c1);
-        Set<CellId> testWriteSet2 = Sets.newHashSet(c2);
-        
-        long ts1 = client.getNewStartTimestamp().get();
-        long ts2 = client.getNewStartTimestamp().get();
-        
-        client.commit(ts1, testWriteSet1).get();
-
-        try {
-            client.commit(ts2, testWriteSet2).get();
-        } catch (ExecutionException e) {
-            assertFalse(e.getCause() instanceof AbortException, "Transaction should be committed");
-            return;
-        }
-
-        assertTrue(true, "Transaction should be committed");
-    }
-
-    @Test(timeOut = 30_000)
-    public void testCellLevelConflictAnalysisConflict() throws Exception {
-
-        tsoClientConf.setConflictAnalysisLevel(ConflictDetectionLevel.CELL);
-
-        TSOClient client = TSOClient.newInstance(tsoClientConf);
-
-        CellId c1 = new DummyCellIdImpl(0xdeadbeefL, 0xdeadbeeeL);
-        CellId c2 = new DummyCellIdImpl(0xdeadbeefL, 0xdeadbeeeL);
-
-        Set<CellId> testWriteSet1 = Sets.newHashSet(c1);
-        Set<CellId> testWriteSet2 = Sets.newHashSet(c2);
-        
-        long ts1 = client.getNewStartTimestamp().get();
-        long ts2 = client.getNewStartTimestamp().get();
-        
-        client.commit(ts1, testWriteSet1).get();
-
-        try {
-            client.commit(ts2, testWriteSet2).get();
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof AbortException, "Transaction should be aborted");
-            return;
-        }
-
-        assertTrue(false, "Transaction should be aborted");
-    }
-
-    @Test(timeOut = 30_000)
-    public void testCellLevelConflictAnalysisCommit() throws Exception {
-
-        tsoClientConf.setConflictAnalysisLevel(ConflictDetectionLevel.CELL);
-
-        TSOClient client = TSOClient.newInstance(tsoClientConf);
-
-        CellId c1 = new DummyCellIdImpl(0xdeadbeefL, 0xdeadbeeeL);
-        CellId c2 = new DummyCellIdImpl(0xfeedcafeL, 0xdeadbeefL);
-
-        Set<CellId> testWriteSet1 = Sets.newHashSet(c1);
-        Set<CellId> testWriteSet2 = Sets.newHashSet(c2);
-        
-        long ts1 = client.getNewStartTimestamp().get();
-        long ts2 = client.getNewStartTimestamp().get();
-        
-        client.commit(ts1, testWriteSet1).get();
-
-        try {
-            client.commit(ts2, testWriteSet2).get();
-        } catch (ExecutionException e) {
-            assertFalse(e.getCause() instanceof AbortException, "Transaction should be committed");
-            return;
-        }
-
-        assertTrue(true, "Transaction should be committed");
-    }
-    
-}
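
The test file deleted above exercised the ConflictDetectionLevel setting that this revert removes: at ROW level the client collapses the write set to row ids before sending it to the TSO, so two transactions writing different cells of the same row conflict, while at CELL level the cell ids themselves are compared. Below is a self-contained sketch of that write-set construction, using an illustrative Cell class rather than the project's CellId.

    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.Set;

    // Shows how the conflict-detection level changes the ids sent to the TSO.
    public class ConflictLevelSketch {

        enum Level { ROW, CELL }

        static final class Cell {
            final long rowId;
            final long cellId;
            Cell(long rowId, long cellId) { this.rowId = rowId; this.cellId = cellId; }
        }

        // Builds the write-set ids for the given level, de-duplicating row ids.
        static Set<Long> writeSetIds(Set<Cell> cells, Level level) {
            Set<Long> ids = new LinkedHashSet<>();
            for (Cell c : cells) {
                ids.add(level == Level.ROW ? c.rowId : c.cellId);
            }
            return ids;
        }

        public static void main(String[] args) {
            Set<Cell> writes = new HashSet<>();
            writes.add(new Cell(0xdeadbeeeL, 0xdeadbeefL)); // same row ...
            writes.add(new Cell(0xdeadbeeeL, 0xfeedcafeL)); // ... different cell

            // ROW: both writes collapse to a single row id, so another transaction
            // touching any cell of this row would conflict.
            System.out.println(writeSetIds(writes, Level.ROW).size());  // 1
            // CELL: two distinct cell ids, so disjoint cells never conflict.
            System.out.println(writeSetIds(writes, Level.CELL).size()); // 2
        }
    }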


[2/4] incubator-omid git commit: Revert to the state of the project at 0c371361781957c96b20295290a167f7be3b33e2

Posted by fp...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransactionManager.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransactionManager.java b/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransactionManager.java
index ec0302c..e7dc8cf 100644
--- a/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransactionManager.java
+++ b/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransactionManager.java
@@ -19,9 +19,7 @@ package org.apache.omid.transaction;
 
 import com.google.common.base.Function;
 import com.google.common.base.Optional;
-import com.google.common.hash.Hashing;
 import com.google.common.util.concurrent.Futures;
-
 import org.apache.omid.committable.CommitTable;
 import org.apache.omid.committable.CommitTable.CommitTimestamp;
 import org.apache.omid.metrics.Counter;
@@ -58,8 +56,6 @@ public abstract class AbstractTransactionManager implements TransactionManager {
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionManager.class);
 
-    public final static int MAX_CHECKPOINTS_PER_TXN = 50;
-
     public interface TransactionFactory<T extends CellId> {
 
         AbstractTransaction<T> createTransaction(long transactionId, long epoch, AbstractTransactionManager tm);
@@ -74,7 +70,6 @@ public abstract class AbstractTransactionManager implements TransactionManager {
     // Metrics
     private final Timer startTimestampTimer;
     private final Timer commitTimer;
-    private final Timer fenceTimer;
     private final Counter committedTxsCounter;
     private final Counter rolledbackTxsCounter;
     private final Counter errorTxsCounter;
@@ -109,7 +104,6 @@ public abstract class AbstractTransactionManager implements TransactionManager {
         // Metrics configuration
         this.startTimestampTimer = metrics.timer(name("omid", "tm", "hbase", "startTimestamp", "latency"));
         this.commitTimer = metrics.timer(name("omid", "tm", "hbase", "commit", "latency"));
-        this.fenceTimer = metrics.timer(name("omid", "tm", "hbase", "fence", "latency"));
         this.committedTxsCounter = metrics.counter(name("omid", "tm", "hbase", "committedTxs"));
         this.rolledbackTxsCounter = metrics.counter(name("omid", "tm", "hbase", "rolledbackTxs"));
         this.errorTxsCounter = metrics.counter(name("omid", "tm", "hbase", "erroredTxs"));
@@ -166,48 +160,6 @@ public abstract class AbstractTransactionManager implements TransactionManager {
     }
 
     /**
-     * Generates hash ID for table name, this hash is later-on sent to the TSO and used for fencing
-     * @param tableName - the table name
-     * @return
-     */
-    abstract public long getHashForTable(byte[] tableName);
-
-    /**
-     * Return the commit table client
-     * @return commitTableClient
-     */
-    public CommitTable.Client getCommitTableClient() {
-        return commitTableClient;
-    }
-
-    /**
-     * @see org.apache.omid.transaction.TransactionManager#fence()
-     */
-    @Override
-    public final Transaction fence(byte[] tableName) throws TransactionException {
-        long fenceTimestamp;
-        long tableID = getHashForTable(tableName); Hashing.murmur3_128().newHasher().putBytes(tableName).hash().asLong();
-
-        try {
-            fenceTimer.start();
-            try {
-                fenceTimestamp = tsoClient.getFence(tableID).get();
-            } finally {
-                fenceTimer.stop();
-            }
-
-            AbstractTransaction<? extends CellId> tx = transactionFactory.createTransaction(fenceTimestamp, fenceTimestamp, this);
-
-            return tx;
-        } catch (ExecutionException e) {
-            throw new TransactionException("Could not get fence", e);
-        } catch (InterruptedException ie) {
-            Thread.currentThread().interrupt();
-            throw new TransactionException("Interrupted creating a fence", ie);
-        }
-    }
-
-    /**
      * Allows transaction manager developers to perform actions after having started a transaction.
      * @param transaction
      *            the transaction that was just created.
@@ -312,6 +264,98 @@ public abstract class AbstractTransactionManager implements TransactionManager {
     public void postRollback(AbstractTransaction<? extends CellId> transaction) throws TransactionManagerException {}
 
     /**
+     * Check if the transaction commit data is in the shadow cell
+     * @param cellStartTimestamp
+     *            the transaction start timestamp
+     *        locator
+     *            the timestamp locator
+     * @throws IOException
+     */
+    Optional<CommitTimestamp> readCommitTimestampFromShadowCell(long cellStartTimestamp, CommitTimestampLocator locator)
+            throws IOException
+    {
+
+        Optional<CommitTimestamp> commitTS = Optional.absent();
+
+        Optional<Long> commitTimestamp = locator.readCommitTimestampFromShadowCell(cellStartTimestamp);
+        if (commitTimestamp.isPresent()) {
+            commitTS = Optional.of(new CommitTimestamp(SHADOW_CELL, commitTimestamp.get(), true)); // Valid commit TS
+        }
+
+        return commitTS;
+    }
+
+    /**
+     * This function returns the commit timestamp for a particular cell if the transaction was already committed in
+     * the system. In case the transaction was not committed and the cell was written by transaction initialized by a
+     * previous TSO server, an invalidation try occurs.
+     * Otherwise the function returns a value that indicates that the commit timestamp was not found.
+     * @param cellStartTimestamp
+     *          start timestamp of the cell to locate the commit timestamp for.
+     * @param epoch
+     *          the epoch of the TSO server the current tso client is working with.
+     * @param locator
+     *          a locator to find the commit timestamp in the system.
+     * @return the commit timestamp joint with the location where it was found
+     *         or an object indicating that it was not found in the system
+     * @throws IOException  in case of any I/O issues
+     */
+    public CommitTimestamp locateCellCommitTimestamp(long cellStartTimestamp, long epoch,
+                                                     CommitTimestampLocator locator) throws IOException {
+
+        try {
+            // 1) First check the cache
+            Optional<Long> commitTimestamp = locator.readCommitTimestampFromCache(cellStartTimestamp);
+            if (commitTimestamp.isPresent()) { // Valid commit timestamp
+                return new CommitTimestamp(CACHE, commitTimestamp.get(), true);
+            }
+
+            // 2) Then check the commit table
+            // If the data was written at a previous epoch, check whether the transaction was invalidated
+            Optional<CommitTimestamp> commitTimeStamp = commitTableClient.getCommitTimestamp(cellStartTimestamp).get();
+            if (commitTimeStamp.isPresent()) {
+                return commitTimeStamp.get();
+            }
+
+            // 3) Read from shadow cell
+            commitTimeStamp = readCommitTimestampFromShadowCell(cellStartTimestamp, locator);
+            if (commitTimeStamp.isPresent()) {
+                return commitTimeStamp.get();
+            }
+
+            // 4) Check the epoch and invalidate the entry
+            // if the data was written by a transaction from a previous epoch (previous TSO)
+            if (cellStartTimestamp < epoch) {
+                boolean invalidated = commitTableClient.tryInvalidateTransaction(cellStartTimestamp).get();
+                if (invalidated) { // Invalid commit timestamp
+                    return new CommitTimestamp(COMMIT_TABLE, CommitTable.INVALID_TRANSACTION_MARKER, false);
+                }
+            }
+
+            // 5) We did not manage to invalidate the transactions then check the commit table
+            commitTimeStamp = commitTableClient.getCommitTimestamp(cellStartTimestamp).get();
+            if (commitTimeStamp.isPresent()) {
+                return commitTimeStamp.get();
+            }
+
+            // 6) Read from shadow cell
+            commitTimeStamp = readCommitTimestampFromShadowCell(cellStartTimestamp, locator);
+            if (commitTimeStamp.isPresent()) {
+                return commitTimeStamp.get();
+            }
+
+            // *) Otherwise return not found
+            return new CommitTimestamp(NOT_PRESENT, -1L /** TODO Check if we should return this */, true);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new IOException("Interrupted while finding commit timestamp", e);
+        } catch (ExecutionException e) {
+            throw new IOException("Problem finding commit timestamp", e);
+        }
+
+    }
+
+    /**
      * @see java.io.Closeable#close()
      */
     @Override
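
The locateCellCommitTimestamp method restored above follows a fixed lookup order: cache, commit table, shadow cell, an epoch-based invalidation attempt, then commit table and shadow cell once more. Below is a condensed sketch of that cascade, with hypothetical functional interfaces standing in for the cache, commit-table and shadow-cell reads; unlike the real method, it folds the "invalidated" case into an empty result instead of returning an explicit invalid CommitTimestamp.

    import java.util.Optional;
    import java.util.function.LongFunction;
    import java.util.function.LongPredicate;

    // Condensed view of the lookup order used by locateCellCommitTimestamp.
    public class CommitTimestampLookupSketch {

        static Optional<Long> locate(long cellStartTs,
                                     long epoch,
                                     LongFunction<Optional<Long>> cache,
                                     LongFunction<Optional<Long>> commitTable,
                                     LongFunction<Optional<Long>> shadowCell,
                                     LongPredicate tryInvalidate) {
            // 1) Cache
            Optional<Long> ts = cache.apply(cellStartTs);
            if (ts.isPresent()) return ts;
            // 2) Commit table
            ts = commitTable.apply(cellStartTs);
            if (ts.isPresent()) return ts;
            // 3) Shadow cell
            ts = shadowCell.apply(cellStartTs);
            if (ts.isPresent()) return ts;
            // 4) Cell written under a previous TSO epoch: try to invalidate it
            //    (the real method returns an explicit "invalid" marker here)
            if (cellStartTs < epoch && tryInvalidate.test(cellStartTs)) {
                return Optional.empty();
            }
            // 5) + 6) Invalidation not possible: the writer may have committed
            //         concurrently, so re-check commit table and shadow cell
            ts = commitTable.apply(cellStartTs);
            if (ts.isPresent()) return ts;
            return shadowCell.apply(cellStartTs);
        }
    }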

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/transaction/TransactionManager.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/transaction/TransactionManager.java b/transaction-client/src/main/java/org/apache/omid/transaction/TransactionManager.java
index 3b272da..239aafc 100644
--- a/transaction-client/src/main/java/org/apache/omid/transaction/TransactionManager.java
+++ b/transaction-client/src/main/java/org/apache/omid/transaction/TransactionManager.java
@@ -58,15 +58,4 @@ public interface TransactionManager extends Closeable {
      */
     void rollback(Transaction tx) throws TransactionException;
 
-    /**
-    * Creates a fence
-    *
-    * Creates a fence and returns a {@link Transaction} interface implementation that contains the fence information.
-    *
-    * @param tableName name of the table that requires a fence
-    * @return transaction representation contains the fence timestamp as the TransactionId.
-    * @throws TransactionException in case of any issues
-    */
-    Transaction fence(byte[] tableName) throws TransactionException;
-
 }
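
For reference, the fence(byte[] tableName) method removed here was the application-facing entry point for table fences: any transaction that obtained its start timestamp before the fence and later commits writes to that table is aborted (see the deleted tests in TestIntegrationOfTSOClientServerBasicFunctionality above). Below is a rough usage sketch against the pre-revert interface, assuming an already-configured TransactionManager and a placeholder table name.

    import java.nio.charset.StandardCharsets;

    import org.apache.omid.transaction.Transaction;
    import org.apache.omid.transaction.TransactionException;
    import org.apache.omid.transaction.TransactionManager;

    // Usage sketch for the fence() method that this revert removes; it only
    // compiles against the pre-revert transaction-client module.
    public class FenceUsageSketch {

        // 'tm' is assumed to be an already-configured TransactionManager;
        // "MY_TABLE" is a placeholder table name.
        static void fenceTable(TransactionManager tm) throws TransactionException {
            byte[] tableName = "MY_TABLE".getBytes(StandardCharsets.UTF_8);

            // Creates the fence; per the removed javadoc, the returned Transaction
            // carries the fence timestamp as its transaction id. Transactions that
            // started before this point and write to MY_TABLE will abort on commit.
            Transaction fence = tm.fence(tableName);
        }
    }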

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/client/CellId.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/client/CellId.java b/transaction-client/src/main/java/org/apache/omid/tso/client/CellId.java
index 9643960..e40105e 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/client/CellId.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/client/CellId.java
@@ -21,7 +21,4 @@ public interface CellId {
 
     long getCellId();
 
-    long getTableId();
-
-    long getRowId();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/client/MockTSOClient.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/client/MockTSOClient.java b/transaction-client/src/main/java/org/apache/omid/tso/client/MockTSOClient.java
index 344d343..b4a205e 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/client/MockTSOClient.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/client/MockTSOClient.java
@@ -18,21 +18,16 @@
 package org.apache.omid.tso.client;
 
 import com.google.common.util.concurrent.SettableFuture;
-
 import org.apache.omid.committable.CommitTable;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.Set;
-import java.util.Map;
-import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
 class MockTSOClient implements TSOProtocol {
     private final AtomicLong timestampGenerator = new AtomicLong();
     private static final int CONFLICT_MAP_SIZE = 1_000_000;
     private final long[] conflictMap = new long[CONFLICT_MAP_SIZE];
-    private final Map<Long, Long> fenceMap = new HashMap<Long, Long>();
     private final AtomicLong lwm = new AtomicLong();
 
     private final CommitTable.Writer commitTable;
@@ -51,58 +46,6 @@ class MockTSOClient implements TSOProtocol {
     }
 
     @Override
-    public TSOFuture<Long> getFence(long tableId) {
-        synchronized (conflictMap) {
-            SettableFuture<Long> f = SettableFuture.create();
-            long fenceTimestamp = timestampGenerator.incrementAndGet();
-            f.set(fenceTimestamp);
-            fenceMap.put(tableId, fenceTimestamp);
-            try {
-                // Persist the fence by using the fence identifier as both the start and commit timestamp.
-                commitTable.addCommittedTransaction(fenceTimestamp, fenceTimestamp);
-                commitTable.flush();
-            } catch (IOException ioe) {
-                f.setException(ioe);
-            }
-            return new ForwardingTSOFuture<>(f);
-        }
-    }
-
-    // Checks whether transaction transactionId started before a fence creation of a table transactionId modified.
-    private boolean hasConflictsWithFences(long transactionId, Set<? extends CellId> cells) {
-        Set<Long> tableIDs = new HashSet<Long>();
-        for (CellId c : cells) {
-            tableIDs.add(c.getTableId());
-        }
-
-        if (! fenceMap.isEmpty()) {
-            for (long tableId : tableIDs) {
-                Long fence = fenceMap.get(tableId);
-                if (fence != null && transactionId < fence) {
-                    return true;
-                }
-                if (fence != null && fence < lwm.get()) { // GC
-                    fenceMap.remove(tableId);
-                }
-            }
-        }
-
-        return false;
-    }
-
-    // Checks whether transactionId has a write-write conflict with a transaction committed after transactionId.
-    private boolean hasConflictsWithCommittedTransactions(long transactionId, Set<? extends CellId> cells) {
-        for (CellId c : cells) {
-            int index = Math.abs((int) (c.getCellId() % CONFLICT_MAP_SIZE));
-            if (conflictMap[index] >= transactionId) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    @Override
     public TSOFuture<Long> commit(long transactionId, Set<? extends CellId> cells) {
         synchronized (conflictMap) {
             SettableFuture<Long> f = SettableFuture.create();
@@ -111,9 +54,16 @@ class MockTSOClient implements TSOProtocol {
                 return new ForwardingTSOFuture<>(f);
             }
 
-            if (!hasConflictsWithFences(transactionId, cells) &&
-                !hasConflictsWithCommittedTransactions(transactionId, cells)) {
+            boolean canCommit = true;
+            for (CellId c : cells) {
+                int index = Math.abs((int) (c.getCellId() % CONFLICT_MAP_SIZE));
+                if (conflictMap[index] >= transactionId) {
+                    canCommit = false;
+                    break;
+                }
+            }
 
+            if (canCommit) {
                 long commitTimestamp = timestampGenerator.incrementAndGet();
                 for (CellId c : cells) {
                     int index = Math.abs((int) (c.getCellId() % CONFLICT_MAP_SIZE));
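
The commit path restored in MockTSOClient above relies on a bucketed conflict map: a fixed-size array of last-commit timestamps indexed by a hash of each cell id, where a transaction may commit only if none of its buckets holds a commit timestamp at or after its start timestamp. The hunk does not show the bucket-update step, so the following is an illustrative reconstruction rather than the exact file contents.

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.atomic.AtomicLong;

    // Bucketed write-write conflict detection, in the spirit of the mock client above.
    public class ConflictMapSketch {

        private static final int CONFLICT_MAP_SIZE = 1_000_000;
        private final long[] conflictMap = new long[CONFLICT_MAP_SIZE];
        private final AtomicLong timestampGenerator = new AtomicLong();

        long newStartTimestamp() {
            return timestampGenerator.incrementAndGet();
        }

        // Returns a commit timestamp, or -1 if a write-write conflict is detected.
        synchronized long commit(long startTimestamp, Set<Long> cellIds) {
            for (long cellId : cellIds) {
                int index = Math.abs((int) (cellId % CONFLICT_MAP_SIZE));
                if (conflictMap[index] >= startTimestamp) {
                    return -1; // a conflicting writer committed at or after our start ts
                }
            }
            long commitTimestamp = timestampGenerator.incrementAndGet();
            for (long cellId : cellIds) {
                int index = Math.abs((int) (cellId % CONFLICT_MAP_SIZE));
                conflictMap[index] = commitTimestamp; // record the latest committed writer
            }
            return commitTimestamp;
        }

        public static void main(String[] args) {
            ConflictMapSketch tso = new ConflictMapSketch();
            long ts1 = tso.newStartTimestamp();                               // 1
            long ts2 = tso.newStartTimestamp();                               // 2
            System.out.println(tso.commit(ts1, Collections.singleton(42L))); // 3 (commits)
            System.out.println(tso.commit(ts2, Collections.singleton(42L))); // -1 (conflict)
        }
    }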

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/client/OmidClientConfiguration.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/client/OmidClientConfiguration.java b/transaction-client/src/main/java/org/apache/omid/tso/client/OmidClientConfiguration.java
index 6bc6481..3542c55 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/client/OmidClientConfiguration.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/client/OmidClientConfiguration.java
@@ -32,8 +32,6 @@ public class OmidClientConfiguration {
 
     public enum PostCommitMode {SYNC, ASYNC}
 
-    public enum ConflictDetectionLevel {CELL, ROW}
-
     // Basic connection related params
 
     private ConnType connectionType = ConnType.DIRECT;
@@ -53,7 +51,6 @@ public class OmidClientConfiguration {
     // Transaction Manager related params
 
     private PostCommitMode postCommitMode = PostCommitMode.SYNC;
-    private ConflictDetectionLevel conflictAnalysisLevel = ConflictDetectionLevel.CELL;
 
     // ----------------------------------------------------------------------------------------------------------------
     // Instantiation
@@ -177,13 +174,4 @@ public class OmidClientConfiguration {
         this.postCommitMode = postCommitMode;
     }
 
-    public ConflictDetectionLevel getConflictAnalysisLevel() {
-        return conflictAnalysisLevel;
-    }
-
-    @Inject(optional = true)
-    @Named("omid.tm.conflictAnalysisLevel")
-    public void setConflictAnalysisLevel(ConflictDetectionLevel conflictAnalysisLevel) {
-        this.conflictAnalysisLevel = conflictAnalysisLevel;
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/client/TSOClient.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/client/TSOClient.java b/transaction-client/src/main/java/org/apache/omid/tso/client/TSOClient.java
index 1c62876..1690ca6 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/client/TSOClient.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/client/TSOClient.java
@@ -21,10 +21,7 @@ import com.google.common.base.Charsets;
 import com.google.common.net.HostAndPort;
 import com.google.common.util.concurrent.AbstractFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import org.apache.omid.proto.TSOProto;
-import org.apache.omid.transaction.TransactionException;
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
 import org.apache.omid.zk.ZKUtils;
 import org.apache.statemachine.StateMachine;
 import org.apache.curator.framework.CuratorFramework;
@@ -57,9 +54,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayDeque;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
@@ -68,7 +63,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-
 /**
  * Describes the abstract methods to communicate to the TSO server
  */
@@ -98,13 +92,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
     private InetSocketAddress tsoAddr;
     private String zkCurrentTsoPath;
 
-
-    // Use to extract unique table identifiers from the modified cells list.
-    private final Set<Long> tableIDs;
-    // Conflict detection level of the entire system. Can either be Row or Cell level.
-    private ConflictDetectionLevel conflictDetectionLevel;
-    private Set<Long> rowLevelWriteSet;
-
     // ----------------------------------------------------------------------------------------------------------------
     // Construction
     // ----------------------------------------------------------------------------------------------------------------
@@ -172,11 +159,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
         bootstrap.setOption("keepAlive", true);
         bootstrap.setOption("reuseAddress", true);
         bootstrap.setOption("connectTimeoutMillis", 100);
-
-        this.tableIDs = new HashSet<Long>();
-
-        conflictDetectionLevel = omidConf.getConflictAnalysisLevel();
-        rowLevelWriteSet = new HashSet<Long>();
     }
 
     // ----------------------------------------------------------------------------------------------------------------
@@ -204,33 +186,9 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
         TSOProto.Request.Builder builder = TSOProto.Request.newBuilder();
         TSOProto.CommitRequest.Builder commitbuilder = TSOProto.CommitRequest.newBuilder();
         commitbuilder.setStartTimestamp(transactionId);
-
-        rowLevelWriteSet.clear();
         for (CellId cell : cells) {
-            long id;
-
-            switch (conflictDetectionLevel) {
-            case ROW:
-                id = cell.getRowId();
-                if (rowLevelWriteSet.contains(id)) {
-                    continue;
-                } else {
-                    rowLevelWriteSet.add(id);
-                }
-                break;
-            case CELL:
-                id = cell.getCellId();
-                break;
-            default:
-                id = 0;
-                assert (false);
-            }
-
-            commitbuilder.addCellId(id);
-            tableIDs.add(cell.getTableId());
+            commitbuilder.addCellId(cell.getCellId());
         }
-        commitbuilder.addAllTableId(tableIDs);
-        tableIDs.clear();
         builder.setCommitRequest(commitbuilder.build());
         RequestEvent request = new RequestEvent(builder.build(), requestMaxRetries);
         fsm.sendEvent(request);
@@ -238,20 +196,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
     }
 
     /**
-     * @see TSOProtocol#getFence()
-     */
-    @Override
-    public TSOFuture<Long> getFence(long tableId) {
-        TSOProto.Request.Builder builder = TSOProto.Request.newBuilder();
-        TSOProto.FenceRequest.Builder fenceReqBuilder = TSOProto.FenceRequest.newBuilder();
-        fenceReqBuilder.setTableId(tableId);
-        builder.setFenceRequest(fenceReqBuilder.build());
-        RequestEvent request = new RequestEvent(builder.build(), requestMaxRetries);
-        fsm.sendEvent(request);
-        return new ForwardingTSOFuture<>(request);
-    }
-
-    /**
      * @see TSOProtocol#close()
      */
     @Override
@@ -299,14 +243,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
         return epoch;
     }
 
-    /**
-     * Used for family deletion
-     * @return the conflict detection level.
-     */
-    public ConflictDetectionLevel getConflictDetectionLevel() {
-        return conflictDetectionLevel;
-    }
-
     // ----------------------------------------------------------------------------------------------------------------
     // NodeCacheListener interface
     // ----------------------------------------------------------------------------------------------------------------
@@ -410,19 +346,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
         }
     }
 
-    private static class FenceRequestTimeoutEvent implements StateMachine.Event {
-
-        final long tableID;
-
-        FenceRequestTimeoutEvent(long tableID) {
-            this.tableID = tableID;
-        }
-
-        public long getTableID() {
-            return tableID;
-        }
-    }
-
     private static class RequestEvent extends UserEvent<Long> {
 
         TSOProto.Request req;
@@ -692,7 +615,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
 
         final Queue<RequestAndTimeout> timestampRequests;
         final Map<Long, RequestAndTimeout> commitRequests;
-        final Map<Long, RequestAndTimeout> fenceRequests;
         final Channel channel;
 
         final HashedWheelTimer timeoutExecutor;
@@ -704,7 +626,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
             this.timeoutExecutor = timeoutExecutor;
             timestampRequests = new ArrayDeque<>();
             commitRequests = new HashMap<>();
-            fenceRequests = new HashMap<>();
         }
 
         private Timeout newTimeout(final StateMachine.Event timeoutEvent) {
@@ -729,10 +650,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
                 TSOProto.CommitRequest commitReq = req.getCommitRequest();
                 commitRequests.put(commitReq.getStartTimestamp(), new RequestAndTimeout(
                         request, newTimeout(new CommitRequestTimeoutEvent(commitReq.getStartTimestamp()))));
-            } else if (req.hasFenceRequest()) {
-                TSOProto.FenceRequest fenceReq = req.getFenceRequest();
-                fenceRequests.put(fenceReq.getTableId(), new RequestAndTimeout(
-                        request, newTimeout(new FenceRequestTimeoutEvent(fenceReq.getTableId()))));
             } else {
                 request.error(new IllegalArgumentException("Unknown request type"));
                 return;
@@ -776,18 +693,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
                 } else {
                     e.getRequest().success(resp.getCommitResponse().getCommitTimestamp());
                 }
-            } else if (resp.hasFenceResponse()) {
-                long tableID = resp.getFenceResponse().getTableId();
-                RequestAndTimeout e = fenceRequests.remove(tableID);
-                if (e == null) {
-                    LOG.debug("Received fence response for request that doesn't exist. Table ID: {}", tableID);
-                    return;
-                }
-                if (e.getTimeout() != null) {
-                    e.getTimeout().cancel();
-                }
-
-                e.getRequest().success(resp.getFenceResponse().getFenceId());
             }
         }
 
@@ -814,18 +719,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
             return this;
         }
 
-        public StateMachine.State handleEvent(FenceRequestTimeoutEvent e) {
-            long tableID = e.getTableID();
-            if (fenceRequests.containsKey(tableID)) {
-                RequestAndTimeout r = fenceRequests.remove(tableID);
-                if (r.getTimeout() != null) {
-                    r.getTimeout().cancel();
-                }
-                queueRetryOrError(fsm, r.getRequest());
-            }
-            return this;
-        }
-
         public StateMachine.State handleEvent(CloseEvent e) {
             LOG.debug("CONNECTED STATE: CloseEvent");
             timeoutExecutor.stop();
@@ -869,15 +762,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
                 queueRetryOrError(fsm, r.getRequest());
                 iter.remove();
             }
-            iter = fenceRequests.entrySet().iterator();
-            while (iter.hasNext()) {
-                RequestAndTimeout r = iter.next().getValue();
-                if (r.getTimeout() != null) {
-                    r.getTimeout().cancel();
-                }
-                queueRetryOrError(fsm, r.getRequest());
-                iter.remove();
-            }
             channel.close();
         }
 
@@ -916,12 +800,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
                 }
                 r.getRequest().error(new ClosingException());
             }
-            for (RequestAndTimeout r : fenceRequests.values()) {
-                if (r.getTimeout() != null) {
-                    r.getTimeout().cancel();
-                }
-                r.getRequest().error(new ClosingException());
-            }
         }
     }
 
@@ -942,11 +820,6 @@ public class TSOClient implements TSOProtocol, NodeCacheListener {
             return this;
         }
 
-        public StateMachine.State handleEvent(FenceRequestTimeoutEvent e) {
-            // Ignored. They will be retried or errored
-            return this;
-        }
-
         public StateMachine.State handleEvent(ErrorEvent e) {
             // Ignored. They will be retried or errored
             return this;

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/client/TSOProtocol.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/client/TSOProtocol.java b/transaction-client/src/main/java/org/apache/omid/tso/client/TSOProtocol.java
index 5ad6326..198913a 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/client/TSOProtocol.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/client/TSOProtocol.java
@@ -17,11 +17,8 @@
  */
 package org.apache.omid.tso.client;
 
-import java.util.List;
 import java.util.Set;
 
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
-
 /**
  * Defines the protocol used on the client side to abstract communication to the TSO server
  */
@@ -51,17 +48,6 @@ public interface TSOProtocol {
     TSOFuture<Long> commit(long transactionId, Set<? extends CellId> writeSet);
 
     /**
-     * Returns a new fence timestamp assigned by on the server-side
-     * @param tableId
-     *          the table to create fence for.
-     * @return the newly assigned timestamp as a future. If an error was detected, the future will contain a
-     * corresponding protocol exception
-     * see org.apache.omid.tso.TimestampOracle
-     * see org.apache.omid.tso.TSOServer
-     */
-    TSOFuture<Long> getFence(long tableId);
-
-    /**
      * Closes the communication with the TSO server
      * @return nothing. If an error was detected, the future will contain a corresponding protocol exception
      */

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/tso/util/DummyCellIdImpl.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/tso/util/DummyCellIdImpl.java b/transaction-client/src/main/java/org/apache/omid/tso/util/DummyCellIdImpl.java
index ab3a385..4556757 100644
--- a/transaction-client/src/main/java/org/apache/omid/tso/util/DummyCellIdImpl.java
+++ b/transaction-client/src/main/java/org/apache/omid/tso/util/DummyCellIdImpl.java
@@ -22,15 +22,9 @@ import org.apache.omid.tso.client.CellId;
 public class DummyCellIdImpl implements CellId {
 
     private final long cellId;
-    private final long rowId;
 
     public DummyCellIdImpl(long cellId) {
-        this(cellId, cellId);
-    }
-
-    public DummyCellIdImpl(long cellId, long rowId) {
         this.cellId = cellId;
-        this.rowId = rowId;
     }
 
     @Override
@@ -38,13 +32,4 @@ public class DummyCellIdImpl implements CellId {
         return cellId;
     }
 
-    @Override
-    public long getTableId() {
-        return cellId;
-    }
-
-    @Override
-    public long getRowId() {
-        return rowId;
-    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/resources/omid-client-config.yml
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/resources/omid-client-config.yml b/transaction-client/src/main/resources/omid-client-config.yml
index 478bd48..4263c35 100644
--- a/transaction-client/src/main/resources/omid-client-config.yml
+++ b/transaction-client/src/main/resources/omid-client-config.yml
@@ -36,8 +36,4 @@ executorThreads: 3
 
 # Configure whether the TM performs the post-commit actions for a tx (update shadow cells and clean commit table entry)
 # before returning to the control to the client (SYNC) or in parallel (ASYNC)
-postCommitMode: !!org.apache.omid.tso.client.OmidClientConfiguration$PostCommitMode SYNC
-
-# Conflict analysis level
-# Can either be cell level or row level. Default is cell level
-conflictDetectionLevel: !!org.apache.omid.tso.client.OmidClientConfiguration$ConflictDetectionLevel CELL
+postCommitMode: !!org.apache.omid.tso.client.OmidClientConfiguration$PostCommitMode SYNC
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/pom.xml
----------------------------------------------------------------------
diff --git a/tso-server/pom.xml b/tso-server/pom.xml
index e1488c1..88ec145 100644
--- a/tso-server/pom.xml
+++ b/tso-server/pom.xml
@@ -58,6 +58,7 @@
             <groupId>org.apache.omid</groupId>
             <artifactId>omid-transaction-client</artifactId>
             <version>${project.version}</version>
+            <scope>test</scope>
         </dependency>
 
         <!-- End of Dependencies on Omid modules -->

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/Batch.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/Batch.java b/tso-server/src/main/java/org/apache/omid/tso/Batch.java
index 111c81c..99d0c5c 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/Batch.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/Batch.java
@@ -103,16 +103,6 @@ public class Batch {
 
     }
 
-    void addFence(long tableID, long fenceTimestamp, Channel c, MonitoringContext context) {
-
-        Preconditions.checkState(!isFull(), "batch is full");
-        int index = numEvents++;
-        PersistEvent e = events[index];
-        context.timerStart("persistence.processor.fence.latency");
-        e.makePersistFence(tableID, fenceTimestamp, c, context);
-
-    }
-
     void addCommit(long startTimestamp, long commitTimestamp, Channel c, MonitoringContext context) {
 
         Preconditions.checkState(!isFull(), "batch is full");

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/PersistEvent.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/PersistEvent.java b/tso-server/src/main/java/org/apache/omid/tso/PersistEvent.java
index b89cdc5..db58677 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/PersistEvent.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/PersistEvent.java
@@ -25,7 +25,7 @@ public final class PersistEvent {
     private MonitoringContext monCtx;
 
     enum Type {
-        TIMESTAMP, COMMIT, ABORT, COMMIT_RETRY, FENCE
+        TIMESTAMP, COMMIT, ABORT, COMMIT_RETRY
     }
 
     private Type type = null;
@@ -71,16 +71,6 @@ public final class PersistEvent {
 
     }
 
-    void makePersistFence(long tableID, long fenceTimestamp, Channel c, MonitoringContext monCtx) {
-
-        this.type = Type.FENCE;
-        this.startTimestamp = tableID;
-        this.commitTimestamp = fenceTimestamp;
-        this.channel = c;
-        this.monCtx = monCtx;
-
-    }
-
     MonitoringContext getMonCtx() {
 
         return monCtx;

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessor.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessor.java b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessor.java
index ddebf13..b96945d 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessor.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessor.java
@@ -33,8 +33,6 @@ interface PersistenceProcessor extends Closeable {
 
     void addTimestampToBatch(long startTimestamp, Channel c, MonitoringContext monCtx) throws Exception;
 
-    void addFenceToBatch(long tableID, long fenceTimestamp, Channel c, MonitoringContext monCtx) throws Exception;
-
     void triggerCurrentBatchFlush() throws Exception;
 
     Future<Void> persistLowWatermark(long lowWatermark);

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorHandler.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorHandler.java b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorHandler.java
index a6d63c7..07241f0 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorHandler.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorHandler.java
@@ -95,11 +95,6 @@ public class PersistenceProcessorHandler implements WorkHandler<PersistenceProce
                 case ABORT:
                     event.getMonCtx().timerStop("persistence.processor.abort.latency");
                     break;
-                case FENCE:
-                    // Persist the fence by using the fence identifier as both the start and commit timestamp.
-                    writer.addCommittedTransaction(event.getCommitTimestamp(), event.getCommitTimestamp());
-                    commitEventsToFlush++;
-                    break;
                 default:
                     throw new IllegalStateException("Event not allowed in Persistent Processor Handler: " + event);
             }
@@ -124,10 +119,6 @@ public class PersistenceProcessorHandler implements WorkHandler<PersistenceProce
                 case ABORT:
                     event.getMonCtx().timerStart("reply.processor.abort.latency");
                     break;
-                case FENCE:
-                    event.getMonCtx().timerStop("persistence.processor.fence.latency");
-                    event.getMonCtx().timerStart("reply.processor.fence.latency");
-                    break;
                 default:
                     throw new IllegalStateException("Event not allowed in Persistent Processor Handler: " + event);
             }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorImpl.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorImpl.java b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorImpl.java
index 628b73d..95d77ba 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorImpl.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/PersistenceProcessorImpl.java
@@ -146,10 +146,10 @@ class PersistenceProcessorImpl implements PersistenceProcessor {
     }
 
     @Override
-    public void addAbortToBatch(long startTimestamp, Channel c, MonitoringContext monCtx)
+    public void addAbortToBatch(long startTimestamp, Channel c, MonitoringContext context)
             throws Exception {
 
-        currentBatch.addAbort(startTimestamp, c, monCtx);
+        currentBatch.addAbort(startTimestamp, c, context);
         if (currentBatch.isFull()) {
             triggerCurrentBatchFlush();
         }
@@ -157,19 +157,9 @@ class PersistenceProcessorImpl implements PersistenceProcessor {
     }
 
     @Override
-    public void addTimestampToBatch(long startTimestamp, Channel c, MonitoringContext monCtx) throws Exception {
+    public void addTimestampToBatch(long startTimestamp, Channel c, MonitoringContext context) throws Exception {
 
-        currentBatch.addTimestamp(startTimestamp, c, monCtx);
-        if (currentBatch.isFull()) {
-            triggerCurrentBatchFlush();
-        }
-
-    }
-
-    @Override
-    public void addFenceToBatch(long tableID, long fenceTimestamp, Channel c, MonitoringContext monCtx) throws Exception {
-
-        currentBatch.addFence(tableID, fenceTimestamp, c, monCtx);
+        currentBatch.addTimestamp(startTimestamp, c, context);
         if (currentBatch.isFull()) {
             triggerCurrentBatchFlush();
         }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessor.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessor.java b/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessor.java
index 7e836aa..f196c42 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessor.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessor.java
@@ -67,18 +67,5 @@ interface ReplyProcessor extends Closeable {
 
     void sendTimestampResponse(long startTimestamp, Channel channel);
 
-    /**
-     * Allow to send a fence response back to the client.
-     *
-     * @param tableID
-     *            the table we are creating the fence for
-     * @param fenceTimestamp
-     *            the fence timestamp to return
-     * @param channel
-     *            the channel used to send the response back to the client
-     */
-
-    void sendFenceResponse(long tableID, long fenceTimestamp, Channel c);
-
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessorImpl.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessorImpl.java b/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessorImpl.java
index 28fe3a0..8e50323 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessorImpl.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/ReplyProcessorImpl.java
@@ -67,7 +67,6 @@ class ReplyProcessorImpl implements EventHandler<ReplyProcessorImpl.ReplyBatchEv
     private final Meter abortMeter;
     private final Meter commitMeter;
     private final Meter timestampMeter;
-    private final Meter fenceMeter;
 
     @Inject
     ReplyProcessorImpl(@Named("ReplyStrategy") WaitStrategy strategy,
@@ -101,7 +100,6 @@ class ReplyProcessorImpl implements EventHandler<ReplyProcessorImpl.ReplyBatchEv
         this.abortMeter = metrics.meter(name("tso", "aborts"));
         this.commitMeter = metrics.meter(name("tso", "commits"));
         this.timestampMeter = metrics.meter(name("tso", "timestampAllocation"));
-        this.fenceMeter = metrics.meter(name("tso", "fences"));
 
         LOG.info("ReplyProcessor initialized");
 
@@ -130,11 +128,6 @@ class ReplyProcessorImpl implements EventHandler<ReplyProcessorImpl.ReplyBatchEv
                     event.getMonCtx().timerStop("reply.processor.timestamp.latency");
                     timestampMeter.mark();
                     break;
-                case FENCE:
-                    sendFenceResponse(event.getStartTimestamp(), event.getCommitTimestamp(), event.getChannel());
-                    event.getMonCtx().timerStop("reply.processor.fence.latency");
-                    fenceMeter.mark();
-                    break;
                 case COMMIT_RETRY:
                     throw new IllegalStateException("COMMIT_RETRY events must be filtered before this step: " + event);
                 default:
@@ -225,18 +218,6 @@ class ReplyProcessorImpl implements EventHandler<ReplyProcessorImpl.ReplyBatchEv
     }
 
     @Override
-    public void sendFenceResponse(long tableID, long fenceTimestamp, Channel c) {
-
-        TSOProto.Response.Builder builder = TSOProto.Response.newBuilder();
-        TSOProto.FenceResponse.Builder fenceBuilder = TSOProto.FenceResponse.newBuilder();
-        fenceBuilder.setTableId(tableID);
-        fenceBuilder.setFenceId(fenceTimestamp);
-        builder.setFenceResponse(fenceBuilder.build());
-        c.write(builder.build());
-
-    }
-
-    @Override
     public void close() {
 
         LOG.info("Terminating Reply Processor...");

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/RequestProcessor.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/RequestProcessor.java b/tso-server/src/main/java/org/apache/omid/tso/RequestProcessor.java
index 062329d..8ab6c9f 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/RequestProcessor.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/RequestProcessor.java
@@ -27,7 +27,6 @@ public interface RequestProcessor extends TSOStateManager.StateObserver, Closeab
 
     void timestampRequest(Channel c, MonitoringContext monCtx);
 
-    void commitRequest(long startTimestamp, Collection<Long> writeSet, Collection<Long> tableIdSet, boolean isRetry, Channel c, MonitoringContext monCtx);
+    void commitRequest(long startTimestamp, Collection<Long> writeSet, boolean isRetry, Channel c, MonitoringContext monCtx);
 
-    void fenceRequest(long tableID, Channel c, MonitoringContext monCtx);
 }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/RequestProcessorImpl.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/RequestProcessorImpl.java b/tso-server/src/main/java/org/apache/omid/tso/RequestProcessorImpl.java
index 04458f1..65416bc 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/RequestProcessorImpl.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/RequestProcessorImpl.java
@@ -24,7 +24,6 @@ import com.lmax.disruptor.RingBuffer;
 import com.lmax.disruptor.TimeoutBlockingWaitStrategy;
 import com.lmax.disruptor.TimeoutHandler;
 import com.lmax.disruptor.dsl.Disruptor;
-
 import org.apache.omid.metrics.MetricsRegistry;
 import org.apache.omid.tso.TSOStateManager.TSOState;
 import org.jboss.netty.channel.Channel;
@@ -32,14 +31,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.inject.Inject;
-
 import java.io.IOException;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.Iterator;
-import java.util.Map;
 import java.util.NoSuchElementException;
-import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
@@ -60,7 +55,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
 
     private final TimestampOracle timestampOracle;
     private final CommitHashMap hashmap;
-    private final Map<Long, Long> tableFences;
     private final MetricsRegistry metrics;
     private final PersistenceProcessor persistProc;
 
@@ -96,7 +90,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
         this.persistProc = persistProc;
         this.timestampOracle = timestampOracle;
         this.hashmap = new CommitHashMap(config.getConflictMapSize());
-        this.tableFences = new HashMap<Long, Long>();
 
         LOG.info("RequestProcessor initialized");
 
@@ -123,9 +116,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
             case COMMIT:
                 handleCommit(event);
                 break;
-            case FENCE:
-                handleFence(event);
-                break;
             default:
                 throw new IllegalStateException("Event not allowed in Request Processor: " + event);
         }
@@ -157,24 +147,13 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
     }
 
     @Override
-    public void commitRequest(long startTimestamp, Collection<Long> writeSet, Collection<Long> tableIdSet, boolean isRetry, Channel c,
+    public void commitRequest(long startTimestamp, Collection<Long> writeSet, boolean isRetry, Channel c,
                               MonitoringContext monCtx) {
 
         monCtx.timerStart("request.processor.commit.latency");
         long seq = requestRing.next();
         RequestEvent e = requestRing.get(seq);
-        RequestEvent.makeCommitRequest(e, startTimestamp, monCtx, writeSet, tableIdSet, isRetry, c);
-        requestRing.publish(seq);
-
-    }
-
-    @Override
-    public void fenceRequest(long tableID, Channel c, MonitoringContext monCtx) {
-
-        monCtx.timerStart("request.processor.fence.latency");
-        long seq = requestRing.next();
-        RequestEvent e = requestRing.get(seq);
-        RequestEvent.makeFenceRequest(e, tableID, c, monCtx);
+        RequestEvent.makeCommitRequest(e, startTimestamp, monCtx, writeSet, isRetry, c);
         requestRing.publish(seq);
 
     }
@@ -187,56 +166,38 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
 
     }
 
-    // Checks whether transaction transactionId started before a fence creation of a table transactionId modified.
-    private boolean hasConflictsWithFences(long startTimestamp, Collection<Long> tableIdSet) {
-        if (!tableFences.isEmpty()) {
-            for (long tableId: tableIdSet) {
-                Long fence = tableFences.get(tableId);
-                if (fence != null && fence > startTimestamp) {
-                    return true;
-                }
-                if (fence != null && fence < lowWatermark) {
-                    tableFences.remove(tableId); // Garbage collect entries of old fences.
-                }
-            }
-        }
-
-        return false;
-    }
-
- // Checks whether transactionId has a write-write conflict with a transaction committed after transactionId.
-    private boolean hasConflictsWithCommittedTransactions(long startTimestamp, Iterable<Long> writeSet) {
-        for (long cellId : writeSet) {
-            long value = hashmap.getLatestWriteForCell(cellId);
-            if (value != 0 && value >= startTimestamp) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
     private void handleCommit(RequestEvent event) throws Exception {
 
         long startTimestamp = event.getStartTimestamp();
         Iterable<Long> writeSet = event.writeSet();
-        Collection<Long> tableIdSet = event.getTableIdSet();
         boolean isCommitRetry = event.isCommitRetry();
         Channel c = event.getChannel();
 
-        boolean nonEmptyWriteSet = writeSet.iterator().hasNext();
+        boolean txCanCommit;
+
+        int numCellsInWriteset = 0;
+        // 0. check if it should abort
+        if (startTimestamp <= lowWatermark) {
+            txCanCommit = false;
+        } else {
+            // 1. check the write-write conflicts
+            txCanCommit = true;
+            for (long cellId : writeSet) {
+                long value = hashmap.getLatestWriteForCell(cellId);
+                if (value != 0 && value >= startTimestamp) {
+                    txCanCommit = false;
+                    break;
+                }
+                numCellsInWriteset++;
+            }
+        }
 
-        // If the transaction started before the low watermark, or
-        // it started before a fence and modified the table the fence created for, or
-        // it has a write-write conflict with a transaction committed after it started
-        // Then it should abort. Otherwise, it can commit.
-        if (startTimestamp > lowWatermark &&
-            !hasConflictsWithFences(startTimestamp, tableIdSet) &&
-            !hasConflictsWithCommittedTransactions(startTimestamp, writeSet)) {
+        if (txCanCommit) {
+            // 2. commit
 
             long commitTimestamp = timestampOracle.next();
 
-            if (nonEmptyWriteSet) {
+            if (numCellsInWriteset > 0) {
                 long newLowWatermark = lowWatermark;
 
                 for (long r : writeSet) {
@@ -266,16 +227,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
 
     }
 
-    private void handleFence(RequestEvent event) throws Exception {
-        long tableID = event.getTableId();
-        Channel c = event.getChannel();
-
-        long fenceTimestamp = timestampOracle.next();
-
-        tableFences.put(tableID, fenceTimestamp);
-        persistProc.addFenceToBatch(tableID, fenceTimestamp, c, event.getMonCtx());
-    }
-
     @Override
     public void close() throws IOException {
 
@@ -298,7 +249,7 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
     final static class RequestEvent implements Iterable<Long> {
 
         enum Type {
-            TIMESTAMP, COMMIT, FENCE
+            TIMESTAMP, COMMIT
         }
 
         private Type type = null;
@@ -313,9 +264,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
         private Long writeSet[] = new Long[MAX_INLINE];
         private Collection<Long> writeSetAsCollection = null; // for the case where there's more than MAX_INLINE
 
-        private Collection<Long> tableIdSet = null;
-        private long tableID = 0;
-
         static void makeTimestampRequest(RequestEvent e, Channel c, MonitoringContext monCtx) {
             e.type = Type.TIMESTAMP;
             e.channel = c;
@@ -326,7 +274,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
                                       long startTimestamp,
                                       MonitoringContext monCtx,
                                       Collection<Long> writeSet,
-                                      Collection<Long> TableIdSet,
                                       boolean isRetry,
                                       Channel c) {
             e.monCtx = monCtx;
@@ -343,20 +290,10 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
                 int i = 0;
                 for (Long cellId : writeSet) {
                     e.writeSet[i] = cellId;
-                    ++i;
+                    i++;
                 }
             }
-            e.tableIdSet = TableIdSet;
-        }
 
-        static void makeFenceRequest(RequestEvent e,
-                                     long tableID,
-                                     Channel c,
-                                     MonitoringContext monCtx) {
-            e.type = Type.FENCE;
-            e.channel = c;
-            e.monCtx = monCtx;
-            e.tableID = tableID;
         }
 
         MonitoringContext getMonCtx() {
@@ -375,14 +312,6 @@ class RequestProcessorImpl implements EventHandler<RequestProcessorImpl.RequestE
             return channel;
         }
 
-        Collection<Long> getTableIdSet() {
-            return tableIdSet;
-        }
-
-        long getTableId() {
-            return tableID;
-        }
-
         @Override
         public Iterator<Long> iterator() {
 

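For readers following the RequestProcessorImpl hunk above: after the revert, handleCommit() decides commit or abort with the low-watermark check plus a plain write-write conflict scan over the write set. A self-contained sketch of that decision, with a simple map standing in for CommitHashMap:

// Hedged sketch of the restored commit decision; the map replaces
// CommitHashMap.getLatestWriteForCell() purely for illustration.
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

final class ConflictSketch {

    // cellId -> commit timestamp of the latest transaction that wrote that cell
    private final Map<Long, Long> latestWritePerCell = new HashMap<>();

    // A transaction may commit only if it started after the low watermark and no cell
    // in its write set was committed at or after its start timestamp (the same checks
    // handleCommit() performs above, without the fence lookup this commit removes).
    boolean canCommit(long startTimestamp, long lowWatermark, Collection<Long> writeSet) {
        if (startTimestamp <= lowWatermark) {
            return false;
        }
        for (long cellId : writeSet) {
            Long lastCommit = latestWritePerCell.get(cellId);
            if (lastCommit != null && lastCommit >= startTimestamp) {
                return false; // write-write conflict with a transaction committed after our start
            }
        }
        return true;
    }

    void recordCommit(long commitTimestamp, Collection<Long> writeSet) {
        for (long cellId : writeSet) {
            latestWritePerCell.put(cellId, commitTimestamp);
        }
    }
}
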
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/TSOChannelHandler.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/TSOChannelHandler.java b/tso-server/src/main/java/org/apache/omid/tso/TSOChannelHandler.java
index a218a1d..fe99880 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/TSOChannelHandler.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/TSOChannelHandler.java
@@ -170,13 +170,9 @@ public class TSOChannelHandler extends SimpleChannelHandler implements Closeable
                 TSOProto.CommitRequest cr = request.getCommitRequest();
                 requestProcessor.commitRequest(cr.getStartTimestamp(),
                                                cr.getCellIdList(),
-                                               cr.getTableIdList(),
                                                cr.getIsRetry(),
                                                ctx.getChannel(),
                                                new MonitoringContext(metrics));
-            } else if (request.hasFenceRequest()) {
-                TSOProto.FenceRequest fr = request.getFenceRequest();
-                requestProcessor.fenceRequest(fr.getTableId(), ctx.getChannel(), new MonitoringContext(metrics));
             } else {
                 LOG.error("Invalid request {}. Closing channel {}", request, ctx.getChannel());
                 ctx.getChannel().close();

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/TSOModule.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/TSOModule.java b/tso-server/src/main/java/org/apache/omid/tso/TSOModule.java
index 4d0d844..a7aec27 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/TSOModule.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/TSOModule.java
@@ -24,9 +24,6 @@ import com.google.inject.Provides;
 
 import javax.inject.Named;
 import javax.inject.Singleton;
-
-import org.apache.omid.tso.TSOServerConfig.TIMESTAMP_TYPE;
-
 import java.net.SocketException;
 import java.net.UnknownHostException;
 
@@ -46,13 +43,7 @@ class TSOModule extends AbstractModule {
 
         bind(TSOChannelHandler.class).in(Singleton.class);
         bind(TSOStateManager.class).to(TSOStateManagerImpl.class).in(Singleton.class);
-
-        if (config.getTimestampTypeEnum() == TIMESTAMP_TYPE.WORLD_TIME) {
-            bind(TimestampOracle.class).to(WorldClockOracleImpl.class).in(Singleton.class);
-        } else {
-            bind(TimestampOracle.class).to(TimestampOracleImpl.class).in(Singleton.class);
-        }
-
+        bind(TimestampOracle.class).to(TimestampOracleImpl.class).in(Singleton.class);
         bind(Panicker.class).to(SystemExitPanicker.class).in(Singleton.class);
 
         install(new BatchPoolModule(config));

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/TSOServerConfig.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/TSOServerConfig.java b/tso-server/src/main/java/org/apache/omid/tso/TSOServerConfig.java
index 8f061a1..3292211 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/TSOServerConfig.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/TSOServerConfig.java
@@ -44,11 +44,6 @@ public class TSOServerConfig extends SecureHBaseConfig {
         LOW_CPU
     };
 
-    public static enum TIMESTAMP_TYPE {
-      INCREMENTAL,
-      WORLD_TIME
-    };
-
     // ----------------------------------------------------------------------------------------------------------------
     // Instantiation
     // ----------------------------------------------------------------------------------------------------------------
@@ -87,8 +82,6 @@ public class TSOServerConfig extends SecureHBaseConfig {
 
     private String networkIfaceName = NetworkUtils.getDefaultNetworkInterface();
 
-    private String timestampType;
-
     public int getPort() {
         return port;
     }
@@ -137,18 +130,6 @@ public class TSOServerConfig extends SecureHBaseConfig {
         this.networkIfaceName = networkIfaceName;
     }
 
-    public String getTimestampType() {
-        return timestampType;
-    }
-
-    public void setTimestampType(String type) {
-        this.timestampType = type;
-    }
-
-    public TIMESTAMP_TYPE getTimestampTypeEnum() {
-        return TSOServerConfig.TIMESTAMP_TYPE.valueOf(timestampType);
-    }
-
     public Module getTimestampStoreModule() {
         return timestampStoreModule;
     }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/TimestampOracleImpl.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/TimestampOracleImpl.java b/tso-server/src/main/java/org/apache/omid/tso/TimestampOracleImpl.java
index 454526f..0a65c01 100644
--- a/tso-server/src/main/java/org/apache/omid/tso/TimestampOracleImpl.java
+++ b/tso-server/src/main/java/org/apache/omid/tso/TimestampOracleImpl.java
@@ -19,17 +19,14 @@ package org.apache.omid.tso;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import org.apache.omid.metrics.Gauge;
 import org.apache.omid.metrics.MetricsRegistry;
 import org.apache.omid.timestamp.storage.TimestampStorage;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.inject.Inject;
 import javax.inject.Singleton;
-
 import java.io.IOException;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
@@ -137,12 +134,9 @@ public class TimestampOracleImpl implements TimestampOracle {
     @SuppressWarnings("StatementWithEmptyBody")
     @Override
     public long next() {
-        lastTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
+        lastTimestamp++;
 
-        if (lastTimestamp >= nextAllocationThreshold) {
-            // set the nextAllocationThread to max value of long in order to
-            // make sure only one call to this function will execute a thread to extend the timestamp batch.
-            nextAllocationThreshold = Long.MAX_VALUE; 
+        if (lastTimestamp == nextAllocationThreshold) {
             executor.execute(allocateTimestampsBatchTask);
         }
 

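The reverted next() above goes back to bumping the counter by one and kicking off a background batch allocation exactly when the counter hits the threshold. A rough sketch of that pattern follows; the batch size, the constructor seeding and the spin on exhaustion are assumptions for illustration only, since the real oracle delegates to TimestampStorage:

// Hedged sketch of incremental timestamp allocation with background batch extension.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class IncrementalOracleSketch {

    private static final long BATCH_SIZE = 1_000_000; // assumed batch size, for the sketch only

    private long lastTimestamp = 0;
    private volatile long maxTimestamp = 0;             // end of the currently allocated batch
    private volatile long nextAllocationThreshold = 0;  // point at which a new batch is requested

    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    IncrementalOracleSketch() {
        allocateNextBatch(); // seed the first batch before handing out timestamps
    }

    synchronized long next() {
        lastTimestamp++;
        if (lastTimestamp == nextAllocationThreshold) {
            executor.execute(this::allocateNextBatch); // extend the batch off the critical path
        }
        while (lastTimestamp > maxTimestamp) {
            Thread.yield(); // the real oracle allocates far enough ahead to avoid this
        }
        return lastTimestamp;
    }

    private void allocateNextBatch() {
        // TimestampOracleImpl persists the new max through TimestampStorage before using it.
        maxTimestamp += BATCH_SIZE;
        nextAllocationThreshold = maxTimestamp - BATCH_SIZE / 2;
    }
}
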
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/java/org/apache/omid/tso/WorldClockOracleImpl.java
----------------------------------------------------------------------
diff --git a/tso-server/src/main/java/org/apache/omid/tso/WorldClockOracleImpl.java b/tso-server/src/main/java/org/apache/omid/tso/WorldClockOracleImpl.java
deleted file mode 100644
index 4a9c5b5..0000000
--- a/tso-server/src/main/java/org/apache/omid/tso/WorldClockOracleImpl.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.tso;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import org.apache.omid.metrics.Gauge;
-import org.apache.omid.metrics.MetricsRegistry;
-import org.apache.omid.timestamp.storage.TimestampStorage;
-import org.apache.omid.transaction.AbstractTransactionManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.inject.Inject;
-import javax.inject.Singleton;
-
-import java.io.IOException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.omid.metrics.MetricsUtils.name;
-
-/**
- * The Timestamp Oracle that gives monotonically increasing timestamps based on world time
- */
-@Singleton
-public class WorldClockOracleImpl implements TimestampOracle {
-
-    private static final Logger LOG = LoggerFactory.getLogger(WorldClockOracleImpl.class);
-
-    static final long MAX_TX_PER_MS = 1_000_000; // 1 million
-    static final long TIMESTAMP_INTERVAL_MS = 10_000; // 10 seconds interval
-    private static final long TIMESTAMP_ALLOCATION_INTERVAL_MS = 7_000; // 7 seconds
-
-    private long lastTimestamp;
-    private long maxTimestamp;
-
-    private TimestampStorage storage;
-    private Panicker panicker;
-
-    private volatile long maxAllocatedTime;
-
-    private final ScheduledExecutorService scheduler =
-            Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setNameFormat("ts-persist-%d").build());
-
-    private Runnable allocateTimestampsBatchTask;
-
-    private class AllocateTimestampBatchTask implements Runnable {
-        long previousMaxTime;
-
-        AllocateTimestampBatchTask(long previousMaxTime) {
-            this.previousMaxTime = previousMaxTime;
-        }
-
-        @Override
-        public void run() {
-            long newMaxTime = (System.currentTimeMillis() + TIMESTAMP_INTERVAL_MS) * MAX_TX_PER_MS;
-            try {
-                storage.updateMaxTimestamp(previousMaxTime, newMaxTime);
-                maxAllocatedTime = newMaxTime;
-                previousMaxTime = newMaxTime;
-            } catch (Throwable e) {
-                panicker.panic("Can't store the new max timestamp", e);
-            }
-        }
-    }
-
-    @Inject
-    public WorldClockOracleImpl(MetricsRegistry metrics,
-                               TimestampStorage tsStorage,
-                               Panicker panicker) throws IOException {
-
-        this.storage = tsStorage;
-        this.panicker = panicker;
-
-        metrics.gauge(name("tso", "maxTimestamp"), new Gauge<Long>() {
-            @Override
-            public Long getValue() {
-                return maxTimestamp;
-            }
-        });
-
-    }
-
-    @Override
-    public void initialize() throws IOException {
-
-        this.lastTimestamp = this.maxTimestamp = storage.getMaxTimestamp();
-
-        this.allocateTimestampsBatchTask = new AllocateTimestampBatchTask(lastTimestamp);
-
-        // Trigger first allocation of timestamps
-        scheduler.schedule(allocateTimestampsBatchTask, 0, TimeUnit.MILLISECONDS);
-
-        // Waiting for the current epoch to start. Occurs in case of failover when the previous TSO allocated the current time frame.
-        while ((System.currentTimeMillis() * MAX_TX_PER_MS) < this.lastTimestamp) {
-            try {
-                Thread.sleep(1000);
-            } catch (InterruptedException e) {
-               continue;
-            }
-        }
-
-        // Launch the periodic timestamp interval allocation. In this case, the timestamp interval is extended even though the TSO is idle.
-        // Because we are world time based, this guarantees that the first request after a long time does not need to wait for new interval allocation.
-        scheduler.scheduleAtFixedRate(allocateTimestampsBatchTask, TIMESTAMP_ALLOCATION_INTERVAL_MS, TIMESTAMP_ALLOCATION_INTERVAL_MS, TimeUnit.MILLISECONDS);
-    }
-
-    /**
-     * Returns the next timestamp if available. Otherwise spins till the ts-persist thread allocates a new timestamp.
-     */
-    @Override
-    public long next() {
-
-        long currentMsFirstTimestamp = System.currentTimeMillis() * MAX_TX_PER_MS;
-
-        lastTimestamp += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-
-        // Return the next timestamp in case we are still in the same millisecond as the previous timestamp was. 
-        if (lastTimestamp >= currentMsFirstTimestamp) {
-            return lastTimestamp;
-        }
-
-        if (currentMsFirstTimestamp >= maxTimestamp) { // Intentional race to reduce synchronization overhead in every access to maxTimestamp                                                                                                                       
-            while (maxAllocatedTime <= currentMsFirstTimestamp) { // Waiting for the interval allocation
-                try {
-                    Thread.sleep(1000);
-                } catch (InterruptedException e) {
-                   continue;
-                }
-            }
-            assert (maxAllocatedTime > maxTimestamp);
-            maxTimestamp = maxAllocatedTime;
-        }
-
-        lastTimestamp = currentMsFirstTimestamp;
-
-        return lastTimestamp;
-    }
-
-    @Override
-    public long getLast() {
-        return lastTimestamp;
-    }
-
-    @Override
-    public String toString() {
-        return String.format("TimestampOracle -> LastTimestamp: %d, MaxTimestamp: %d", lastTimestamp, maxTimestamp);
-    }
-
-    @VisibleForTesting
-    static class InMemoryTimestampStorage implements TimestampStorage {
-
-        long maxTime = 0;
-
-        @Override
-        public void updateMaxTimestamp(long previousMaxTime, long nextMaxTime) {
-            maxTime = nextMaxTime;
-            LOG.info("Updating max timestamp: (previous:{}, new:{})", previousMaxTime, nextMaxTime);
-        }
-
-        @Override
-        public long getMaxTimestamp() {
-            return maxTime;
-        }
-
-    }
-}

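For reference, the deleted oracle reserves MAX_TX_PER_MS timestamp values per wall-clock millisecond, so a world-time timestamp and a millisecond value convert into each other with a single multiplication or division. A tiny illustration using only the constant shown above:

// Illustrative only: the mapping between wall-clock milliseconds and world-time
// timestamps used by the deleted WorldClockOracleImpl.
public class WorldClockMappingSketch {

    static final long MAX_TX_PER_MS = 1_000_000L; // 1 million timestamps reserved per ms

    public static void main(String[] args) {
        long nowMs = System.currentTimeMillis();
        long firstTimestampOfThisMs = nowMs * MAX_TX_PER_MS;              // what next() compares lastTimestamp against
        long backToWallClockMs = firstTimestampOfThisMs / MAX_TX_PER_MS;  // recovers nowMs
        System.out.println(nowMs + " ms -> timestamp " + firstTimestampOfThisMs
                + " -> " + backToWallClockMs + " ms");
    }
}
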
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/main/resources/default-omid-server-configuration.yml
----------------------------------------------------------------------
diff --git a/tso-server/src/main/resources/default-omid-server-configuration.yml b/tso-server/src/main/resources/default-omid-server-configuration.yml
index 4e45122..017af4f 100644
--- a/tso-server/src/main/resources/default-omid-server-configuration.yml
+++ b/tso-server/src/main/resources/default-omid-server-configuration.yml
@@ -26,10 +26,7 @@ numConcurrentCTWriters: 2
 batchSizePerCTWriter: 25
 # When this timeout expires, the contents of the batch are flushed to the datastore
 batchPersistTimeoutInMs: 10
-# Timestamp generation strategy
-# INCREMENTAL - [Default] regular counter
-# WORLD_TIME - world time based counter
-timestampType: INCREMENTAL
+
 # Default module configuration (No TSO High Availability & in-memory storage for timestamp and commit tables)
 timestampStoreModule: !!org.apache.omid.tso.InMemoryTimestampStorageModule [ ]
 commitTableStoreModule: !!org.apache.omid.tso.InMemoryCommitTableStorageModule [ ]

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java b/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
index 1c44d05..405102a 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
@@ -19,10 +19,8 @@ package org.apache.omid.tso;
 
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.SettableFuture;
-
 import org.apache.omid.metrics.MetricsRegistry;
 import org.apache.omid.metrics.NullMetricsProvider;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.jboss.netty.channel.Channel;
 import org.mockito.ArgumentCaptor;
 import org.slf4j.Logger;
@@ -30,7 +28,6 @@ import org.slf4j.LoggerFactory;
 import org.testng.annotations.BeforeMethod;
 import org.testng.annotations.Test;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
@@ -65,9 +62,6 @@ public class TestRequestProcessor {
         // Build the required scaffolding for the test
         MetricsRegistry metrics = new NullMetricsProvider();
 
-        TSOServerConfig config = new TSOServerConfig();
-        config.setConflictMapSize(CONFLICT_MAP_SIZE);
-
         TimestampOracleImpl timestampOracle =
                 new TimestampOracleImpl(metrics, new TimestampOracleImpl.InMemoryTimestampStorage(), new MockPanicker());
 
@@ -78,6 +72,9 @@ public class TestRequestProcessor {
         f.set(null);
         doReturn(f).when(persist).persistLowWatermark(any(Long.class));
 
+        TSOServerConfig config = new TSOServerConfig();
+        config.setConflictMapSize(CONFLICT_MAP_SIZE);
+
         requestProc = new RequestProcessorImpl(metrics, timestampOracle, persist, new MockPanicker(), config);
 
         // Initialize the state for the experiment
@@ -98,8 +95,7 @@ public class TestRequestProcessor {
         // verify that timestamps increase monotonically
         for (int i = 0; i < 100; i++) {
             requestProc.timestampRequest(null, new MonitoringContext(metrics));
-            verify(persist, timeout(100).times(1)).addTimestampToBatch(eq(firstTS), any(Channel.class), any(MonitoringContext.class));
-            firstTS += AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
+            verify(persist, timeout(100).times(1)).addTimestampToBatch(eq(firstTS++), any(Channel.class), any(MonitoringContext.class));
         }
 
     }
@@ -114,10 +110,10 @@ public class TestRequestProcessor {
         long firstTS = TScapture.getValue();
 
         List<Long> writeSet = Lists.newArrayList(1L, 20L, 203L);
-        requestProc.commitRequest(firstTS - AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
-        verify(persist, timeout(100).times(1)).addAbortToBatch(eq(firstTS - AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN), any(Channel.class), any(MonitoringContext.class));
+        requestProc.commitRequest(firstTS - 1, writeSet, false, null, new MonitoringContext(metrics));
+        verify(persist, timeout(100).times(1)).addAbortToBatch(eq(firstTS - 1), any(Channel.class), any(MonitoringContext.class));
 
-        requestProc.commitRequest(firstTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
+        requestProc.commitRequest(firstTS, writeSet, false, null, new MonitoringContext(metrics));
         ArgumentCaptor<Long> commitTScapture = ArgumentCaptor.forClass(Long.class);
 
         verify(persist, timeout(100).times(1)).addCommitToBatch(eq(firstTS), commitTScapture.capture(), any(Channel.class), any(MonitoringContext.class));
@@ -136,24 +132,14 @@ public class TestRequestProcessor {
                 TScapture.capture(), any(Channel.class), any(MonitoringContext.class));
         long thirdTS = TScapture.getValue();
 
-        requestProc.commitRequest(thirdTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
+        requestProc.commitRequest(thirdTS, writeSet, false, null, new MonitoringContext(metrics));
         verify(persist, timeout(100).times(1)).addCommitToBatch(eq(thirdTS), anyLong(), any(Channel.class), any(MonitoringContext.class));
-        requestProc.commitRequest(secondTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
+        requestProc.commitRequest(secondTS, writeSet, false, null, new MonitoringContext(metrics));
         verify(persist, timeout(100).times(1)).addAbortToBatch(eq(secondTS), any(Channel.class), any(MonitoringContext.class));
 
     }
 
     @Test(timeOut = 30_000)
-    public void testFence() throws Exception {
-
-        requestProc.fenceRequest(666L, null, new MonitoringContext(metrics));
-        ArgumentCaptor<Long> firstTScapture = ArgumentCaptor.forClass(Long.class);
-        verify(persist, timeout(100).times(1)).addFenceToBatch(eq(666L),
-                firstTScapture.capture(), any(Channel.class), any(MonitoringContext.class));
-
-    }
-
-    @Test(timeOut = 30_000)
     public void testCommitRequestAbortsWhenResettingRequestProcessorState() throws Exception {
 
         List<Long> writeSet = Collections.emptyList();
@@ -171,7 +157,7 @@ public class TestRequestProcessor {
         stateManager.initialize();
 
         // ...check that the transaction is aborted when trying to commit
-        requestProc.commitRequest(startTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
+        requestProc.commitRequest(startTS, writeSet, false, null, new MonitoringContext(metrics));
         verify(persist, timeout(100).times(1)).addAbortToBatch(eq(startTS), any(Channel.class), any(MonitoringContext.class));
 
     }
@@ -180,21 +166,21 @@ public class TestRequestProcessor {
     public void testLowWatermarkIsStoredOnlyWhenACacheElementIsEvicted() throws Exception {
 
         final int ANY_START_TS = 1;
-        final long FIRST_COMMIT_TS_EVICTED = AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
-        final long NEXT_COMMIT_TS_THAT_SHOULD_BE_EVICTED = FIRST_COMMIT_TS_EVICTED + AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
+        final long FIRST_COMMIT_TS_EVICTED = 1L;
+        final long NEXT_COMMIT_TS_THAT_SHOULD_BE_EVICTED = 2L;
 
         // Fill the cache to provoke a cache eviction
         for (long i = 0; i < CONFLICT_MAP_SIZE + CONFLICT_MAP_ASSOCIATIVITY; i++) {
             long writeSetElementHash = i + 1; // This is to match the assigned CT: K/V in cache = WS Element Hash/CT
             List<Long> writeSet = Lists.newArrayList(writeSetElementHash);
-            requestProc.commitRequest(ANY_START_TS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContext(metrics));
+            requestProc.commitRequest(ANY_START_TS, writeSet, false, null, new MonitoringContext(metrics));
         }
 
         Thread.currentThread().sleep(3000); // Allow the Request processor to finish the request processing
 
         // Check that first time its called is on init
         verify(persist, timeout(100).times(1)).persistLowWatermark(eq(0L));
-        // Then, check it is called when cache is full and the first element is evicted (should be a AbstractTransactionManager.NUM_OF_CHECKPOINTS)
+        // Then, check it is called when cache is full and the first element is evicted (should be a 1)
         verify(persist, timeout(100).times(1)).persistLowWatermark(eq(FIRST_COMMIT_TS_EVICTED));
         // Finally it should never be called with the next element
         verify(persist, timeout(100).never()).persistLowWatermark(eq(NEXT_COMMIT_TS_THAT_SHOULD_BE_EVICTED));

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java b/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
index 157bb48..968f4a9 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
@@ -248,8 +248,6 @@ public class TestTSOChannelHandlerNetty {
         testWritingTimestampRequest(channel);
 
         testWritingCommitRequest(channel);
-
-        testWritingFenceRequest(channel);
     }
 
     private void testWritingTimestampRequest(Channel channel) throws InterruptedException {
@@ -262,7 +260,7 @@ public class TestTSOChannelHandlerNetty {
         channel.write(tsBuilder.build()).await();
         verify(requestProcessor, timeout(100).times(1)).timestampRequest(any(Channel.class), any(MonitoringContext.class));
         verify(requestProcessor, timeout(100).never())
-                .commitRequest(anyLong(), anyCollectionOf(Long.class), anyCollectionOf(Long.class), anyBoolean(), any(Channel.class), any(MonitoringContext.class));
+                .commitRequest(anyLong(), anyCollectionOf(Long.class), anyBoolean(), any(Channel.class), any(MonitoringContext.class));
     }
 
     private void testWritingCommitRequest(Channel channel) throws InterruptedException {
@@ -279,23 +277,7 @@ public class TestTSOChannelHandlerNetty {
         channel.write(commitBuilder.build()).await();
         verify(requestProcessor, timeout(100).never()).timestampRequest(any(Channel.class), any(MonitoringContext.class));
         verify(requestProcessor, timeout(100).times(1))
-                .commitRequest(eq(666L), anyCollectionOf(Long.class), anyCollectionOf(Long.class), eq(false), any(Channel.class), any(MonitoringContext.class));
-    }
-
-    private void testWritingFenceRequest(Channel channel) throws InterruptedException {
-        // Reset mock
-        reset(requestProcessor);
-        TSOProto.Request.Builder fenceBuilder = TSOProto.Request.newBuilder();
-        TSOProto.FenceRequest.Builder fenceRequestBuilder = TSOProto.FenceRequest.newBuilder();
-        fenceRequestBuilder.setTableId(666);
-        fenceBuilder.setFenceRequest(fenceRequestBuilder.build());
-        TSOProto.Request r = fenceBuilder.build();
-        assertTrue(r.hasFenceRequest());
-        // Write into the channel
-        channel.write(fenceBuilder.build()).await();
-        verify(requestProcessor, timeout(100).never()).timestampRequest(any(Channel.class), any(MonitoringContext.class));
-        verify(requestProcessor, timeout(100).times(1))
-                .fenceRequest(eq(666L), any(Channel.class), any(MonitoringContext.class));
+                .commitRequest(eq(666L), anyCollectionOf(Long.class), eq(false), any(Channel.class), any(MonitoringContext.class));
     }
 
     // ----------------------------------------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/TestTimestampOracle.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestTimestampOracle.java b/tso-server/src/test/java/org/apache/omid/tso/TestTimestampOracle.java
index a5f236c..c75e95b 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestTimestampOracle.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestTimestampOracle.java
@@ -19,7 +19,6 @@ package org.apache.omid.tso;
 
 import org.apache.omid.metrics.MetricsRegistry;
 import org.apache.omid.timestamp.storage.TimestampStorage;
-import org.apache.omid.transaction.AbstractTransactionManager;
 import org.mockito.InjectMocks;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
@@ -52,8 +51,6 @@ public class TestTimestampOracle {
     private Panicker panicker;
     @Mock
     private TimestampStorage timestampStorage;
-    @Mock
-    TSOServerConfig config;
 
     // Component under test
     @InjectMocks
@@ -73,7 +70,7 @@ public class TestTimestampOracle {
         long last = timestampOracle.next();
         for (int i = 0; i < (3 * TimestampOracleImpl.TIMESTAMP_BATCH); i++) {
             long current = timestampOracle.next();
-            assertEquals(current, last + AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN, "Not monotonic growth");
+            assertEquals(current, last + 1, "Not monotonic growth");
             last = current;
         }
         assertTrue(timestampOracle.getLast() == last);

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
----------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java b/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
deleted file mode 100644
index df59530..0000000
--- a/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.tso;
-
-import org.apache.omid.metrics.MetricsRegistry;
-import org.apache.omid.timestamp.storage.TimestampStorage;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.verify;
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertTrue;
-
-public class TestWorldTimeOracle {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TestWorldTimeOracle.class);
-
-    @Mock
-    private MetricsRegistry metrics;
-    @Mock
-    private Panicker panicker;
-    @Mock
-    private TimestampStorage timestampStorage;
-    @Mock
-    private TSOServerConfig config;
-
-    // Component under test
-    @InjectMocks
-    private WorldClockOracleImpl worldClockOracle;
-
-    @BeforeMethod(alwaysRun = true, timeOut = 30_000)
-    public void initMocksAndComponents() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @Test(timeOut = 30_000)
-    public void testMonotonicTimestampGrowth() throws Exception {
-
-        // Intialize component under test
-        worldClockOracle.initialize();
-
-        long last = worldClockOracle.next();
-        
-        int timestampIntervalSec = (int) (WorldClockOracleImpl.TIMESTAMP_INTERVAL_MS / 1000) * 2;
-        for (int i = 0; i < timestampIntervalSec; i++) {
-            long current = worldClockOracle.next();
-            assertTrue(current > last+1 , "Timestamp should be based on world time");
-            last = current;
-            Thread.sleep(1000);
-        }
-
-        assertTrue(worldClockOracle.getLast() == last);
-        LOG.info("Last timestamp: {}", last);
-    }
-
-    @Test(timeOut = 10_000)
-    public void testTimestampOraclePanicsWhenTheStorageHasProblems() throws Exception {
-
-        // Intialize component under test
-        worldClockOracle.initialize();
-
-        // Cause an exception when updating the max timestamp
-        final CountDownLatch updateMaxTimestampMethodCalled = new CountDownLatch(1);
-        doAnswer(new Answer() {
-            @Override
-            public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
-                updateMaxTimestampMethodCalled.countDown();
-                throw new RuntimeException("Out of memory or something");
-            }
-        }).when(timestampStorage).updateMaxTimestamp(anyLong(), anyLong());
-
-        // Make the previous exception to be thrown
-        Thread allocThread = new Thread("AllocThread") {
-            @Override
-            public void run() {
-                while (true) {
-                    worldClockOracle.next();
-                }
-            }
-        };
-        allocThread.start();
-
-        updateMaxTimestampMethodCalled.await();
-
-        // Verify that it has blown up
-        verify(panicker, atLeastOnce()).panic(anyString(), any(Throwable.class));
-    }
-
-}


[4/4] incubator-omid git commit: Revert to the state of the project at 0c371361781957c96b20295290a167f7be3b33e2

Posted by fp...@apache.org.
Revert to the state of the project at 0c371361781957c96b20295290a167f7be3b33e2

This reverts the master branch to the state it was in before Ohad's commits related to the
Phoenix integration:

https://github.com/apache/incubator-omid/commit/0005b3603571b28e01e5fbd8943e23efc5ead781
https://github.com/apache/incubator-omid/commit/60c9e714fd862f17407202565ac1eb7011910e1e
https://github.com/apache/incubator-omid/commit/0cae2ff18f507f640edbe484e24000630856d3f0
https://github.com/apache/incubator-omid/commit/1f83aeda963472c861b4b622c08b3c8c725fbd16
https://github.com/apache/incubator-omid/commit/4c8d7821fbbad943f46f3dfc0ac51b0ea908f90f
https://github.com/apache/incubator-omid/commit/fab2cfebbd9f4c3d95a1d02a428c90c1dc649db5

Change-Id: Id94c40909e1522dbe01861eb1ccfef47fb4c0668


Project: http://git-wip-us.apache.org/repos/asf/incubator-omid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-omid/commit/7a9d7d6f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-omid/tree/7a9d7d6f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-omid/diff/7a9d7d6f

Branch: refs/heads/master
Commit: 7a9d7d6fd2aaf7975c39b342b5944bd699c681da
Parents: fab2cfe
Author: Francisco Perez-Sorrosal <fp...@apache.org>
Authored: Tue Jan 30 15:36:14 2018 -0800
Committer: Francisco Perez-Sorrosal <fp...@apache.org>
Committed: Tue Jan 30 15:37:03 2018 -0800

----------------------------------------------------------------------
 common/src/main/proto/TSOProto.proto            |  19 -
 .../transaction/AttributeSetSnapshotFilter.java |  85 ---
 .../apache/omid/transaction/HBaseCellId.java    |  22 +-
 .../HBaseOmidClientConfiguration.java           |  10 -
 .../transaction/HBaseSyncPostCommitter.java     |   2 +-
 .../omid/transaction/HBaseTransaction.java      |  10 +-
 .../transaction/HBaseTransactionClient.java     |   2 +
 .../transaction/HBaseTransactionManager.java    |  47 +-
 .../omid/transaction/HTableAccessWrapper.java   |  54 --
 .../apache/omid/transaction/SnapshotFilter.java |  50 --
 .../omid/transaction/SnapshotFilterImpl.java    | 508 ------------------
 .../org/apache/omid/transaction/TTable.java     | 345 ++++++++-----
 .../omid/transaction/TableAccessWrapper.java    |  34 --
 .../apache/omid/transaction/TestCheckpoint.java | 320 ------------
 .../omid/transaction/TestColumnIterator.java    |   2 +-
 .../transaction/TestHBaseTransactionClient.java |  60 ++-
 .../TestHBaseTransactionManager.java            |   2 +-
 .../omid/transaction/TestShadowCells.java       |   7 +-
 .../org/apache/omid/transaction/CellUtils.java  |   3 -
 hbase-coprocessor/pom.xml                       |   1 +
 .../hbase/regionserver/OmidRegionScanner.java   | 128 -----
 .../hbase/regionserver/RegionAccessWrapper.java |  59 ---
 .../omid/transaction/OmidSnapshotFilter.java    | 160 ------
 .../TSOForSnapshotFilterTestModule.java         | 134 -----
 .../omid/transaction/TestSnapshotFilter.java    | 514 -------------------
 .../hadoop/hbase/regionserver/Region.java       |   7 -
 .../omid/transaction/AbstractTransaction.java   | 108 +---
 .../transaction/AbstractTransactionManager.java | 140 +++--
 .../omid/transaction/TransactionManager.java    |  11 -
 .../java/org/apache/omid/tso/client/CellId.java |   3 -
 .../apache/omid/tso/client/MockTSOClient.java   |  68 +--
 .../tso/client/OmidClientConfiguration.java     |  12 -
 .../org/apache/omid/tso/client/TSOClient.java   | 129 +----
 .../org/apache/omid/tso/client/TSOProtocol.java |  14 -
 .../apache/omid/tso/util/DummyCellIdImpl.java   |  15 -
 .../src/main/resources/omid-client-config.yml   |   6 +-
 tso-server/pom.xml                              |   1 +
 .../main/java/org/apache/omid/tso/Batch.java    |  10 -
 .../java/org/apache/omid/tso/PersistEvent.java  |  12 +-
 .../apache/omid/tso/PersistenceProcessor.java   |   2 -
 .../omid/tso/PersistenceProcessorHandler.java   |   9 -
 .../omid/tso/PersistenceProcessorImpl.java      |  18 +-
 .../org/apache/omid/tso/ReplyProcessor.java     |  13 -
 .../org/apache/omid/tso/ReplyProcessorImpl.java |  19 -
 .../org/apache/omid/tso/RequestProcessor.java   |   3 +-
 .../apache/omid/tso/RequestProcessorImpl.java   | 121 +----
 .../org/apache/omid/tso/TSOChannelHandler.java  |   4 -
 .../java/org/apache/omid/tso/TSOModule.java     |  11 +-
 .../org/apache/omid/tso/TSOServerConfig.java    |  19 -
 .../apache/omid/tso/TimestampOracleImpl.java    |  10 +-
 .../apache/omid/tso/WorldClockOracleImpl.java   | 185 -------
 .../default-omid-server-configuration.yml       |   5 +-
 .../apache/omid/tso/TestRequestProcessor.java   |  42 +-
 .../omid/tso/TestTSOChannelHandlerNetty.java    |  22 +-
 .../apache/omid/tso/TestTimestampOracle.java    |   5 +-
 .../apache/omid/tso/TestWorldTimeOracle.java    | 119 -----
 ...tionOfTSOClientServerBasicFunctionality.java |  58 +--
 .../client/TestTSOClientConnectionToTSO.java    |   8 +-
 ...stTSOClientRequestAndResponseBehaviours.java |   4 +-
 .../TestTSOClientRowAndCellLevelConflict.java   | 203 --------
 60 files changed, 461 insertions(+), 3533 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/common/src/main/proto/TSOProto.proto
----------------------------------------------------------------------
diff --git a/common/src/main/proto/TSOProto.proto b/common/src/main/proto/TSOProto.proto
index b434421..749beaa 100644
--- a/common/src/main/proto/TSOProto.proto
+++ b/common/src/main/proto/TSOProto.proto
@@ -24,7 +24,6 @@ message Request {
     optional TimestampRequest timestampRequest = 1;
     optional CommitRequest commitRequest = 2;
     optional HandshakeRequest handshakeRequest = 3;
-    optional FenceRequest fenceRequest = 4;
 }
 
 message TimestampRequest {
@@ -34,29 +33,18 @@ message CommitRequest {
     optional int64 startTimestamp = 1;
     optional bool isRetry = 2 [default = false];
     repeated int64 cellId = 3;
-    repeated int64 TableId = 4;
-}
-
-message FenceRequest {
-    optional int64 TableId = 1;
 }
 
 message Response {
     optional TimestampResponse timestampResponse = 1;
     optional CommitResponse commitResponse = 2;
     optional HandshakeResponse handshakeResponse = 3;
-    optional FenceResponse fenceResponse = 4;
 }
 
 message TimestampResponse {
     optional int64 startTimestamp = 1;
 }
 
-message FenceResponse {
-    optional int64 TableId = 1;
-    optional int64 FenceId = 2;
-}
-
 message CommitResponse {
     optional bool aborted = 1;
     optional int64 startTimestamp = 2;
@@ -76,10 +64,3 @@ message HandshakeResponse {
     optional bool clientCompatible = 1;
     optional Capabilities serverCapabilities = 2;
 }
-
-message Transaction {
-    optional int64 timestamp = 1;
-    optional int64 readTimestamp = 2;
-    optional int32 visibilityLevel = 3;
-    optional int64 epoch = 4;
-}

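With FenceRequest and FenceResponse gone, a commit request carries only the start timestamp, the retry flag and the cell ids. A short sketch of building one through the generated builders, mirroring the usage in TestTSOChannelHandlerNetty earlier in this thread (channel plumbing omitted):

// Hedged sketch: assembling a post-revert commit request with the generated protobuf classes.
import org.apache.omid.proto.TSOProto;

public class CommitRequestSketch {

    public static void main(String[] args) {
        TSOProto.CommitRequest.Builder commit = TSOProto.CommitRequest.newBuilder()
                .setStartTimestamp(666L)  // start timestamp previously handed out by the TSO
                .setIsRetry(false)
                .addCellId(1L)
                .addCellId(20L);

        TSOProto.Request request = TSOProto.Request.newBuilder()
                .setCommitRequest(commit.build())
                .build();

        System.out.println(request); // in the server tests this is written to the Netty channel
    }
}
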
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/AttributeSetSnapshotFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/AttributeSetSnapshotFilter.java b/hbase-client/src/main/java/org/apache/omid/transaction/AttributeSetSnapshotFilter.java
deleted file mode 100644
index 6906939..0000000
--- a/hbase-client/src/main/java/org/apache/omid/transaction/AttributeSetSnapshotFilter.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.omid.committable.CommitTable.CommitTimestamp;
-import org.apache.omid.proto.TSOProto;
-
-import com.google.common.base.Optional;
-
-public class AttributeSetSnapshotFilter implements SnapshotFilter {
-
-    private HTableInterface table;
-
-    public AttributeSetSnapshotFilter(HTableInterface table) {
-        this.table = table;
-    }
-
-    private TSOProto.Transaction.Builder getBuilder(HBaseTransaction transaction) {
-        return TSOProto.Transaction.newBuilder().setTimestamp(transaction.getTransactionId())
-                .setReadTimestamp(transaction.getReadTimestamp())
-                .setVisibilityLevel(transaction.getVisibilityLevel().ordinal())
-                .setEpoch(transaction.getEpoch());
-    }
-
-    @Override
-    public Result get(TTable ttable, Get get, HBaseTransaction transaction) throws IOException {
-        get.setAttribute(CellUtils.TRANSACTION_ATTRIBUTE, getBuilder(transaction).build().toByteArray());
-        get.setAttribute(CellUtils.CLIENT_GET_ATTRIBUTE, Bytes.toBytes(true));
-
-        return table.get(get);
-    }
-
-    @Override
-    public ResultScanner getScanner(TTable ttable, Scan scan, HBaseTransaction transaction) throws IOException {
-        scan.setAttribute(CellUtils.TRANSACTION_ATTRIBUTE, getBuilder(transaction).build().toByteArray());
-
-        return table.getScanner(scan);
-    }
-
-    @Override
-    public List<Cell> filterCellsForSnapshot(List<Cell> rawCells, HBaseTransaction transaction,
-                                      int versionsToRequest, Map<String, List<Cell>> familyDeletionCache) throws IOException {
-        throw new UnsupportedOperationException();
-    }
-
-    public boolean isCommitted(HBaseCellId hBaseCellId, long epoch) throws TransactionException {
-        throw new UnsupportedOperationException();
-    }
-
-    public CommitTimestamp locateCellCommitTimestamp(long cellStartTimestamp, long epoch,
-            CommitTimestampLocator locator) throws IOException {
-        throw new UnsupportedOperationException();        
-    }
-
-    public Optional<CommitTimestamp> readCommitTimestampFromShadowCell(long cellStartTimestamp, CommitTimestampLocator locator)
-            throws IOException {
-        throw new UnsupportedOperationException();                
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseCellId.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseCellId.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseCellId.java
index 63e6376..8d0641b 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseCellId.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseCellId.java
@@ -17,9 +17,7 @@
  */
 package org.apache.omid.transaction;
 
-import com.google.common.hash.Hasher;
 import com.google.common.hash.Hashing;
-
 import org.apache.omid.tso.client.CellId;
 import org.apache.hadoop.hbase.client.HTableInterface;
 
@@ -71,7 +69,7 @@ public class HBaseCellId implements CellId {
 
     @Override
     public long getCellId() {
-        return getHasher()
+        return Hashing.murmur3_128().newHasher()
                 .putBytes(table.getTableName())
                 .putBytes(row)
                 .putBytes(family)
@@ -79,22 +77,4 @@ public class HBaseCellId implements CellId {
                 .hash().asLong();
     }
 
-    @Override
-    public long getTableId() {
-        return getHasher()
-                .putBytes(table.getTableName())
-                .hash().asLong();
-    }
-
-    @Override
-    public long getRowId() {
-        return getHasher()
-                .putBytes(table.getTableName())
-                .putBytes(row)
-                .hash().asLong();
-    }
-
-    public static Hasher getHasher() {
-        return Hashing.murmur3_128().newHasher();
-    }
 }
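
After this revert, HBaseCellId exposes only getCellId(): a single murmur3_128 hash over table name, row, family and qualifier, with the shared getHasher() helper and the table/row-level ids gone. A standalone sketch of the same computation using Guava follows; the class and method names are illustrative.

    import com.google.common.hash.Hashing;

    public class CellIdHashSketch {

        // Equivalent of the reverted getCellId(): one 128-bit murmur3 hash,
        // truncated to a long, over the full cell coordinates.
        static long cellId(byte[] tableName, byte[] row, byte[] family, byte[] qualifier) {
            return Hashing.murmur3_128().newHasher()
                    .putBytes(tableName)
                    .putBytes(row)
                    .putBytes(family)
                    .putBytes(qualifier)
                    .hash().asLong();
        }
    }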

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseOmidClientConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseOmidClientConfiguration.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseOmidClientConfiguration.java
index d945688..f5a1823 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseOmidClientConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseOmidClientConfiguration.java
@@ -20,11 +20,9 @@ package org.apache.omid.transaction;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.inject.Inject;
 import com.google.inject.name.Named;
-
 import org.apache.omid.YAMLUtils;
 import org.apache.omid.metrics.MetricsRegistry;
 import org.apache.omid.tools.hbase.SecureHBaseConfig;
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
 import org.apache.omid.tso.client.OmidClientConfiguration.PostCommitMode;
 import org.apache.omid.tso.client.OmidClientConfiguration;
 import org.apache.hadoop.conf.Configuration;
@@ -75,14 +73,6 @@ public class HBaseOmidClientConfiguration extends SecureHBaseConfig {
         omidClientConfiguration.setPostCommitMode(postCommitMode);
     }
 
-    public ConflictDetectionLevel getConflictAnalysisLevel() {
-        return omidClientConfiguration.getConflictAnalysisLevel();
-    }
-
-    public void setConflictAnalysisLevel(ConflictDetectionLevel conflictAnalysisLevel) {
-        omidClientConfiguration.setConflictAnalysisLevel(conflictAnalysisLevel);
-    }
-
     public String getCommitTableName() {
         return commitTableName;
     }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseSyncPostCommitter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseSyncPostCommitter.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseSyncPostCommitter.java
index 06e5c89..952d067 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseSyncPostCommitter.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseSyncPostCommitter.java
@@ -66,7 +66,7 @@ public class HBaseSyncPostCommitter implements PostCommitActions {
                 Put put = new Put(cell.getRow());
                 put.add(cell.getFamily(),
                         CellUtils.addShadowCellSuffix(cell.getQualifier(), 0, cell.getQualifier().length),
-                        cell.getTimestamp(),
+                        tx.getStartTimestamp(),
                         Bytes.toBytes(tx.getCommitTimestamp()));
                 try {
                     cell.getTable().put(put);
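
The hunk above versions the shadow cell with the transaction's start timestamp again, rather than with the cell's own timestamp, so the shadow cell lines up with the data cell written by the transaction. A hedged sketch of that write path against the HBase client API already used in this file; the helper and variable names are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ShadowCellWriteSketch {

        // Writes the shadow cell for one committed cell: versioned with the
        // transaction start timestamp and carrying the commit timestamp as value.
        static void writeShadowCell(HTableInterface table, byte[] row, byte[] family,
                                    byte[] shadowQualifier, long startTs, long commitTs)
                throws IOException {
            Put put = new Put(row);
            put.add(family, shadowQualifier, startTs, Bytes.toBytes(commitTs));
            table.put(put);
        }
    }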

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransaction.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransaction.java
index c32aff7..2bae5f5 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransaction.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransaction.java
@@ -19,32 +19,26 @@ package org.apache.omid.transaction;
 
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.omid.transaction.AbstractTransaction.VisibilityLevel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 public class HBaseTransaction extends AbstractTransaction<HBaseCellId> {
     private static final Logger LOG = LoggerFactory.getLogger(HBaseTransaction.class);
 
-    public HBaseTransaction(long transactionId, long epoch, Set<HBaseCellId> writeSet, AbstractTransactionManager tm) {
+    HBaseTransaction(long transactionId, long epoch, Set<HBaseCellId> writeSet, AbstractTransactionManager tm) {
         super(transactionId, epoch, writeSet, tm);
     }
 
-    public HBaseTransaction(long transactionId, long readTimestamp, VisibilityLevel visibilityLevel, long epoch, Set<HBaseCellId> writeSet, AbstractTransactionManager tm) {
-        super(transactionId, readTimestamp, visibilityLevel, epoch, writeSet, tm);
-    }
-
     @Override
     public void cleanup() {
         Set<HBaseCellId> writeSet = getWriteSet();
         for (final HBaseCellId cell : writeSet) {
             Delete delete = new Delete(cell.getRow());
-            delete.deleteColumn(cell.getFamily(), cell.getQualifier(), cell.getTimestamp());
+            delete.deleteColumn(cell.getFamily(), cell.getQualifier(), getStartTimestamp());
             try {
                 cell.getTable().delete(delete);
             } catch (IOException e) {
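
With checkpoints gone, cleanup() deletes each tentative write at the transaction's start timestamp instead of at the cell's recorded timestamp. A small sketch of that rollback step, assuming the same HBase 0.98-era client API used in the file; the names are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTableInterface;

    public class RollbackCleanupSketch {

        // Rolls back one tentative write by deleting the exact version written
        // at the transaction's start timestamp, as the reverted cleanup() does.
        static void deleteTentativeCell(HTableInterface table, byte[] row, byte[] family,
                                        byte[] qualifier, long startTimestamp)
                throws IOException {
            Delete delete = new Delete(row);
            delete.deleteColumn(family, qualifier, startTimestamp);
            table.delete(delete);
        }
    }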

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionClient.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionClient.java
index f5311fb..c5d4d59 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionClient.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionClient.java
@@ -18,5 +18,7 @@
 package org.apache.omid.transaction;
 
 public interface HBaseTransactionClient {
+    boolean isCommitted(HBaseCellId hBaseCellId) throws TransactionException;
+
     long getLowWatermark() throws TransactionException;
 }

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionManager.java b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionManager.java
index 71d1f98..990758c 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionManager.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/HBaseTransactionManager.java
@@ -23,14 +23,12 @@ import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import org.apache.omid.committable.CommitTable;
 import org.apache.omid.committable.CommitTable.CommitTimestamp;
 import org.apache.omid.committable.hbase.HBaseCommitTable;
 import org.apache.omid.committable.hbase.HBaseCommitTableConfig;
 import org.apache.omid.tools.hbase.HBaseLogin;
 import org.apache.omid.tso.client.CellId;
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
 import org.apache.omid.tso.client.TSOClient;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
@@ -197,9 +195,35 @@ public class HBaseTransactionManager extends AbstractTransactionManager implemen
         }
     }
 
+    // ----------------------------------------------------------------------------------------------------------------
+    // HBaseTransactionClient method implementations
+    // ----------------------------------------------------------------------------------------------------------------
+
     @Override
-    public long getHashForTable(byte[] tableName) {
-        return HBaseCellId.getHasher().putBytes(tableName).hash().asLong();
+    public boolean isCommitted(HBaseCellId hBaseCellId) throws TransactionException {
+        try {
+            CommitTimestamp tentativeCommitTimestamp =
+                    locateCellCommitTimestamp(hBaseCellId.getTimestamp(), tsoClient.getEpoch(),
+                                              new CommitTimestampLocatorImpl(hBaseCellId, Maps.<Long, Long>newHashMap()));
+
+            // If transaction that added the cell was invalidated
+            if (!tentativeCommitTimestamp.isValid()) {
+                return false;
+            }
+
+            switch (tentativeCommitTimestamp.getLocation()) {
+                case COMMIT_TABLE:
+                case SHADOW_CELL:
+                    return true;
+                case NOT_PRESENT:
+                    return false;
+                case CACHE: // cache was empty
+                default:
+                    return false;
+            }
+        } catch (IOException e) {
+            throw new TransactionException("Failure while checking if a transaction was committed", e);
+        }
     }
 
     @Override
@@ -229,27 +253,14 @@ public class HBaseTransactionManager extends AbstractTransactionManager implemen
 
     }
 
-    public ConflictDetectionLevel getConflictDetectionLevel() {
-        return tsoClient.getConflictDetectionLevel();
-    }
-
     static class CommitTimestampLocatorImpl implements CommitTimestampLocator {
 
         private HBaseCellId hBaseCellId;
         private final Map<Long, Long> commitCache;
-        private TableAccessWrapper tableAccessWrapper;
-
-        CommitTimestampLocatorImpl(HBaseCellId hBaseCellId, Map<Long, Long> commitCache, TableAccessWrapper tableAccessWrapper) {
-            this.hBaseCellId = hBaseCellId;
-            this.commitCache = commitCache;
-            this.tableAccessWrapper = tableAccessWrapper;
-        }
 
         CommitTimestampLocatorImpl(HBaseCellId hBaseCellId, Map<Long, Long> commitCache) {
             this.hBaseCellId = hBaseCellId;
             this.commitCache = commitCache;
-            this.tableAccessWrapper = null;
-            this.tableAccessWrapper = new HTableAccessWrapper(hBaseCellId.getTable(), hBaseCellId.getTable());
         }
 
         @Override
@@ -269,7 +280,7 @@ public class HBaseTransactionManager extends AbstractTransactionManager implemen
             get.addColumn(family, shadowCellQualifier);
             get.setMaxVersions(1);
             get.setTimeStamp(startTimestamp);
-            Result result = tableAccessWrapper.get(get);
+            Result result = hBaseCellId.getTable().get(get);
             if (result.containsColumn(family, shadowCellQualifier)) {
                 return Optional.of(Bytes.toLong(result.getValue(family, shadowCellQualifier)));
             }
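
The restored CommitTimestampLocatorImpl reads shadow cells straight from the data table rather than through a TableAccessWrapper. The sketch below mirrors that lookup; the class and parameter names are illustrative.

    import java.io.IOException;

    import com.google.common.base.Optional;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ShadowCellReadSketch {

        // Reads a cell's commit timestamp back from its shadow cell, stored at
        // the same version (the start timestamp) with the commit timestamp as value.
        static Optional<Long> readCommitTsFromShadowCell(HTableInterface table, byte[] row,
                                                         byte[] family, byte[] shadowQualifier,
                                                         long startTimestamp) throws IOException {
            Get get = new Get(row);
            get.addColumn(family, shadowQualifier);
            get.setMaxVersions(1);
            get.setTimeStamp(startTimestamp);
            Result result = table.get(get);
            if (result.containsColumn(family, shadowQualifier)) {
                return Optional.of(Bytes.toLong(result.getValue(family, shadowQualifier)));
            }
            return Optional.absent();
        }
    }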

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/HTableAccessWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/HTableAccessWrapper.java b/hbase-client/src/main/java/org/apache/omid/transaction/HTableAccessWrapper.java
deleted file mode 100644
index 1994cad..0000000
--- a/hbase-client/src/main/java/org/apache/omid/transaction/HTableAccessWrapper.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-
-// This class wraps the HTableInterface object when doing client side filtering.
-public class HTableAccessWrapper implements TableAccessWrapper {
-
-    private final HTableInterface writeTable;
-    private final HTableInterface readTable;
-    
-    public HTableAccessWrapper(HTableInterface table, HTableInterface healerTable) {
-        this.readTable = table;
-        this.writeTable = healerTable;
-    }
-
-    @Override
-    public Result[] get(List<Get> get) throws IOException {
-        return readTable.get(get);
-    }
-
-    @Override
-    public Result get(Get get) throws IOException {
-        return readTable.get(get);
-    }
-
-    @Override
-    public void put(Put put) throws IOException {
-        writeTable.put(put);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilter.java b/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilter.java
deleted file mode 100644
index 112544f..0000000
--- a/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilter.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.omid.committable.CommitTable.CommitTimestamp;
-
-import com.google.common.base.Optional;
-
-public interface SnapshotFilter {
-    
-    public Result get(TTable ttable, Get get, HBaseTransaction transaction) throws IOException;
-
-    public ResultScanner getScanner(TTable ttable, Scan scan, HBaseTransaction transaction) throws IOException;
-
-    public List<Cell> filterCellsForSnapshot(List<Cell> rawCells, HBaseTransaction transaction,
-            int versionsToRequest, Map<String, List<Cell>> familyDeletionCache) throws IOException;
-
-    public boolean isCommitted(HBaseCellId hBaseCellId, long epoch) throws TransactionException;
-
-    public CommitTimestamp locateCellCommitTimestamp(long cellStartTimestamp, long epoch,
-            CommitTimestampLocator locator) throws IOException;
-
-    public Optional<CommitTimestamp> readCommitTimestampFromShadowCell(long cellStartTimestamp, CommitTimestampLocator locator)
-            throws IOException;
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilterImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilterImpl.java b/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilterImpl.java
deleted file mode 100644
index a656aec..0000000
--- a/hbase-client/src/main/java/org/apache/omid/transaction/SnapshotFilterImpl.java
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import static org.apache.omid.committable.CommitTable.CommitTimestamp.Location.CACHE;
-import static org.apache.omid.committable.CommitTable.CommitTimestamp.Location.COMMIT_TABLE;
-import static org.apache.omid.committable.CommitTable.CommitTimestamp.Location.NOT_PRESENT;
-import static org.apache.omid.committable.CommitTable.CommitTimestamp.Location.SHADOW_CELL;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.omid.committable.CommitTable;
-import org.apache.omid.committable.CommitTable.CommitTimestamp;
-import org.apache.omid.proto.TSOProto;
-import org.apache.omid.transaction.AbstractTransaction.VisibilityLevel;
-import org.apache.omid.transaction.HBaseTransactionManager.CommitTimestampLocatorImpl;
-import org.apache.omid.transaction.TTable.TransactionalClientScanner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimaps;
-
-public class SnapshotFilterImpl implements SnapshotFilter {
-
-    private static Logger LOG = LoggerFactory.getLogger(SnapshotFilterImpl.class);
-
-    private TableAccessWrapper tableAccessWrapper;
-
-    private CommitTable.Client commitTableClient;
-    
-    public SnapshotFilterImpl(TableAccessWrapper tableAccessWrapper, CommitTable.Client commitTableClient) throws IOException {
-        this.tableAccessWrapper = tableAccessWrapper;
-        this.commitTableClient = commitTableClient;
-    }
-
-    public SnapshotFilterImpl(TableAccessWrapper tableAccessWrapper) throws IOException {
-        this(tableAccessWrapper, null);
-    }
-
-    public SnapshotFilterImpl(CommitTable.Client commitTableClient) throws IOException {
-        this(null, commitTableClient);
-    }
-
-    void setTableAccessWrapper(TableAccessWrapper tableAccessWrapper) {
-        this.tableAccessWrapper = tableAccessWrapper;
-    }
-
-    /**
-     * Check whether a cell was deleted using family deletion marker
-     *
-     * @param cell                The cell to check
-     * @param transaction         Defines the current snapshot
-     * @param familyDeletionCache Accumulates the family deletion markers to identify cells that deleted with a higher version
-     * @param commitCache         Holds shadow cells information
-     * @return Whether the cell was deleted
-     */
-    private boolean checkFamilyDeletionCache(Cell cell, HBaseTransaction transaction, Map<String, List<Cell>> familyDeletionCache, Map<Long, Long> commitCache) throws IOException {
-        List<Cell> familyDeletionCells = familyDeletionCache.get(Bytes.toString((cell.getRow())));
-        if (familyDeletionCells != null) {
-            for(Cell familyDeletionCell : familyDeletionCells) {
-                String family = Bytes.toString(cell.getFamily());
-                String familyDeletion = Bytes.toString(familyDeletionCell.getFamily());
-                if (family.equals(familyDeletion)) {
-                    Optional<Long> familyDeletionCommitTimestamp = getCommitTimestamp(familyDeletionCell, transaction, commitCache);
-                    if (familyDeletionCommitTimestamp.isPresent() && familyDeletionCommitTimestamp.get() >= cell.getTimestamp()) {
-                        return true;
-                    }
-                }
-            }
-        }
-        return false;
-    }
-
-    private void healShadowCell(Cell cell, long commitTimestamp) {
-        Put put = new Put(CellUtil.cloneRow(cell));
-        byte[] family = CellUtil.cloneFamily(cell);
-        byte[] shadowCellQualifier = CellUtils.addShadowCellSuffix(cell.getQualifierArray(),
-                                                                   cell.getQualifierOffset(),
-                                                                   cell.getQualifierLength());
-        put.add(family, shadowCellQualifier, cell.getTimestamp(), Bytes.toBytes(commitTimestamp));
-        try {
-            tableAccessWrapper.put(put);
-        } catch (IOException e) {
-            LOG.warn("Failed healing shadow cell for kv {}", cell, e);
-        }
-    }
-
-    /**
-     * Check if the transaction commit data is in the shadow cell
-     * @param cellStartTimestamp
-     *            the transaction start timestamp
-     *        locator
-     *            the timestamp locator
-     * @throws IOException
-     */
-    @Override
-    public Optional<CommitTimestamp> readCommitTimestampFromShadowCell(long cellStartTimestamp, CommitTimestampLocator locator)
-            throws IOException
-    {
-
-        Optional<CommitTimestamp> commitTS = Optional.absent();
-
-        Optional<Long> commitTimestamp = locator.readCommitTimestampFromShadowCell(cellStartTimestamp);
-        if (commitTimestamp.isPresent()) {
-            commitTS = Optional.of(new CommitTimestamp(SHADOW_CELL, commitTimestamp.get(), true)); // Valid commit TS
-        }
-
-        return commitTS;
-    }
-
-    /**
-     * This function returns the commit timestamp for a particular cell if the transaction was already committed in
-     * the system. In case the transaction was not committed and the cell was written by transaction initialized by a
-     * previous TSO server, an invalidation try occurs.
-     * Otherwise the function returns a value that indicates that the commit timestamp was not found.
-     * @param cellStartTimestamp
-     *          start timestamp of the cell to locate the commit timestamp for.
-     * @param epoch
-     *          the epoch of the TSO server the current tso client is working with.
-     * @param locator
-     *          a locator to find the commit timestamp in the system.
-     * @return the commit timestamp joint with the location where it was found
-     *         or an object indicating that it was not found in the system
-     * @throws IOException  in case of any I/O issues
-     */
-    @Override
-    public CommitTimestamp locateCellCommitTimestamp(long cellStartTimestamp, long epoch,
-                                                     CommitTimestampLocator locator) throws IOException {
-
-        try {
-            // 1) First check the cache
-            Optional<Long> commitTimestamp = locator.readCommitTimestampFromCache(cellStartTimestamp);
-            if (commitTimestamp.isPresent()) { // Valid commit timestamp
-                return new CommitTimestamp(CACHE, commitTimestamp.get(), true);
-            }
-
-            // 2) Then check the commit table
-            // If the data was written at a previous epoch, check whether the transaction was invalidated
-            Optional<CommitTimestamp> commitTimeStamp = commitTableClient.getCommitTimestamp(cellStartTimestamp).get();
-            if (commitTimeStamp.isPresent()) {
-                return commitTimeStamp.get();
-            }
-
-            // 3) Read from shadow cell
-            commitTimeStamp = readCommitTimestampFromShadowCell(cellStartTimestamp, locator);
-            if (commitTimeStamp.isPresent()) {
-                return commitTimeStamp.get();
-            }
-
-            // 4) Check the epoch and invalidate the entry
-            // if the data was written by a transaction from a previous epoch (previous TSO)
-            if (cellStartTimestamp < epoch) {
-                boolean invalidated = commitTableClient.tryInvalidateTransaction(cellStartTimestamp).get();
-                if (invalidated) { // Invalid commit timestamp
-                    return new CommitTimestamp(COMMIT_TABLE, CommitTable.INVALID_TRANSACTION_MARKER, false);
-                }
-            }
-
-            // 5) We did not manage to invalidate the transactions then check the commit table
-            commitTimeStamp = commitTableClient.getCommitTimestamp(cellStartTimestamp).get();
-            if (commitTimeStamp.isPresent()) {
-                return commitTimeStamp.get();
-            }
-
-            // 6) Read from shadow cell
-            commitTimeStamp = readCommitTimestampFromShadowCell(cellStartTimestamp, locator);
-            if (commitTimeStamp.isPresent()) {
-                return commitTimeStamp.get();
-            }
-
-            // *) Otherwise return not found
-            return new CommitTimestamp(NOT_PRESENT, -1L /** TODO Check if we should return this */, true);
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new IOException("Interrupted while finding commit timestamp", e);
-        } catch (ExecutionException e) {
-            throw new IOException("Problem finding commit timestamp", e);
-        }
-
-    }
-
-    private Optional<Long> tryToLocateCellCommitTimestamp(long epoch,
-            Cell cell,
-            Map<Long, Long> commitCache)
-                    throws IOException {
-
-        CommitTimestamp tentativeCommitTimestamp =
-                locateCellCommitTimestamp(
-                        cell.getTimestamp(),
-                        epoch,
-                        new CommitTimestampLocatorImpl(
-                                new HBaseCellId(null,
-                                        CellUtil.cloneRow(cell),
-                                        CellUtil.cloneFamily(cell),
-                                        CellUtil.cloneQualifier(cell),
-                                        cell.getTimestamp()),
-                                        commitCache,
-                                        tableAccessWrapper));
-
-        // If transaction that added the cell was invalidated
-        if (!tentativeCommitTimestamp.isValid()) {
-            return Optional.absent();
-        }
-
-        switch (tentativeCommitTimestamp.getLocation()) {
-        case COMMIT_TABLE:
-            // If the commit timestamp is found in the persisted commit table,
-            // that means the writing process of the shadow cell in the post
-            // commit phase of the client probably failed, so we heal the shadow
-            // cell with the right commit timestamp for avoiding further reads to
-            // hit the storage
-            healShadowCell(cell, tentativeCommitTimestamp.getValue());
-            return Optional.of(tentativeCommitTimestamp.getValue());
-        case CACHE:
-        case SHADOW_CELL:
-            return Optional.of(tentativeCommitTimestamp.getValue());
-        case NOT_PRESENT:
-            return Optional.absent();
-        default:
-            assert (false);
-            return Optional.absent();
-        }
-    }
-    
-    
-    private Optional<Long> getCommitTimestamp(Cell kv, HBaseTransaction transaction, Map<Long, Long> commitCache)
-            throws IOException {
-
-        long startTimestamp = transaction.getStartTimestamp();
-
-        if (kv.getTimestamp() == startTimestamp) {
-            return Optional.of(startTimestamp);
-        }
-
-        if (commitTableClient == null) {
-            assert (transaction.getTransactionManager() != null);
-            commitTableClient = transaction.getTransactionManager().getCommitTableClient();
-        }
-
-        return tryToLocateCellCommitTimestamp(transaction.getEpoch(), kv,
-                commitCache);
-    }
-    
-    private Map<Long, Long> buildCommitCache(List<Cell> rawCells) {
-
-        Map<Long, Long> commitCache = new HashMap<>();
-
-        for (Cell cell : rawCells) {
-            if (CellUtils.isShadowCell(cell)) {
-                commitCache.put(cell.getTimestamp(), Bytes.toLong(CellUtil.cloneValue(cell)));
-            }
-        }
-
-        return commitCache;
-    }
-
-    private void buildFamilyDeletionCache(List<Cell> rawCells, Map<String, List<Cell>> familyDeletionCache) {
-
-        for (Cell cell : rawCells) {
-            if (CellUtil.matchingQualifier(cell, CellUtils.FAMILY_DELETE_QUALIFIER) &&
-                    CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY)) {
-
-                String row = Bytes.toString(cell.getRow());
-                List<Cell> cells = familyDeletionCache.get(row);
-                if (cells == null) {
-                    cells = new ArrayList<>();
-                    familyDeletionCache.put(row, cells);
-                }
-
-                cells.add(cell);
-            }
-        }
-
-    }
-
-    private boolean isCellInTransaction(Cell kv, HBaseTransaction transaction, Map<Long, Long> commitCache) {
-
-        long startTimestamp = transaction.getStartTimestamp();
-        long readTimestamp = transaction.getReadTimestamp();
-
-        // A cell was written by a transaction if its timestamp is larger than its startTimestamp and smaller or equal to its readTimestamp.
-        // There also might be a case where the cell was written by the transaction and its timestamp equals to its writeTimestamp, however,
-        // this case occurs after checkpoint and in this case we do not want to read this data.
-        if (kv.getTimestamp() >= startTimestamp && kv.getTimestamp() <= readTimestamp) {
-            return true;
-        }
-
-        return false;
-    }
-
-    private boolean isCellInSnapshot(Cell kv, HBaseTransaction transaction, Map<Long, Long> commitCache)
-        throws IOException {
-
-        Optional<Long> commitTimestamp = getCommitTimestamp(kv, transaction, commitCache);
-
-        return commitTimestamp.isPresent() && commitTimestamp.get() < transaction.getStartTimestamp();
-    }
-
-    private Get createPendingGet(Cell cell, int versionCount) throws IOException {
-
-        Get pendingGet = new Get(CellUtil.cloneRow(cell));
-        pendingGet.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
-        pendingGet.addColumn(CellUtil.cloneFamily(cell), CellUtils.addShadowCellSuffix(cell.getQualifierArray(),
-                                                                                       cell.getQualifierOffset(),
-                                                                                       cell.getQualifierLength()));
-        pendingGet.setMaxVersions(versionCount);
-        pendingGet.setTimeRange(0, cell.getTimestamp());
-
-        return pendingGet;
-    }
-
-    /**
-     * Filters the raw results returned from HBase and returns only those belonging to the current snapshot, as defined
-     * by the transaction object. If the raw results don't contain enough information for a particular qualifier, it
-     * will request more versions from HBase.
-     *
-     * @param rawCells          Raw cells that we are going to filter
-     * @param transaction       Defines the current snapshot
-     * @param versionsToRequest Number of versions requested from hbase
-     * @param familyDeletionCache Accumulates the family deletion markers to identify cells that deleted with a higher version
-     * @return Filtered KVs belonging to the transaction snapshot
-     */
-    @Override
-    public List<Cell> filterCellsForSnapshot(List<Cell> rawCells, HBaseTransaction transaction,
-                                      int versionsToRequest, Map<String, List<Cell>> familyDeletionCache) throws IOException {
-
-        assert (rawCells != null && transaction != null && versionsToRequest >= 1);
-
-        List<Cell> keyValuesInSnapshot = new ArrayList<>();
-        List<Get> pendingGetsList = new ArrayList<>();
-
-        int numberOfVersionsToFetch = versionsToRequest * 2;
-        if (numberOfVersionsToFetch < 1) {
-            numberOfVersionsToFetch = versionsToRequest;
-        }
-
-        Map<Long, Long> commitCache = buildCommitCache(rawCells);
-        buildFamilyDeletionCache(rawCells, familyDeletionCache);
-
-        for (Collection<Cell> columnCells : groupCellsByColumnFilteringShadowCellsAndFamilyDeletion(rawCells)) {
-            boolean snapshotValueFound = false;
-            Cell oldestCell = null;
-            for (Cell cell : columnCells) {
-                snapshotValueFound = checkFamilyDeletionCache(cell, transaction, familyDeletionCache, commitCache);
-
-                if (snapshotValueFound == true) {
-                    if (transaction.getVisibilityLevel() == VisibilityLevel.SNAPSHOT_ALL) {
-                        snapshotValueFound = false;
-                    } else {
-                        break;
-                    }
-                }
-
-                if (isCellInTransaction(cell, transaction, commitCache) ||
-                    isCellInSnapshot(cell, transaction, commitCache)) {
-                    if (!CellUtil.matchingValue(cell, CellUtils.DELETE_TOMBSTONE)) {
-                        keyValuesInSnapshot.add(cell);
-                    }
-
-                    // We can finish looking for additional results in two cases:
-                    // 1. if we found a result and we are not in SNAPSHOT_ALL mode.
-                    // 2. if we found a result that was not written by the current transaction.
-                    if (transaction.getVisibilityLevel() != VisibilityLevel.SNAPSHOT_ALL ||
-                        !isCellInTransaction(cell, transaction, commitCache)) {
-                        snapshotValueFound = true;
-                        break;
-                    }
-                }
-                oldestCell = cell;
-            }
-            if (!snapshotValueFound) {
-                assert (oldestCell != null);
-                Get pendingGet = createPendingGet(oldestCell, numberOfVersionsToFetch);
-                pendingGetsList.add(pendingGet);
-            }
-        }
-
-        if (!pendingGetsList.isEmpty()) {
-            Result[] pendingGetsResults = tableAccessWrapper.get(pendingGetsList);
-            for (Result pendingGetResult : pendingGetsResults) {
-                if (!pendingGetResult.isEmpty()) {
-                    keyValuesInSnapshot.addAll(
-                        filterCellsForSnapshot(pendingGetResult.listCells(), transaction, numberOfVersionsToFetch, familyDeletionCache));
-                }
-            }
-        }
-
-        Collections.sort(keyValuesInSnapshot, KeyValue.COMPARATOR);
-
-        return keyValuesInSnapshot;
-    }
-
-    @Override
-    public Result get(TTable ttable, Get get, HBaseTransaction transaction) throws IOException {
-        Result result = tableAccessWrapper.get(get);
-
-        List<Cell> filteredKeyValues = Collections.emptyList();
-        if (!result.isEmpty()) {
-            filteredKeyValues = ttable.filterCellsForSnapshot(result.listCells(), transaction, get.getMaxVersions(), new HashMap<String, List<Cell>>());
-        }
-
-        return Result.create(filteredKeyValues);
-    }
-
-    @Override
-    public ResultScanner getScanner(TTable ttable, Scan scan, HBaseTransaction transaction) throws IOException {
-
-        return ttable.new TransactionalClientScanner(transaction, scan, 1);
-
-    }
-
-    @Override
-    public boolean isCommitted(HBaseCellId hBaseCellId, long epoch) throws TransactionException {
-        try {
-            long timestamp = hBaseCellId.getTimestamp() - (hBaseCellId.getTimestamp() % AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
-            CommitTimestamp tentativeCommitTimestamp =
-                    locateCellCommitTimestamp(timestamp, epoch,
-                                              new CommitTimestampLocatorImpl(hBaseCellId, Maps.<Long, Long>newHashMap(), tableAccessWrapper));
-
-            // If transaction that added the cell was invalidated
-            if (!tentativeCommitTimestamp.isValid()) {
-                return false;
-            }
-
-            switch (tentativeCommitTimestamp.getLocation()) {
-                case COMMIT_TABLE:
-                case SHADOW_CELL:
-                    return true;
-                case NOT_PRESENT:
-                    return false;
-                case CACHE: // cache was empty
-                default:
-                    return false;
-            }
-        } catch (IOException e) {
-            throw new TransactionException("Failure while checking if a transaction was committed", e);
-        }
-    }
-
-    static ImmutableList<Collection<Cell>> groupCellsByColumnFilteringShadowCellsAndFamilyDeletion(List<Cell> rawCells) {
-
-        Predicate<Cell> shadowCellAndFamilyDeletionFilter = new Predicate<Cell>() {
-
-            @Override
-            public boolean apply(Cell cell) {
-                boolean familyDeletionMarkerCondition = CellUtil.matchingQualifier(cell, CellUtils.FAMILY_DELETE_QUALIFIER) &&
-                                                        CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY);
-
-                return cell != null && !CellUtils.isShadowCell(cell) && !familyDeletionMarkerCondition;
-            }
-
-        };
-
-        Function<Cell, ColumnWrapper> cellToColumnWrapper = new Function<Cell, ColumnWrapper>() {
-
-            @Override
-            public ColumnWrapper apply(Cell cell) {
-                return new ColumnWrapper(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
-            }
-
-        };
-
-        return Multimaps.index(Iterables.filter(rawCells, shadowCellAndFamilyDeletionFilter), cellToColumnWrapper)
-            .asMap().values()
-            .asList();
-    }
-
-}
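
The deleted filter decides snapshot visibility from commit timestamps: a cell is readable when it was written by the transaction itself, or when its commit timestamp is known and precedes the transaction's start timestamp. A condensed sketch of that rule follows; the names are illustrative, and the real code above additionally handles delete tombstones, family deletion markers and shadow-cell healing.

    public class SnapshotVisibilitySketch {

        // A cell belongs to a transaction's snapshot when it is the transaction's
        // own write (same timestamp) or when its commit timestamp is known and
        // strictly precedes the transaction's start timestamp.
        static boolean isCellInSnapshot(long cellTimestamp, Long commitTimestamp, long txStartTimestamp) {
            if (cellTimestamp == txStartTimestamp) {
                return true; // own tentative write
            }
            return commitTimestamp != null && commitTimestamp < txStartTimestamp;
        }
    }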

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/TTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/TTable.java b/hbase-client/src/main/java/org/apache/omid/transaction/TTable.java
index 12dfb71..9b82148 100644
--- a/hbase-client/src/main/java/org/apache/omid/transaction/TTable.java
+++ b/hbase-client/src/main/java/org/apache/omid/transaction/TTable.java
@@ -17,8 +17,12 @@
  */
 package org.apache.omid.transaction;
 
-import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
 import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimaps;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -39,16 +43,15 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.omid.committable.CommitTable;
 import org.apache.omid.committable.CommitTable.CommitTimestamp;
-import org.apache.omid.proto.TSOProto;
-import org.apache.omid.tso.client.OmidClientConfiguration.ConflictDetectionLevel;
+import org.apache.omid.transaction.HBaseTransactionManager.CommitTimestampLocatorImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -57,7 +60,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.NavigableSet;
-import java.util.Set;
 
 /**
  * Provides transactional methods for accessing and modifying a given snapshot of data identified by an opaque {@link
@@ -71,8 +73,6 @@ public class TTable implements Closeable {
 
     private HTableInterface table;
 
-    private SnapshotFilter snapshotFilter;
-
     // ----------------------------------------------------------------------------------------------------------------
     // Construction
     // ----------------------------------------------------------------------------------------------------------------
@@ -81,10 +81,6 @@ public class TTable implements Closeable {
         this(new HTable(conf, tableName));
     }
 
-    public TTable(Configuration conf, byte[] tableName, CommitTable.Client commitTableClient) throws IOException {
-        this(new HTable(conf, tableName), commitTableClient);
-    }
-
     public TTable(String tableName) throws IOException {
         this(HBaseConfiguration.create(), Bytes.toBytes(tableName));
     }
@@ -93,40 +89,16 @@ public class TTable implements Closeable {
         this(conf, Bytes.toBytes(tableName));
     }
 
-    public TTable(Configuration conf, String tableName, CommitTable.Client commitTableClient) throws IOException {
-        this(conf, Bytes.toBytes(tableName), commitTableClient);
-    }
-
     public TTable(HTableInterface hTable) throws IOException {
         table = hTable;
         healerTable = new HTable(table.getConfiguration(), table.getTableName());
-        boolean serverSideFilter = table.getConfiguration().getBoolean("omid.server.side.filter", false);
-        snapshotFilter = (serverSideFilter) ?  new AttributeSetSnapshotFilter(hTable) : new SnapshotFilterImpl(new HTableAccessWrapper(hTable, healerTable));
-    }
-
-    public TTable(HTableInterface hTable, CommitTable.Client commitTableClient) throws IOException {
-        table = hTable;
-        healerTable = new HTable(table.getConfiguration(), table.getTableName());
-        boolean serverSideFilter = table.getConfiguration().getBoolean("omid.server.side.filter", false);
-        snapshotFilter = (serverSideFilter) ?  new AttributeSetSnapshotFilter(hTable) : new SnapshotFilterImpl(new HTableAccessWrapper(hTable, healerTable), commitTableClient);
     }
 
     public TTable(HTableInterface hTable, HTableInterface healerTable) throws IOException {
         table = hTable;
         this.healerTable = healerTable;
-        Configuration config = table.getConfiguration();
-        boolean serverSideFilter = (config == null) ? false : config.getBoolean("omid.server.side.filter", false);
-        snapshotFilter = (serverSideFilter) ?  new AttributeSetSnapshotFilter(hTable) : new SnapshotFilterImpl(new HTableAccessWrapper(hTable, healerTable));
     }
 
-    public TTable(HTableInterface hTable, HTableInterface healerTable, CommitTable.Client commitTableClient) throws IOException {
-        table = hTable;
-        this.healerTable = healerTable;
-        boolean serverSideFilter = table.getConfiguration().getBoolean("omid.server.side.filter", false);
-        snapshotFilter = (serverSideFilter) ?  new AttributeSetSnapshotFilter(hTable) : new SnapshotFilterImpl(new HTableAccessWrapper(hTable, healerTable), commitTableClient);
-    }
-
-
     // ----------------------------------------------------------------------------------------------------------------
     // Closeable implementation
     // ----------------------------------------------------------------------------------------------------------------
@@ -160,7 +132,7 @@ public class TTable implements Closeable {
 
         HBaseTransaction transaction = enforceHBaseTransactionAsParam(tx);
 
-        final long readTimestamp = transaction.getReadTimestamp();
+        final long readTimestamp = transaction.getStartTimestamp();
         final Get tsget = new Get(get.getRow()).setFilter(get.getFilter());
         TimeRange timeRange = get.getTimeRange();
         long startTime = timeRange.getMin();
@@ -177,41 +149,19 @@ public class TTable implements Closeable {
                     tsget.addColumn(family, qualifier);
                     tsget.addColumn(family, CellUtils.addShadowCellSuffix(qualifier));
                 }
-                tsget.addColumn(family, CellUtils.FAMILY_DELETE_QUALIFIER);
-                tsget.addColumn(family, CellUtils.addShadowCellSuffix(CellUtils.FAMILY_DELETE_QUALIFIER));
             }
         }
         LOG.trace("Initial Get = {}", tsget);
 
-        return snapshotFilter.get(this, tsget, transaction);
-    }
-
-    private void familyQualifierBasedDeletion(HBaseTransaction tx, Put deleteP, Get deleteG) throws IOException {
-        Result result = this.get(tx, deleteG);
+        // Return the KVs that belong to the transaction snapshot, ask for more
+        // versions if needed
+        Result result = table.get(tsget);
+        List<Cell> filteredKeyValues = Collections.emptyList();
         if (!result.isEmpty()) {
-            for (Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entryF : result.getMap()
-                    .entrySet()) {
-                byte[] family = entryF.getKey();
-                for (Entry<byte[], NavigableMap<Long, byte[]>> entryQ : entryF.getValue().entrySet()) {
-                    byte[] qualifier = entryQ.getKey();
-                    tx.addWriteSetElement(new HBaseCellId(table, deleteP.getRow(), family, qualifier,
-                            tx.getWriteTimestamp()));
-                }
-                deleteP.add(family, CellUtils.FAMILY_DELETE_QUALIFIER, tx.getWriteTimestamp(),
-                        HConstants.EMPTY_BYTE_ARRAY);
-            }
+            filteredKeyValues = filterCellsForSnapshot(result.listCells(), transaction, tsget.getMaxVersions());
         }
-    }
-
-    private void  familyQualifierBasedDeletionWithOutRead(HBaseTransaction tx, Put deleteP, Get deleteG) {
-        Set<byte[]> fset = deleteG.getFamilyMap().keySet();
 
-        for (byte[] family : fset) {
-            deleteP.add(family, CellUtils.FAMILY_DELETE_QUALIFIER, tx.getWriteTimestamp(),
-                    HConstants.EMPTY_BYTE_ARRAY);
-        }
-        tx.addWriteSetElement(new HBaseCellId(table, deleteP.getRow(), null, null,
-                tx.getWriteTimestamp()));
+        return Result.create(filteredKeyValues);
     }
 
     /**
@@ -227,48 +177,47 @@ public class TTable implements Closeable {
 
         HBaseTransaction transaction = enforceHBaseTransactionAsParam(tx);
 
-        final long writeTimestamp = transaction.getStartTimestamp();
-        boolean deleteFamily = false;
+        final long startTimestamp = transaction.getStartTimestamp();
+        boolean issueGet = false;
 
-        final Put deleteP = new Put(delete.getRow(), writeTimestamp);
+        final Put deleteP = new Put(delete.getRow(), startTimestamp);
         final Get deleteG = new Get(delete.getRow());
         Map<byte[], List<Cell>> fmap = delete.getFamilyCellMap();
         if (fmap.isEmpty()) {
-            familyQualifierBasedDeletion(transaction, deleteP, deleteG);
+            issueGet = true;
         }
-
         for (List<Cell> cells : fmap.values()) {
             for (Cell cell : cells) {
-                CellUtils.validateCell(cell, writeTimestamp);
+                CellUtils.validateCell(cell, startTimestamp);
                 switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
                     case DeleteColumn:
                         deleteP.add(CellUtil.cloneFamily(cell),
                                     CellUtil.cloneQualifier(cell),
-                                    writeTimestamp,
+                                    startTimestamp,
                                     CellUtils.DELETE_TOMBSTONE);
                         transaction.addWriteSetElement(
                             new HBaseCellId(table,
                                             delete.getRow(),
                                             CellUtil.cloneFamily(cell),
                                             CellUtil.cloneQualifier(cell),
-                                            writeTimestamp));
+                                            cell.getTimestamp()));
                         break;
                     case DeleteFamily:
                         deleteG.addFamily(CellUtil.cloneFamily(cell));
-                        deleteFamily = true;
+                        issueGet = true;
                         break;
                     case Delete:
                         if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
                             deleteP.add(CellUtil.cloneFamily(cell),
                                         CellUtil.cloneQualifier(cell),
-                                        writeTimestamp,
+                                        startTimestamp,
                                         CellUtils.DELETE_TOMBSTONE);
                             transaction.addWriteSetElement(
                                 new HBaseCellId(table,
                                                 delete.getRow(),
                                                 CellUtil.cloneFamily(cell),
                                                 CellUtil.cloneQualifier(cell),
-                                                writeTimestamp));
+                                                cell.getTimestamp()));
                             break;
                         } else {
                             throw new UnsupportedOperationException(
@@ -279,11 +228,21 @@ public class TTable implements Closeable {
                 }
             }
         }
-        if (deleteFamily) {
-            if (enforceHBaseTransactionManagerAsParam(transaction.getTransactionManager()).getConflictDetectionLevel() == ConflictDetectionLevel.ROW) {
-                familyQualifierBasedDeletionWithOutRead(transaction, deleteP, deleteG);
-            } else {
-                familyQualifierBasedDeletion(transaction, deleteP, deleteG);
+        if (issueGet) {
+            // It's better to perform a transactional get to avoid deleting more
+            // than necessary
+            Result result = this.get(transaction, deleteG);
+            if (!result.isEmpty()) {
+                for (Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entryF : result.getMap()
+                    .entrySet()) {
+                    byte[] family = entryF.getKey();
+                    for (Entry<byte[], NavigableMap<Long, byte[]>> entryQ : entryF.getValue().entrySet()) {
+                        byte[] qualifier = entryQ.getKey();
+                        deleteP.add(family, qualifier, CellUtils.DELETE_TOMBSTONE);
+                        transaction.addWriteSetElement(new HBaseCellId(table, delete.getRow(), family, qualifier,
+                                                                       transaction.getStartTimestamp()));
+                    }
+                }
             }
         }
 
@@ -306,19 +265,18 @@ public class TTable implements Closeable {
 
         HBaseTransaction transaction = enforceHBaseTransactionAsParam(tx);
 
-        final long writeTimestamp = transaction.getWriteTimestamp();
-
+        final long startTimestamp = transaction.getStartTimestamp();
         // create put with correct ts
-        final Put tsput = new Put(put.getRow(), writeTimestamp);
+        final Put tsput = new Put(put.getRow(), startTimestamp);
         Map<byte[], List<Cell>> kvs = put.getFamilyCellMap();
         for (List<Cell> kvl : kvs.values()) {
             for (Cell c : kvl) {
-                CellUtils.validateCell(c, writeTimestamp);
+                CellUtils.validateCell(c, startTimestamp);
                 // Reach into keyvalue to update timestamp.
                 // It's not nice to reach into keyvalue internals,
                 // but we want to avoid having to copy the whole thing
                 KeyValue kv = KeyValueUtil.ensureKeyValue(c);
-                Bytes.putLong(kv.getValueArray(), kv.getTimestampOffset(), writeTimestamp);
+                Bytes.putLong(kv.getValueArray(), kv.getTimestampOffset(), startTimestamp);
                 tsput.add(kv);
 
                 transaction.addWriteSetElement(
@@ -349,7 +307,7 @@ public class TTable implements Closeable {
 
         Scan tsscan = new Scan(scan);
         tsscan.setMaxVersions(1);
-        tsscan.setTimeRange(0, transaction.getReadTimestamp() + 1);
+        tsscan.setTimeRange(0, transaction.getStartTimestamp() + 1);
         Map<byte[], NavigableSet<byte[]>> kvs = scan.getFamilyMap();
         for (Map.Entry<byte[], NavigableSet<byte[]>> entry : kvs.entrySet()) {
             byte[] family = entry.getKey();
@@ -360,34 +318,181 @@ public class TTable implements Closeable {
             for (byte[] qualifier : qualifiers) {
                 tsscan.addColumn(family, CellUtils.addShadowCellSuffix(qualifier));
             }
-            if (!qualifiers.isEmpty()) {
-                tsscan.addColumn(entry.getKey(), CellUtils.FAMILY_DELETE_QUALIFIER);
+        }
+        return new TransactionalClientScanner(transaction, tsscan, 1);
+    }
+
+    /**
+     * Filters the raw results returned from HBase and returns only those belonging to the current snapshot, as defined
+     * by the transaction object. If the raw results don't contain enough information for a particular qualifier, it
+     * will request more versions from HBase.
+     *
+     * @param rawCells          Raw cells that we are going to filter
+     * @param transaction       Defines the current snapshot
+     * @param versionsToRequest Number of versions requested from HBase
+     * @return Filtered KVs belonging to the transaction snapshot
+     */
+    List<Cell> filterCellsForSnapshot(List<Cell> rawCells, HBaseTransaction transaction,
+                                      int versionsToRequest) throws IOException {
+
+        assert (rawCells != null && transaction != null && versionsToRequest >= 1);
+
+        List<Cell> keyValuesInSnapshot = new ArrayList<>();
+        List<Get> pendingGetsList = new ArrayList<>();
+
+        int numberOfVersionsToFetch = versionsToRequest * 2;
+        if (numberOfVersionsToFetch < 1) {
+            numberOfVersionsToFetch = versionsToRequest;
+        }
+
+        Map<Long, Long> commitCache = buildCommitCache(rawCells);
+
+        for (Collection<Cell> columnCells : groupCellsByColumnFilteringShadowCells(rawCells)) {
+            boolean snapshotValueFound = false;
+            Cell oldestCell = null;
+            for (Cell cell : columnCells) {
+                if (isCellInSnapshot(cell, transaction, commitCache)) {
+                    if (!CellUtil.matchingValue(cell, CellUtils.DELETE_TOMBSTONE)) {
+                        keyValuesInSnapshot.add(cell);
+                    }
+                    snapshotValueFound = true;
+                    break;
+                }
+                oldestCell = cell;
+            }
+            if (!snapshotValueFound) {
+                assert (oldestCell != null);
+                Get pendingGet = createPendingGet(oldestCell, numberOfVersionsToFetch);
+                pendingGetsList.add(pendingGet);
+            }
+        }
+
+        if (!pendingGetsList.isEmpty()) {
+            Result[] pendingGetsResults = table.get(pendingGetsList);
+            for (Result pendingGetResult : pendingGetsResults) {
+                if (!pendingGetResult.isEmpty()) {
+                    keyValuesInSnapshot.addAll(
+                        filterCellsForSnapshot(pendingGetResult.listCells(), transaction, numberOfVersionsToFetch));
+                }
             }
         }
 
-        return snapshotFilter.getScanner(this, tsscan, transaction);
+        Collections.sort(keyValuesInSnapshot, KeyValue.COMPARATOR);
+
+        assert (keyValuesInSnapshot.size() <= rawCells.size());
+        return keyValuesInSnapshot;
     }
 
-    
-    List<Cell> filterCellsForSnapshot(List<Cell> rawCells, HBaseTransaction transaction,
-                                      int versionsToRequest, Map<String, List<Cell>> familyDeletionCache) throws IOException {
-        return snapshotFilter.filterCellsForSnapshot(rawCells, transaction, versionsToRequest, familyDeletionCache);
+    private Map<Long, Long> buildCommitCache(List<Cell> rawCells) {
+
+        Map<Long, Long> commitCache = new HashMap<>();
+
+        for (Cell cell : rawCells) {
+            if (CellUtils.isShadowCell(cell)) {
+                commitCache.put(cell.getTimestamp(), Bytes.toLong(CellUtil.cloneValue(cell)));
+            }
+        }
+
+        return commitCache;
     }
 
+    private boolean isCellInSnapshot(Cell kv, HBaseTransaction transaction, Map<Long, Long> commitCache)
+        throws IOException {
+
+        long startTimestamp = transaction.getStartTimestamp();
 
-    public class TransactionalClientScanner implements ResultScanner {
+        if (kv.getTimestamp() == startTimestamp) {
+            return true;
+        }
+
+        Optional<Long> commitTimestamp =
+            tryToLocateCellCommitTimestamp(transaction.getTransactionManager(), transaction.getEpoch(), kv,
+                                           commitCache);
+
+        return commitTimestamp.isPresent() && commitTimestamp.get() < startTimestamp;
+    }
+
+    private Get createPendingGet(Cell cell, int versionCount) throws IOException {
+
+        Get pendingGet = new Get(CellUtil.cloneRow(cell));
+        pendingGet.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
+        pendingGet.addColumn(CellUtil.cloneFamily(cell), CellUtils.addShadowCellSuffix(cell.getQualifierArray(),
+                                                                                       cell.getQualifierOffset(),
+                                                                                       cell.getQualifierLength()));
+        pendingGet.setMaxVersions(versionCount);
+        pendingGet.setTimeRange(0, cell.getTimestamp());
+
+        return pendingGet;
+    }
+
+    private Optional<Long> tryToLocateCellCommitTimestamp(AbstractTransactionManager transactionManager,
+                                                          long epoch,
+                                                          Cell cell,
+                                                          Map<Long, Long> commitCache)
+        throws IOException {
+
+        CommitTimestamp tentativeCommitTimestamp =
+            transactionManager.locateCellCommitTimestamp(
+                cell.getTimestamp(),
+                epoch,
+                new CommitTimestampLocatorImpl(
+                    new HBaseCellId(table,
+                                    CellUtil.cloneRow(cell),
+                                    CellUtil.cloneFamily(cell),
+                                    CellUtil.cloneQualifier(cell),
+                                    cell.getTimestamp()),
+                    commitCache));
+
+        // If the transaction that added the cell was invalidated
+        if (!tentativeCommitTimestamp.isValid()) {
+            return Optional.absent();
+        }
+
+        switch (tentativeCommitTimestamp.getLocation()) {
+            case COMMIT_TABLE:
+                // If the commit timestamp is found in the persisted commit table,
+                // the writing of the shadow cell in the client's post-commit phase
+                // probably failed, so we heal the shadow cell with the right commit
+                // timestamp to spare subsequent reads from having to hit the commit
+                // table again
+                healShadowCell(cell, tentativeCommitTimestamp.getValue());
+                return Optional.of(tentativeCommitTimestamp.getValue());
+            case CACHE:
+            case SHADOW_CELL:
+                return Optional.of(tentativeCommitTimestamp.getValue());
+            case NOT_PRESENT:
+                return Optional.absent();
+            default:
+                assert (false);
+                return Optional.absent();
+        }
+    }
+
+    void healShadowCell(Cell cell, long commitTimestamp) {
+        Put put = new Put(CellUtil.cloneRow(cell));
+        byte[] family = CellUtil.cloneFamily(cell);
+        byte[] shadowCellQualifier = CellUtils.addShadowCellSuffix(cell.getQualifierArray(),
+                                                                   cell.getQualifierOffset(),
+                                                                   cell.getQualifierLength());
+        put.add(family, shadowCellQualifier, cell.getTimestamp(), Bytes.toBytes(commitTimestamp));
+        try {
+            healerTable.put(put);
+        } catch (IOException e) {
+            LOG.warn("Failed healing shadow cell for kv {}", cell, e);
+        }
+    }
+
+    protected class TransactionalClientScanner implements ResultScanner {
 
         private HBaseTransaction state;
         private ResultScanner innerScanner;
         private int maxVersions;
-        Map<String, List<Cell>> familyDeletionCache;
 
         TransactionalClientScanner(HBaseTransaction state, Scan scan, int maxVersions)
             throws IOException {
             this.state = state;
             this.innerScanner = table.getScanner(scan);
             this.maxVersions = maxVersions;
-            this.familyDeletionCache = new HashMap<String, List<Cell>>();
         }
 
 
@@ -400,7 +505,7 @@ public class TTable implements Closeable {
                     return null;
                 }
                 if (!result.isEmpty()) {
-                    filteredResult = filterCellsForSnapshot(result.listCells(), state, maxVersions, familyDeletionCache);
+                    filteredResult = filterCellsForSnapshot(result.listCells(), state, maxVersions);
                 }
             }
             return Result.create(filteredResult);
@@ -689,38 +794,28 @@ public class TTable implements Closeable {
         }
     }
 
-    private HBaseTransactionManager enforceHBaseTransactionManagerAsParam(TransactionManager tm) {
-        if (tm instanceof HBaseTransactionManager) {
-            return (HBaseTransactionManager) tm;
-        } else {
-            throw new IllegalArgumentException(
-                String.format("The transaction manager object passed %s is not an instance of HBaseTransactionManager ",
-                              tm.getClass().getName()));
-        }
-    }
+    static ImmutableList<Collection<Cell>> groupCellsByColumnFilteringShadowCells(List<Cell> rawCells) {
 
-    // For testing
+        Predicate<Cell> shadowCellFilter = new Predicate<Cell>() {
 
-    @VisibleForTesting
-    boolean isCommitted(HBaseCellId hBaseCellId, long epoch) throws TransactionException {
-        return snapshotFilter.isCommitted(hBaseCellId, epoch);
-    }
+            @Override
+            public boolean apply(Cell cell) {
+                return cell != null && !CellUtils.isShadowCell(cell);
+            }
 
-    @VisibleForTesting
-    CommitTimestamp locateCellCommitTimestamp(long cellStartTimestamp, long epoch,
-            CommitTimestampLocator locator) throws IOException {
-        return snapshotFilter.locateCellCommitTimestamp(cellStartTimestamp, epoch, locator);
-    }
+        };
 
-    @VisibleForTesting
-    Optional<CommitTimestamp> readCommitTimestampFromShadowCell(long cellStartTimestamp, CommitTimestampLocator locator)
-            throws IOException
-    {
-        return snapshotFilter.readCommitTimestampFromShadowCell(cellStartTimestamp, locator);
-    }
+        Function<Cell, ColumnWrapper> cellToColumnWrapper = new Function<Cell, ColumnWrapper>() {
 
-    SnapshotFilter getSnapshotFilter() {
-        return snapshotFilter;
-    }
+            @Override
+            public ColumnWrapper apply(Cell cell) {
+                return new ColumnWrapper(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
+            }
 
+        };
+
+        return Multimaps.index(Iterables.filter(rawCells, shadowCellFilter), cellToColumnWrapper)
+            .asMap().values()
+            .asList();
+    }
 }
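
The TTable changes above restore the pre-checkpoint read path: a commit cache is built from shadow cells (buildCommitCache), each non-shadow cell is checked against the reader's start timestamp (isCellInSnapshot), and shadow cells that a crashed client never wrote are healed from the commit table (healShadowCell). The snippet below is a minimal, self-contained model of just the visibility rule, using plain longs instead of HBase Cells; the class and method names are illustrative and not part of Omid, and the real isCellInSnapshot() additionally falls back to tryToLocateCellCommitTimestamp() when the commit cache has no entry.

import java.util.HashMap;
import java.util.Map;

// Illustrative model of the visibility rule restored above (plain longs instead
// of HBase Cells; class and method names are not part of Omid).
public class SnapshotVisibilitySketch {

    // Commit cache built from shadow cells: start timestamp -> commit timestamp.
    private final Map<Long, Long> commitCache = new HashMap<Long, Long>();

    void recordShadowCell(long startTs, long commitTs) {
        commitCache.put(startTs, commitTs);
    }

    // A cell written at cellTs is visible to a reader that started at txStartTs
    // if the reader wrote it itself, or its writer committed before the reader started.
    boolean isVisible(long cellTs, long txStartTs) {
        if (cellTs == txStartTs) {
            return true;                                  // the transaction's own write
        }
        Long commitTs = commitCache.get(cellTs);
        return commitTs != null && commitTs < txStartTs;  // committed before we started
    }

    public static void main(String[] args) {
        SnapshotVisibilitySketch sketch = new SnapshotVisibilitySketch();
        sketch.recordShadowCell(10L, 15L);                 // writer started at 10, committed at 15
        System.out.println(sketch.isVisible(10L, 20L));    // true: 15 < 20
        System.out.println(sketch.isVisible(10L, 12L));    // false: 15 >= 12
    }
}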

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/main/java/org/apache/omid/transaction/TableAccessWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/omid/transaction/TableAccessWrapper.java b/hbase-client/src/main/java/org/apache/omid/transaction/TableAccessWrapper.java
deleted file mode 100644
index 2fc5362..0000000
--- a/hbase-client/src/main/java/org/apache/omid/transaction/TableAccessWrapper.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Get;
-
-//This interface is used to wrap the HTableInterface and Region object when doing client and server side filtering accordingly.
-public interface TableAccessWrapper {
-
-    public Result[] get(List<Get> get) throws IOException;
-    public Result get(Get get) throws IOException;
-    public void   put(Put put) throws IOException;
-
-}
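
As its comment says, the deleted TableAccessWrapper let the same filtering code run against either an HTableInterface on the client side or a Region inside a coprocessor. For context, a hypothetical client-side implementation would have been a plain delegator like the sketch below; only the interface signature above comes from the removed code, the class name and wiring here are illustrative.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.omid.transaction.TableAccessWrapper;

// Hypothetical client-side implementation of the interface deleted above: a
// plain delegator over HTableInterface, the counterpart of the server-side
// RegionAccessWrapper removed further down in this commit.
public class DelegatingTableAccessWrapper implements TableAccessWrapper {

    private final HTableInterface table;

    public DelegatingTableAccessWrapper(HTableInterface table) {
        this.table = table;
    }

    @Override
    public Result[] get(List<Get> gets) throws IOException {
        return table.get(gets);    // batched reads
    }

    @Override
    public Result get(Get get) throws IOException {
        return table.get(get);     // single read
    }

    @Override
    public void put(Put put) throws IOException {
        table.put(put);            // single write
    }
}

The value of such a seam is that the snapshot filter only depends on get/put, so the same filtering logic can be reused on both sides of the RPC boundary.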


[3/4] incubator-omid git commit: Revert to the state of the project at 0c371361781957c96b20295290a167f7be3b33e2

Posted by fp...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/test/java/org/apache/omid/transaction/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/omid/transaction/TestCheckpoint.java b/hbase-client/src/test/java/org/apache/omid/transaction/TestCheckpoint.java
deleted file mode 100644
index 078ea5f..0000000
--- a/hbase-client/src/test/java/org/apache/omid/transaction/TestCheckpoint.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.omid.transaction.AbstractTransaction.VisibilityLevel;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.ITestContext;
-import org.testng.annotations.Test;
-
-import static org.testng.Assert.assertTrue;
-
-@Test(groups = "sharedHBase")
-public class TestCheckpoint extends OmidTestBase {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TestCheckpoint.class);
-
-    private HBaseTransaction enforceHBaseTransactionAsParam(Transaction tx) {
-        if (tx instanceof HBaseTransaction) {
-            return (HBaseTransaction) tx;
-        } else {
-            throw new IllegalArgumentException(
-                String.format("The transaction object passed %s is not an instance of HBaseTransaction",
-                              tx.getClass().getName()));
-        }
-    }
-
-    @Test(timeOut = 30_000)
-    public void testFewCheckPoints(ITestContext context) throws Exception {
-
-        TransactionManager tm = newTransactionManager(context);
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-        byte[] dataValue3 = Bytes.toBytes("testWrite-3");
-
-        Transaction tx1 = tm.begin();
-
-        HBaseTransaction hbaseTx1 = enforceHBaseTransactionAsParam(tx1);
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-
-        Get g = new Get(rowName1).setMaxVersions(1);
-
-        Result r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue2);
-        tt.put(tx1, row1);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.setVisibilityLevel(VisibilityLevel.SNAPSHOT);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue2, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue3);
-        tt.put(tx1, row1);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue2, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue3, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.setVisibilityLevel(VisibilityLevel.SNAPSHOT_ALL);
-
-        r = tt.get(tx1, g);
-        
-        assertTrue(r.size() == 3, "Expected 3 results and found " + r.size());
-
-        List<Cell> cells = r.getColumnCells(famName1, colName1);
-        assertTrue(Bytes.equals(dataValue3, cells.get(0).getValue()),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        assertTrue(Bytes.equals(dataValue2, cells.get(1).getValue()),
-              "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        assertTrue(Bytes.equals(dataValue1, cells.get(2).getValue()),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        tt.close();
-    }
-
-    @Test(timeOut = 30_000)
-    public void testSNAPSHOT(ITestContext context) throws Exception {
-        TransactionManager tm = newTransactionManager(context);
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue0 = Bytes.toBytes("testWrite-0");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        Transaction tx1 = tm.begin();
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue0);
-        tt.put(tx1, row1);
-
-        tm.commit(tx1);
-
-        tx1 = tm.begin();
-
-        HBaseTransaction hbaseTx1 = enforceHBaseTransactionAsParam(tx1);
-
-        Get g = new Get(rowName1).setMaxVersions(1);
-
-        Result r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue0, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue2);
-        tt.put(tx1, row1);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.setVisibilityLevel(VisibilityLevel.SNAPSHOT);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue2, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        tt.close();
-    }
-    
-    @Test(timeOut = 30_000)
-    public void testSNAPSHOT_ALL(ITestContext context) throws Exception {
-        TransactionManager tm = newTransactionManager(context);
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue0 = Bytes.toBytes("testWrite-0");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        Transaction tx1 = tm.begin();
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue0);
-        tt.put(tx1, row1);
-
-        tm.commit(tx1);
-
-        tx1 = tm.begin();
-        
-        HBaseTransaction hbaseTx1 = enforceHBaseTransactionAsParam(tx1);
-
-        Get g = new Get(rowName1).setMaxVersions(100);
-
-        Result r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue0, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-
-        g = new Get(rowName1).setMaxVersions(100);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue2);
-        tt.put(tx1, row1);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.setVisibilityLevel(VisibilityLevel.SNAPSHOT_ALL);
-
-        r = tt.get(tx1, g);
-        
-        assertTrue(r.size() == 3, "Expected 3 results and found " + r.size());
-
-        List<Cell> cells = r.getColumnCells(famName1, colName1);
-        assertTrue(Bytes.equals(dataValue2, cells.get(0).getValue()),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        assertTrue(Bytes.equals(dataValue1, cells.get(1).getValue()),
-              "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        assertTrue(Bytes.equals(dataValue0, cells.get(2).getValue()),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        tt.close();
-    }
-
-    @Test(timeOut = 30_000)
-    public void testSNAPSHOT_EXCLUDE_CURRENT(ITestContext context) throws Exception {
-        TransactionManager tm = newTransactionManager(context);
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        Transaction tx1 = tm.begin();
-
-        HBaseTransaction hbaseTx1 = enforceHBaseTransactionAsParam(tx1);
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-
-        Get g = new Get(rowName1).setMaxVersions(1);
-
-        Result r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.checkpoint();
-
-        row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue2);
-        tt.put(tx1, row1);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-
-        hbaseTx1.setVisibilityLevel(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
-
-        r = tt.get(tx1, g);
-        assertTrue(Bytes.equals(dataValue1, r.getValue(famName1, colName1)),
-                "Unexpected value for SI read " + tx1 + ": " + Bytes.toString(r.getValue(famName1, colName1)));
-        
-        tt.close();
-    }
-
-    @Test(timeOut = 30_000)
-    public void testOutOfCheckpoints(ITestContext context) throws Exception {
-        TransactionManager tm = newTransactionManager(context);
-
-        Transaction tx1 = tm.begin();
-
-        HBaseTransaction hbaseTx1 = enforceHBaseTransactionAsParam(tx1);
-
-        for (int i=0; i < AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN - 1; ++i) {
-            hbaseTx1.checkpoint();
-        }
-
-        try {
-            hbaseTx1.checkpoint();
-            Assert.fail();
-        } catch (TransactionException e) {
-            // expected
-        }
-
-    }
-}
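
TestCheckpoint, deleted above, exercised the checkpoint()/VisibilityLevel API that this revert removes, so this is the coverage that disappears with it. A condensed sketch of the flow from testFewCheckPoints/testSNAPSHOT follows; the caller is assumed to supply a running transaction manager and an open TTable, as the test harness did, and the row/family/value literals are illustrative.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.omid.transaction.AbstractTransaction.VisibilityLevel;
import org.apache.omid.transaction.HBaseTransaction;
import org.apache.omid.transaction.TTable;
import org.apache.omid.transaction.TransactionManager;

// Condensed from the deleted testFewCheckPoints/testSNAPSHOT.
public class CheckpointFlowSketch {

    static void checkpointFlow(TransactionManager tm, TTable table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("cf");
        byte[] qualifier = Bytes.toBytes("col1");

        HBaseTransaction tx = (HBaseTransaction) tm.begin();

        Put p1 = new Put(row);
        p1.add(family, qualifier, Bytes.toBytes("v1"));
        table.put(tx, p1);                                   // write v1

        tx.checkpoint();                                     // writes after this point are hidden from...

        Put p2 = new Put(row);
        p2.add(family, qualifier, Bytes.toBytes("v2"));
        table.put(tx, p2);                                   // write v2 after the checkpoint

        Get g = new Get(row).setMaxVersions(1);
        Result r = table.get(tx, g);                         // ...the default read, which still sees v1

        tx.setVisibilityLevel(VisibilityLevel.SNAPSHOT);
        r = table.get(tx, g);                                // with SNAPSHOT visibility the read sees v2

        tm.commit(tx);
    }
}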

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/test/java/org/apache/omid/transaction/TestColumnIterator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/omid/transaction/TestColumnIterator.java b/hbase-client/src/test/java/org/apache/omid/transaction/TestColumnIterator.java
index 5b72856..2eacd22 100644
--- a/hbase-client/src/test/java/org/apache/omid/transaction/TestColumnIterator.java
+++ b/hbase-client/src/test/java/org/apache/omid/transaction/TestColumnIterator.java
@@ -61,7 +61,7 @@ public class TestColumnIterator {
     public void testGroupingCellsByColumnFilteringShadowCells() {
 
         ImmutableList<Collection<Cell>> groupedColumnsWithoutShadowCells =
-                SnapshotFilterImpl.groupCellsByColumnFilteringShadowCellsAndFamilyDeletion(cells);
+                TTable.groupCellsByColumnFilteringShadowCells(cells);
         Log.info("Column Groups " + groupedColumnsWithoutShadowCells);
         assertEquals(groupedColumnsWithoutShadowCells.size(), 3, "Should be 3 column groups");
         int group1Counter = 0;

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionClient.java b/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionClient.java
index 735af04..c349657 100644
--- a/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionClient.java
+++ b/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionClient.java
@@ -56,7 +56,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
     @Test(timeOut = 30_000)
     public void testIsCommitted(ITestContext context) throws Exception {
         TransactionManager tm = newTransactionManager(context);
-        TTable table = spy(new TTable(hbaseConf, TEST_TABLE, ((AbstractTransactionManager)tm).getCommitTableClient()));
+        TTable table = new TTable(hbaseConf, TEST_TABLE);
 
         HBaseTransaction t1 = (HBaseTransaction) tm.begin();
 
@@ -83,9 +83,9 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         HBaseCellId hBaseCellId3 = new HBaseCellId(htable, row2, family, qualifier, t3.getStartTimestamp());
 
         HBaseTransactionClient hbaseTm = (HBaseTransactionClient) newTransactionManager(context);
-        assertTrue(table.isCommitted(hBaseCellId1, 0), "row1 should be committed");
-        assertFalse(table.isCommitted(hBaseCellId2, 0), "row2 should not be committed for kv2");
-        assertTrue(table.isCommitted(hBaseCellId3, 0), "row2 should be committed for kv3");
+        assertTrue(hbaseTm.isCommitted(hBaseCellId1), "row1 should be committed");
+        assertFalse(hbaseTm.isCommitted(hBaseCellId2), "row2 should not be committed for kv2");
+        assertTrue(hbaseTm.isCommitted(hBaseCellId3), "row2 should be committed for kv3");
     }
 
     @Test(timeOut = 30_000)
@@ -96,7 +96,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         // The following line emulates a crash after commit that is observed in (*) below
         doThrow(new RuntimeException()).when(syncPostCommitter).updateShadowCells(any(HBaseTransaction.class));
 
-        TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()));
+        TTable table = new TTable(hbaseConf, TEST_TABLE);
 
         HBaseTransaction t1 = (HBaseTransaction) tm.begin();
 
@@ -119,7 +119,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         HBaseCellId hBaseCellId = new HBaseCellId(htable, row1, family, qualifier, t1.getStartTimestamp());
 
         HBaseTransactionClient hbaseTm = (HBaseTransactionClient) newTransactionManager(context);
-        assertTrue(table.isCommitted(hBaseCellId, 0), "row1 should be committed");
+        assertTrue(hbaseTm.isCommitted(hBaseCellId), "row1 should be committed");
     }
 
     @Test(timeOut = 30_000)
@@ -137,7 +137,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         Optional<CommitTimestamp> optionalCT = tm.commitTableClient.getCommitTimestamp(NON_EXISTING_CELL_TS).get();
         assertFalse(optionalCT.isPresent());
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
             // Test that we get an invalidation mark for an invalidated transaction
 
             // Start a transaction and invalidate it before commiting it
@@ -182,14 +182,14 @@ public class TestHBaseTransactionClient extends OmidTestBase {
 
         HBaseTransactionManager tm = (HBaseTransactionManager) newTransactionManager(context);
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
 
             // Test first we can not found a non-existent cell ts
             HBaseCellId hBaseCellId = new HBaseCellId(table.getHTable(), row1, family, qualifier, NON_EXISTING_CELL_TS);
             // Set an empty cache to allow to bypass the checking
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            Optional<CommitTimestamp> optionalCT = table
+            Optional<CommitTimestamp> optionalCT = tm
                     .readCommitTimestampFromShadowCell(NON_EXISTING_CELL_TS, ctLocator);
             assertFalse(optionalCT.isPresent());
 
@@ -200,7 +200,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
             table.put(tx1, put);
             tm.commit(tx1);
             // Upon commit, the commit data should be in the shadow cells, so test it
-            optionalCT = table.readCommitTimestampFromShadowCell(tx1.getStartTimestamp(), ctLocator);
+            optionalCT = tm.readCommitTimestampFromShadowCell(tx1.getStartTimestamp(), ctLocator);
             assertTrue(optionalCT.isPresent());
             CommitTimestamp ct = optionalCT.get();
             assertTrue(ct.isValid());
@@ -228,7 +228,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
 
         // Then test that locator finds it in the cache
         CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId, fakeCache);
-        CommitTimestamp ct = (new TTable(table)).locateCellCommitTimestamp(CELL_ST, tm.tsoClient.getEpoch(), ctLocator);
+        CommitTimestamp ct = tm.locateCellCommitTimestamp(CELL_ST, tm.tsoClient.getEpoch(), ctLocator);
         assertTrue(ct.isValid());
         assertEquals(ct.getValue(), CELL_CT);
         assertTrue(ct.getLocation().compareTo(CACHE) == 0);
@@ -247,7 +247,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         // The following line emulates a crash after commit that is observed in (*) below
         doThrow(new RuntimeException()).when(syncPostCommitter).updateShadowCells(any(HBaseTransaction.class));
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
             // Commit a transaction that is broken on commit to avoid
             // write to the shadow cells and avoid cleaning the commit table
             HBaseTransaction tx1 = (HBaseTransaction) tm.begin();
@@ -265,10 +265,10 @@ public class TestHBaseTransactionClient extends OmidTestBase {
                     tx1.getStartTimestamp());
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            CommitTimestamp ct = table.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
                     ctLocator);
             assertTrue(ct.isValid());
-            long expectedCommitTS = tx1.getStartTimestamp() + AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
+            long expectedCommitTS = tx1.getStartTimestamp() + 1;
             assertEquals(ct.getValue(), expectedCommitTS);
             assertTrue(ct.getLocation().compareTo(COMMIT_TABLE) == 0);
         }
@@ -281,7 +281,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
 
         HBaseTransactionManager tm = (HBaseTransactionManager) newTransactionManager(context);
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
             // Commit a transaction to add ST/CT in commit table
             HBaseTransaction tx1 = (HBaseTransaction) tm.begin();
             Put put = new Put(row1);
@@ -295,7 +295,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
                     tx1.getStartTimestamp());
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            CommitTimestamp ct = table.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
                     ctLocator);
             assertTrue(ct.isValid());
             assertEquals(ct.getValue(), tx1.getCommitTimestamp());
@@ -308,7 +308,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
     @Test(timeOut = 30_000)
     public void testCellFromTransactionInPreviousEpochGetsInvalidComitTimestamp(ITestContext context) throws Exception {
 
-        final long CURRENT_EPOCH_FAKE = 1000L * AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN;
+        final long CURRENT_EPOCH_FAKE = 1000L;
 
         CommitTable.Client commitTableClient = spy(getCommitTable(context).getClient());
         AbstractTransactionManager tm = spy((AbstractTransactionManager) newTransactionManager(context, commitTableClient));
@@ -317,15 +317,17 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         SettableFuture<Optional<CommitTimestamp>> f = SettableFuture.create();
         f.set(Optional.<CommitTimestamp>absent());
         doReturn(f).when(commitTableClient).getCommitTimestamp(any(Long.class));
+        doReturn(Optional.<CommitTimestamp>absent()).when(tm).readCommitTimestampFromShadowCell(any(Long.class),
+                any(CommitTimestampLocator.class));
 
-
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
 
             // Commit a transaction to add ST/CT in commit table
             HBaseTransaction tx1 = (HBaseTransaction) tm.begin();
             Put put = new Put(row1);
             put.add(family, qualifier, data1);
             table.put(tx1, put);
+            tm.commit(tx1);
             // Upon commit, the commit data should be in the shadow cells
 
             // Test a transaction in the previous epoch gets an InvalidCommitTimestamp class
@@ -334,7 +336,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
             // Fake the current epoch to simulate a newer TSO
-            CommitTimestamp ct = table.locateCellCommitTimestamp(tx1.getStartTimestamp(), CURRENT_EPOCH_FAKE, ctLocator);
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(tx1.getStartTimestamp(), CURRENT_EPOCH_FAKE, ctLocator);
             assertFalse(ct.isValid());
             assertEquals(ct.getValue(), CommitTable.INVALID_TRANSACTION_MARKER);
             assertTrue(ct.getLocation().compareTo(COMMIT_TABLE) == 0);
@@ -357,8 +359,10 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         SettableFuture<Optional<CommitTimestamp>> f = SettableFuture.create();
         f.set(Optional.<CommitTimestamp>absent());
         doReturn(f).doCallRealMethod().when(commitTableClient).getCommitTimestamp(any(Long.class));
+        doReturn(Optional.<CommitTimestamp>absent()).when(tm).readCommitTimestampFromShadowCell(any(Long.class),
+                any(CommitTimestampLocator.class));
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
 
             // Commit a transaction that is broken on commit to avoid
             // write to the shadow cells and avoid cleaning the commit table
@@ -377,7 +381,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
                     tx1.getStartTimestamp());
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            CommitTimestamp ct = table.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
                     ctLocator);
             assertTrue(ct.isValid());
             assertEquals(ct.getValue(), tx1.getCommitTimestamp());
@@ -397,8 +401,10 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         SettableFuture<Optional<CommitTimestamp>> f = SettableFuture.create();
         f.set(Optional.<CommitTimestamp>absent());
         doReturn(f).when(commitTableClient).getCommitTimestamp(any(Long.class));
+        doReturn(Optional.<CommitTimestamp>absent()).doCallRealMethod()
+                .when(tm).readCommitTimestampFromShadowCell(any(Long.class), any(CommitTimestampLocator.class));
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
 
             // Commit a transaction to add ST/CT in commit table
             HBaseTransaction tx1 = (HBaseTransaction) tm.begin();
@@ -413,7 +419,7 @@ public class TestHBaseTransactionClient extends OmidTestBase {
                     tx1.getStartTimestamp());
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            CommitTimestamp ct = table.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(tx1.getStartTimestamp(), tm.tsoClient.getEpoch(),
                     ctLocator);
             assertTrue(ct.isValid());
             assertEquals(ct.getValue(), tx1.getCommitTimestamp());
@@ -434,12 +440,14 @@ public class TestHBaseTransactionClient extends OmidTestBase {
         SettableFuture<Optional<CommitTimestamp>> f = SettableFuture.create();
         f.set(Optional.<CommitTimestamp>absent());
         doReturn(f).when(commitTableClient).getCommitTimestamp(any(Long.class));
+        doReturn(Optional.<CommitTimestamp>absent()).when(tm).readCommitTimestampFromShadowCell(any(Long.class),
+                any(CommitTimestampLocator.class));
 
-        try (TTable table = spy(new TTable(hbaseConf, TEST_TABLE, tm.getCommitTableClient()))) {
+        try (TTable table = new TTable(hbaseConf, TEST_TABLE)) {
             HBaseCellId hBaseCellId = new HBaseCellId(table.getHTable(), row1, family, qualifier, CELL_TS);
             CommitTimestampLocator ctLocator = new CommitTimestampLocatorImpl(hBaseCellId,
                     Maps.<Long, Long>newHashMap());
-            CommitTimestamp ct = table.locateCellCommitTimestamp(CELL_TS, tm.tsoClient.getEpoch(), ctLocator);
+            CommitTimestamp ct = tm.locateCellCommitTimestamp(CELL_TS, tm.tsoClient.getEpoch(), ctLocator);
             assertTrue(ct.isValid());
             assertEquals(ct.getValue(), -1L);
             assertTrue(ct.getLocation().compareTo(NOT_PRESENT) == 0);

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionManager.java b/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionManager.java
index 347c4ce..03187ea 100644
--- a/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionManager.java
+++ b/hbase-client/src/test/java/org/apache/omid/transaction/TestHBaseTransactionManager.java
@@ -52,7 +52,7 @@ public class TestHBaseTransactionManager extends OmidTestBase {
 
         TSOClient tsoClient = spy(getClient(context));
 
-        long fakeEpoch = tsoClient.getNewStartTimestamp().get() + (FAKE_EPOCH_INCREMENT * AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN);
+        long fakeEpoch = tsoClient.getNewStartTimestamp().get() + FAKE_EPOCH_INCREMENT;
 
         // Modify the epoch before testing the begin method
         doReturn(fakeEpoch).when(tsoClient).getEpoch();

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-client/src/test/java/org/apache/omid/transaction/TestShadowCells.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/omid/transaction/TestShadowCells.java b/hbase-client/src/test/java/org/apache/omid/transaction/TestShadowCells.java
index 8a35f9a..75e64fd 100644
--- a/hbase-client/src/test/java/org/apache/omid/transaction/TestShadowCells.java
+++ b/hbase-client/src/test/java/org/apache/omid/transaction/TestShadowCells.java
@@ -20,9 +20,10 @@ package org.apache.omid.transaction;
 import com.google.common.base.Charsets;
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.ListenableFuture;
-
 import org.apache.omid.committable.CommitTable;
+
 import org.apache.omid.metrics.NullMetricsProvider;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
@@ -44,9 +45,7 @@ import org.testng.ITestContext;
 import org.testng.annotations.Test;
 
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -346,7 +345,7 @@ public class TestShadowCells extends OmidTestBase {
                             return (List<KeyValue>) invocation.callRealMethod();
                         }
                     }).when(table).filterCellsForSnapshot(Matchers.<List<Cell>>any(),
-                            any(HBaseTransaction.class), anyInt(), Matchers.<Map<String, List<Cell>>>any());
+                            any(HBaseTransaction.class), anyInt());
 
                     TransactionManager tm = newTransactionManager(context);
                     if (hasShadowCell(row,

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-common/src/main/java/org/apache/omid/transaction/CellUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/omid/transaction/CellUtils.java b/hbase-common/src/main/java/org/apache/omid/transaction/CellUtils.java
index 14b9d72..3f2425b 100644
--- a/hbase-common/src/main/java/org/apache/omid/transaction/CellUtils.java
+++ b/hbase-common/src/main/java/org/apache/omid/transaction/CellUtils.java
@@ -49,9 +49,6 @@ public final class CellUtils {
     private static final Logger LOG = LoggerFactory.getLogger(CellUtils.class);
     static final byte[] SHADOW_CELL_SUFFIX = "\u0080".getBytes(Charsets.UTF_8); // Non printable char (128 ASCII)
     static byte[] DELETE_TOMBSTONE = Bytes.toBytes("__OMID_TOMBSTONE__");
-    public static final byte[] FAMILY_DELETE_QUALIFIER = new byte[0];
-    public static final String TRANSACTION_ATTRIBUTE = "__OMID_TRANSACTION__";
-    public static final String CLIENT_GET_ATTRIBUTE = "__OMID_CLIENT_GET__";
 
     /**
      * Utility interface to get rid of the dependency on HBase server package

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/pom.xml b/hbase-coprocessor/pom.xml
index e9eab4f..8af093c 100644
--- a/hbase-coprocessor/pom.xml
+++ b/hbase-coprocessor/pom.xml
@@ -31,6 +31,7 @@
             <groupId>org.apache.omid</groupId>
             <artifactId>omid-hbase-client</artifactId>
             <version>${project.version}</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.omid</groupId>

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/OmidRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/OmidRegionScanner.java b/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/OmidRegionScanner.java
deleted file mode 100644
index 752e9a7..0000000
--- a/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/OmidRegionScanner.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.omid.transaction.HBaseTransaction;
-import org.apache.omid.transaction.SnapshotFilterImpl;
-
-public class OmidRegionScanner implements RegionScanner {
-
-    private RegionScanner scanner;
-    private SnapshotFilterImpl snapshotFilter;
-    private HBaseTransaction transaction;
-    private int maxVersions;
-    private Map<String, List<Cell>> familyDeletionCache;
-    
-    public OmidRegionScanner(SnapshotFilterImpl snapshotFilter,
-                      RegionScanner s,
-                      HBaseTransaction transaction,
-                      int maxVersions) {
-        this.snapshotFilter = snapshotFilter;
-        this.scanner = s;
-        this.transaction = transaction;
-        this.maxVersions = maxVersions;
-        this.familyDeletionCache = new HashMap<String, List<Cell>>();
-    }
-
-    @Override
-    public boolean next(List<Cell> results) throws IOException {
-       return next(results, Integer.MAX_VALUE);
-    }
-
-    public boolean next(List<Cell> result, int limit) throws IOException {
-        return nextRaw(result, limit);
-    }
-
-    @Override
-    public void close() throws IOException {
-        scanner.close();
-    }
-
-    @Override
-    public HRegionInfo getRegionInfo() {
-        return scanner.getRegionInfo();
-    }
-
-    @Override
-    public boolean isFilterDone() throws IOException {
-        return scanner.isFilterDone();
-    }
-
-    @Override
-    public boolean reseek(byte[] row) throws IOException {
-        throw new RuntimeException("Not implemented");
-    }
-
-    @Override
-    public long getMaxResultSize() {
-        return scanner.getMaxResultSize();
-    }
-
-    @Override
-    public long getMvccReadPoint() {
-        return scanner.getMvccReadPoint();
-    }
-
-    @Override
-    public boolean nextRaw(List<Cell> result) throws IOException {
-        return nextRaw(result,Integer.MAX_VALUE);
-    }
-
-    public boolean next(List<Cell> result,
-            ScannerContext scannerContext) throws IOException {
-        return next(result, scannerContext.getBatchLimit());
-    }
-
-    public boolean nextRaw(List<Cell> result,
-            ScannerContext scannerContext) throws IOException {
-        return nextRaw(result, scannerContext.getBatchLimit());
-    }
-
-    public int getBatch() {
-        return Integer.MAX_VALUE;
-    }
-
-    public boolean nextRaw(List<Cell> result, int limit) throws IOException {
-        List<Cell> filteredResult = new ArrayList<Cell>();
-        while (filteredResult.isEmpty()) {
-            scanner.nextRaw(filteredResult);
-            if (filteredResult.isEmpty()) {
-                return false;
-            }
-
-            filteredResult = snapshotFilter.filterCellsForSnapshot(filteredResult, transaction, maxVersions, familyDeletionCache);
-        }
-
-        for (Cell cell : filteredResult) {
-            result.add(cell);
-        }
-
-        return true;
-    }
-
-}
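
The deleted OmidRegionScanner was the server-side mirror of TransactionalClientScanner: its nextRaw(List, int) kept pulling raw rows from the wrapped RegionScanner until the SnapshotFilterImpl let at least one cell through. Below is a standalone model of that loop with stand-in types; RawSource and SnapshotFilter are illustrative interfaces, not Omid or HBase classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Standalone model of the loop in the deleted OmidRegionScanner.nextRaw():
// keep reading raw rows until the snapshot filter lets something through,
// or the underlying scanner is exhausted.
public class FilteringLoopSketch {

    interface RawSource {
        boolean next(List<String> out);      // fills 'out'; leaving it empty means exhausted
    }

    interface SnapshotFilter {
        List<String> filter(List<String> raw);
    }

    static boolean nextVisible(RawSource source, SnapshotFilter filter, List<String> result) {
        List<String> filtered = new ArrayList<String>();
        while (filtered.isEmpty()) {
            List<String> raw = new ArrayList<String>();
            source.next(raw);
            if (raw.isEmpty()) {
                return false;                // nothing left to scan
            }
            filtered = filter.filter(raw);   // may come back empty: loop and read more rows
        }
        result.addAll(filtered);
        return true;
    }

    public static void main(String[] args) {
        final Iterator<List<String>> batches = Arrays.asList(
                Arrays.asList("tentative-cell"),
                Arrays.asList("committed-cell")).iterator();

        RawSource source = new RawSource() {
            @Override
            public boolean next(List<String> out) {
                if (batches.hasNext()) {
                    out.addAll(batches.next());
                }
                return batches.hasNext();
            }
        };
        SnapshotFilter filter = new SnapshotFilter() {
            @Override
            public List<String> filter(List<String> raw) {
                return raw.contains("committed-cell") ? raw : new ArrayList<String>();
            }
        };

        List<String> visible = new ArrayList<String>();
        System.out.println(nextVisible(source, filter, visible) + " " + visible); // prints: true [committed-cell]
    }
}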

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/RegionAccessWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/RegionAccessWrapper.java b/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/RegionAccessWrapper.java
deleted file mode 100644
index 28a4be3..0000000
--- a/hbase-coprocessor/src/main/java/org/apache/hadoop/hbase/regionserver/RegionAccessWrapper.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.omid.transaction.TableAccessWrapper;
-
-// This class wraps the Region object when doing server side filtering.
-public class RegionAccessWrapper implements TableAccessWrapper {
-
-    private final Region region;
-    
-    public RegionAccessWrapper(Region region) {
-        this.region = region;
-    }
-
-    @Override
-    public Result[] get(List<Get> get) throws IOException {
-        Result[] results = new Result[get.size()];
-
-        int i = 0;
-        for (Get g : get) {
-            results[i++] = region.get(g);
-        }
-        return results;
-    }
-
-    @Override
-    public Result get(Get get) throws IOException {
-        return region.get(get);
-    }
-
-    @Override
-    public void put(Put put) throws IOException {
-        region.put(put);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/src/main/java/org/apache/omid/transaction/OmidSnapshotFilter.java
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/src/main/java/org/apache/omid/transaction/OmidSnapshotFilter.java b/hbase-coprocessor/src/main/java/org/apache/omid/transaction/OmidSnapshotFilter.java
deleted file mode 100644
index daf319c..0000000
--- a/hbase-coprocessor/src/main/java/org/apache/omid/transaction/OmidSnapshotFilter.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.omid.committable.CommitTable;
-import org.apache.omid.committable.hbase.HBaseCommitTable;
-import org.apache.omid.committable.hbase.HBaseCommitTableConfig;
-import org.apache.omid.proto.TSOProto;
-import org.apache.omid.transaction.AbstractTransaction.VisibilityLevel;
-import org.apache.omid.HBaseShims;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.OmidRegionScanner;
-import org.apache.hadoop.hbase.regionserver.RegionAccessWrapper;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-
-import static org.apache.omid.committable.hbase.HBaseCommitTableConfig.COMMIT_TABLE_NAME_KEY;
-
-/**
- * Server side filtering to identify the transaction snapshot.
- */
-public class OmidSnapshotFilter extends BaseRegionObserver {
-
-    private static final Logger LOG = LoggerFactory.getLogger(OmidSnapshotFilter.class);
-
-    private HBaseCommitTableConfig commitTableConf = null;
-    private Configuration conf = null;
-    @VisibleForTesting
-    private CommitTable.Client commitTableClient;
-
-    private SnapshotFilterImpl snapshotFilter;
-
-    final static String OMID_SNAPSHOT_FILTER_CF_FLAG = "OMID_SNAPSHOT_FILTER_ENABLED";
-
-    public OmidSnapshotFilter() {
-        LOG.info("Compactor coprocessor initialized via empty constructor");
-    }
-
-    @Override
-    public void start(CoprocessorEnvironment env) throws IOException {
-        LOG.info("Starting snapshot filter coprocessor");
-        conf = env.getConfiguration();
-        commitTableConf = new HBaseCommitTableConfig();
-        String commitTableName = conf.get(COMMIT_TABLE_NAME_KEY);
-        if (commitTableName != null) {
-            commitTableConf.setTableName(commitTableName);
-        }
-        commitTableClient = initAndGetCommitTableClient();
-        
-        snapshotFilter = new SnapshotFilterImpl(commitTableClient);
-        
-        LOG.info("Snapshot filter started");
-    }
-
-    @Override
-    public void stop(CoprocessorEnvironment e) throws IOException {
-        LOG.info("Stopping snapshot filter coprocessor");
-        commitTableClient.close();
-        LOG.info("Snapshot filter stopped");
-    }
-
-    @Override
-    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result) throws IOException {
-
-        if (get.getAttribute(CellUtils.CLIENT_GET_ATTRIBUTE) == null) return;
-
-        get.setAttribute(CellUtils.CLIENT_GET_ATTRIBUTE, null);
-        RegionAccessWrapper regionAccessWrapper = new RegionAccessWrapper(HBaseShims.getRegionCoprocessorRegion(c.getEnvironment()));
-        Result res = regionAccessWrapper.get(get); // get parameters were set at the client side
-
-        snapshotFilter.setTableAccessWrapper(regionAccessWrapper);
-
-        List<Cell> filteredKeyValues = Collections.emptyList();
-        if (!res.isEmpty()) {
-            TSOProto.Transaction transaction = TSOProto.Transaction.parseFrom(get.getAttribute(CellUtils.TRANSACTION_ATTRIBUTE));
-
-            long id = transaction.getTimestamp();
-            long readTs = transaction.getReadTimestamp();
-            long epoch = transaction.getEpoch();
-            VisibilityLevel visibilityLevel = VisibilityLevel.fromInteger(transaction.getVisibilityLevel());
-
-            HBaseTransaction hbaseTransaction = new HBaseTransaction(id, readTs, visibilityLevel, epoch, new HashSet<HBaseCellId>(), null);
-            filteredKeyValues = snapshotFilter.filterCellsForSnapshot(res.listCells(), hbaseTransaction, get.getMaxVersions(), new HashMap<String, List<Cell>>());
-        }
-
-        for (Cell cell : filteredKeyValues) {
-            result.add(cell);
-        }
-
-        c.bypass();
-
-    }
-
-    @Override
-    public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e,
-            Scan scan,
-            RegionScanner s) throws IOException {
-        byte[] byteTransaction = scan.getAttribute(CellUtils.TRANSACTION_ATTRIBUTE);
-
-        if (byteTransaction == null) {
-            return s;
-        }
-
-        TSOProto.Transaction transaction = TSOProto.Transaction.parseFrom(byteTransaction);
-
-        long id = transaction.getTimestamp();
-        long readTs = transaction.getReadTimestamp();
-        long epoch = transaction.getEpoch();
-        VisibilityLevel visibilityLevel = VisibilityLevel.fromInteger(transaction.getVisibilityLevel());
-
-        HBaseTransaction hbaseTransaction = new HBaseTransaction(id, readTs, visibilityLevel, epoch, new HashSet<HBaseCellId>(), null);
-
-        RegionAccessWrapper regionAccessWrapper = new RegionAccessWrapper(HBaseShims.getRegionCoprocessorRegion(e.getEnvironment()));
-
-        snapshotFilter.setTableAccessWrapper(regionAccessWrapper);
-
-        return new OmidRegionScanner(snapshotFilter, s, hbaseTransaction, 1);
-    }
-
-    private CommitTable.Client initAndGetCommitTableClient() throws IOException {
-        LOG.info("Trying to get the commit table client");
-        CommitTable commitTable = new HBaseCommitTable(conf, commitTableConf);
-        CommitTable.Client commitTableClient = commitTable.getClient();
-        LOG.info("Commit table client obtained {}", commitTableClient.getClass().getCanonicalName());
-        return commitTableClient;
-    }
-
-}
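
The coprocessor removed above round-trips transaction state through operation attributes: the client serializes its transaction into an attribute on the Get or Scan, and preGetOp()/postScannerOpen() deserialize it, rebuild an HBaseTransaction, filter the raw cells and bypass the default read. A rough sketch of that attribute round-trip using the standard HBase client API (the attribute keys below are placeholders; the real ones live in CellUtils):

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustration only: how a Get can carry serialized transaction state to a coprocessor.
    public class AttributeRoundTripSketch {

        public static void main(String[] args) {
            Get get = new Get(Bytes.toBytes("row1"));

            // Client side: attach the serialized transaction and a marker flag.
            get.setAttribute("__OMID_TRANSACTION__", Bytes.toBytes("serialized-tx-state"));
            get.setAttribute("__OMID_CLIENT_GET__", Bytes.toBytes(1));

            // Server side (inside preGetOp) the attributes are read back, the transaction
            // is rebuilt, the cells are filtered and ObserverContext.bypass() is called.
            byte[] tx = get.getAttribute("__OMID_TRANSACTION__");
            System.out.println("transaction payload bytes: " + tx.length);
        }
    }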

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TSOForSnapshotFilterTestModule.java
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TSOForSnapshotFilterTestModule.java b/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TSOForSnapshotFilterTestModule.java
deleted file mode 100644
index 130a061..0000000
--- a/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TSOForSnapshotFilterTestModule.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Provider;
-import com.google.inject.Provides;
-import org.apache.omid.committable.CommitTable;
-import org.apache.omid.committable.hbase.HBaseCommitTable;
-import org.apache.omid.metrics.MetricsRegistry;
-import org.apache.omid.metrics.NullMetricsProvider;
-import org.apache.omid.timestamp.storage.HBaseTimestampStorage;
-import org.apache.omid.timestamp.storage.TimestampStorage;
-import org.apache.omid.tso.BatchPoolModule;
-import org.apache.omid.tso.DisruptorModule;
-import org.apache.omid.tso.LeaseManagement;
-import org.apache.omid.tso.MockPanicker;
-import org.apache.omid.tso.NetworkInterfaceUtils;
-import org.apache.omid.tso.Panicker;
-import org.apache.omid.tso.PersistenceProcessorHandler;
-import org.apache.omid.tso.TSOChannelHandler;
-import org.apache.omid.tso.TSOServerConfig;
-import org.apache.omid.tso.TSOStateManager;
-import org.apache.omid.tso.TSOStateManagerImpl;
-import org.apache.omid.tso.TimestampOracle;
-import org.apache.omid.tso.TimestampOracleImpl;
-import org.apache.omid.tso.VoidLeaseManager;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-
-import javax.inject.Named;
-import javax.inject.Singleton;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.SocketException;
-import java.net.UnknownHostException;
-
-import static org.apache.omid.tso.TSOServer.TSO_HOST_AND_PORT_KEY;
-
-class TSOForSnapshotFilterTestModule extends AbstractModule {
-
-    private final TSOServerConfig config;
-
-    TSOForSnapshotFilterTestModule(TSOServerConfig config) {
-        this.config = config;
-    }
-
-    @Override
-    protected void configure() {
-
-        bind(TSOChannelHandler.class).in(Singleton.class);
-
-        bind(TSOStateManager.class).to(TSOStateManagerImpl.class).in(Singleton.class);
-
-        bind(Panicker.class).to(MockPanicker.class);
-        // HBase commit table creation
-        bind(CommitTable.class).to(HBaseCommitTable.class).in(Singleton.class);
-        // Timestamp storage creation
-        bind(TimestampStorage.class).to(HBaseTimestampStorage.class).in(Singleton.class);
-        bind(TimestampOracle.class).to(TimestampOracleImpl.class).in(Singleton.class);
-
-        install(new BatchPoolModule(config));
-        // DisruptorConfig
-        install(new DisruptorModule(config));
-
-    }
-
-    @Provides
-    @Singleton
-    Configuration provideHBaseConfig() throws IOException {
-        Configuration hbaseConf = HBaseConfiguration.create();
-        hbaseConf.setInt("hbase.hregion.memstore.flush.size", 10_000 * 1024);
-        hbaseConf.setInt("hbase.regionserver.nbreservationblocks", 1);
-        hbaseConf.set("tso.host", "localhost");
-        hbaseConf.setInt("tso.port", 1234);
-        hbaseConf.set("hbase.coprocessor.region.classes", "org.apache.omid.transaction.OmidSnapshotFilter");
-        final String rootdir = "/tmp/hbase.test.dir/";
-        File rootdirFile = new File(rootdir);
-        FileUtils.deleteDirectory(rootdirFile);
-        hbaseConf.set("hbase.rootdir", rootdir);
-        return hbaseConf;
-    }
-
-    @Provides
-    TSOServerConfig provideTSOServerConfig() {
-        return config;
-    }
-
-    @Provides
-    @Singleton
-    MetricsRegistry provideMetricsRegistry() {
-        return new NullMetricsProvider();
-    }
-
-    @Provides
-    @Singleton
-    LeaseManagement provideLeaseManager(TSOChannelHandler tsoChannelHandler,
-                                        TSOStateManager stateManager) throws IOException {
-        return new VoidLeaseManager(tsoChannelHandler, stateManager);
-    }
-
-    @Provides
-    @Named(TSO_HOST_AND_PORT_KEY)
-    String provideTSOHostAndPort() throws SocketException, UnknownHostException {
-        return NetworkInterfaceUtils.getTSOHostAndPort(config);
-    }
-
-    @Provides
-    PersistenceProcessorHandler[] getPersistenceProcessorHandler(Provider<PersistenceProcessorHandler> provider) {
-        PersistenceProcessorHandler[] persistenceProcessorHandlers = new PersistenceProcessorHandler[config.getNumConcurrentCTWriters()];
-        for (int i = 0; i < persistenceProcessorHandlers.length; i++) {
-            persistenceProcessorHandlers[i] = provider.get();
-        }
-        return persistenceProcessorHandlers;
-    }
-}
\ No newline at end of file
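
One idiom in the module above worth noting is the provider-array @Provides method: it fills each of the config.getNumConcurrentCTWriters() slots with a fresh PersistenceProcessorHandler by calling provider.get() in a loop. A self-contained sketch of that Guice idiom with a stand-in handler type (not the real Omid bindings):

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;
    import com.google.inject.Provider;

    // Illustration of the provider-array idiom: ask an unscoped Provider for N instances.
    public class ProviderArraySketch {

        static class Handler { }                        // stand-in for PersistenceProcessorHandler

        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(Handler.class);                // unscoped: each get() yields a new instance
                }
            });

            Provider<Handler> provider = injector.getProvider(Handler.class);
            Handler[] handlers = new Handler[4];        // the real module sizes this from the TSO config
            for (int i = 0; i < handlers.length; i++) {
                handlers[i] = provider.get();
            }
            System.out.println(handlers.length + " independent handlers created");
        }
    }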

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TestSnapshotFilter.java
----------------------------------------------------------------------
diff --git a/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TestSnapshotFilter.java b/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TestSnapshotFilter.java
deleted file mode 100644
index 29f0a4b..0000000
--- a/hbase-coprocessor/src/test/java/org/apache/omid/transaction/TestSnapshotFilter.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.omid.transaction;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.omid.TestUtils;
-import org.apache.omid.committable.CommitTable;
-import org.apache.omid.committable.hbase.HBaseCommitTableConfig;
-import org.apache.omid.metrics.NullMetricsProvider;
-import org.apache.omid.timestamp.storage.HBaseTimestampStorageConfig;
-import org.apache.omid.tso.TSOServer;
-import org.apache.omid.tso.TSOServerConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-
-import static org.mockito.Mockito.spy;
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertFalse;
-import static org.testng.Assert.assertTrue;
-
-public class TestSnapshotFilter {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotFilter.class);
-
-    private static final String TEST_FAMILY = "test-fam";
-    
-    private static final int MAX_VERSIONS = 3;
-
-    private AbstractTransactionManager tm;
-
-    private Injector injector;
-
-    private HBaseAdmin admin;
-    private Configuration hbaseConf;
-    private HBaseTestingUtility hbaseTestUtil;
-    private MiniHBaseCluster hbaseCluster;
-
-    private TSOServer tso;
-
-    private AggregationClient aggregationClient;
-    private CommitTable commitTable;
-    private PostCommitActions syncPostCommitter;
-
-    @BeforeClass
-    public void setupTestSnapshotFilter() throws Exception {
-        TSOServerConfig tsoConfig = new TSOServerConfig();
-        tsoConfig.setPort(5678);
-        tsoConfig.setConflictMapSize(1);
-        injector = Guice.createInjector(new TSOForSnapshotFilterTestModule(tsoConfig));
-        hbaseConf = injector.getInstance(Configuration.class);
-        hbaseConf.setBoolean("omid.server.side.filter", true);
-        hbaseConf.setInt("hbase.master.info.port", 16011);
-        HBaseCommitTableConfig hBaseCommitTableConfig = injector.getInstance(HBaseCommitTableConfig.class);
-        HBaseTimestampStorageConfig hBaseTimestampStorageConfig = injector.getInstance(HBaseTimestampStorageConfig.class);
-
-        setupHBase();
-        aggregationClient = new AggregationClient(hbaseConf);
-        admin = new HBaseAdmin(hbaseConf);
-        createRequiredHBaseTables(hBaseTimestampStorageConfig, hBaseCommitTableConfig);
-        setupTSO();
-
-        commitTable = injector.getInstance(CommitTable.class);
-    }
-
-    private void setupHBase() throws Exception {
-        LOG.info("--------------------------------------------------------------------------------------------------");
-        LOG.info("Setting up HBase");
-        LOG.info("--------------------------------------------------------------------------------------------------");
-        hbaseTestUtil = new HBaseTestingUtility(hbaseConf);
-        LOG.info("--------------------------------------------------------------------------------------------------");
-        LOG.info("Creating HBase MiniCluster");
-        LOG.info("--------------------------------------------------------------------------------------------------");
-        hbaseCluster = hbaseTestUtil.startMiniCluster(1);
-    }
-
-    private void createRequiredHBaseTables(HBaseTimestampStorageConfig timestampStorageConfig,
-                                           HBaseCommitTableConfig hBaseCommitTableConfig) throws IOException {
-        createTableIfNotExists(timestampStorageConfig.getTableName(), timestampStorageConfig.getFamilyName().getBytes());
-
-        createTableIfNotExists(hBaseCommitTableConfig.getTableName(), hBaseCommitTableConfig.getCommitTableFamily(), hBaseCommitTableConfig.getLowWatermarkFamily());
-    }
-
-    private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
-        if (!admin.tableExists(tableName)) {
-            LOG.info("Creating {} table...", tableName);
-            HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
-
-            for (byte[] family : families) {
-                HColumnDescriptor datafam = new HColumnDescriptor(family);
-                datafam.setMaxVersions(MAX_VERSIONS);
-                desc.addFamily(datafam);
-            }
-
-            desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
-            admin.createTable(desc);
-        }
-
-    }
-
-    private void setupTSO() throws IOException, InterruptedException {
-        tso = injector.getInstance(TSOServer.class);
-        tso.startAndWait();
-        TestUtils.waitForSocketListening("localhost", 5678, 100);
-        Thread.currentThread().setName("UnitTest(s) thread");
-    }
-
-    @AfterClass
-    public void cleanupTestSnapshotFilter() throws Exception {
-        teardownTSO();
-        hbaseCluster.shutdown();
-    }
-
-    private void teardownTSO() throws IOException, InterruptedException {
-        tso.stopAndWait();
-        TestUtils.waitForSocketNotListening("localhost", 5678, 1000);
-    }
-
-    @BeforeMethod
-    public void setupTestSnapshotFilterIndividualTest() throws Exception {
-        tm = spy((AbstractTransactionManager) newTransactionManager());
-    }
-
-    private TransactionManager newTransactionManager() throws Exception {
-        HBaseOmidClientConfiguration hbaseOmidClientConf = new HBaseOmidClientConfiguration();
-        hbaseOmidClientConf.setConnectionString("localhost:5678");
-        hbaseOmidClientConf.setHBaseConfiguration(hbaseConf);
-        CommitTable.Client commitTableClient = commitTable.getClient();
-        syncPostCommitter =
-                spy(new HBaseSyncPostCommitter(new NullMetricsProvider(),commitTableClient));
-        return HBaseTransactionManager.builder(hbaseOmidClientConf)
-                .postCommitter(syncPostCommitter)
-                .commitTableClient(commitTableClient)
-                .build();
-    }
-
-    @Test(timeOut = 60_000)
-    public void testGetFirstResult() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-     
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        Get get = new Get(rowName1);
-        Result result = tt.get(tx2, get);
-
-        assertTrue(!result.isEmpty(), "Result should not be empty!");
-
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        tm.commit(tx2);
-
-        Transaction tx3 = tm.begin();
-
-        Put put3 = new Put(rowName1);
-        put3.add(famName1, colName1, dataValue1);
-        tt.put(tx3, put3);
-
-        tm.commit(tx3);
-        
-        Transaction tx4 = tm.begin();
-
-        Get get2 = new Get(rowName1);
-        Result result2 = tt.get(tx4, get2);
-
-        assertTrue(!result2.isEmpty(), "Result should not be empty!");
-
-        long tsRow2 = result2.rawCells()[0].getTimestamp();
-        assertEquals(tsRow2, tx3.getTransactionId(), "Reading different version");
-
-        tm.commit(tx4);
-
-        tt.close();
-    }
-
-    @Test(timeOut = 60_000)
-    public void testGetSecondResult() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put put1 = new Put(rowName1);
-        put1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, put1);
-        
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-        Put put2 = new Put(rowName1);
-        put2.add(famName1, colName1, dataValue1);
-        tt.put(tx2, put2);
-        
-        Transaction tx3 = tm.begin();
-
-        Get get = new Get(rowName1);
-        Result result = tt.get(tx3, get);
-
-        assertTrue(!result.isEmpty(), "Result should not be empty!");
-
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        tm.commit(tx3);
-
-        tt.close();
-    }
-
-    @Test(timeOut = 60_000)
-    public void testScanFirstResult() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put row1 = new Put(rowName1);
-        row1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, row1);
-
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        ResultScanner iterableRS = tt.getScanner(tx2, new Scan().setStartRow(rowName1).setStopRow(rowName1));
-        Result result = iterableRS.next();
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS.next() != null);
-
-        tm.commit(tx2);
-
-        Transaction tx3 = tm.begin();
-
-        Put put3 = new Put(rowName1);
-        put3.add(famName1, colName1, dataValue1);
-        tt.put(tx3, put3);
-
-        tm.commit(tx3);
-
-        Transaction tx4 = tm.begin();
-
-        ResultScanner iterableRS2 = tt.getScanner(tx4, new Scan().setStartRow(rowName1).setStopRow(rowName1));
-        Result result2 = iterableRS2.next();
-        long tsRow2 = result2.rawCells()[0].getTimestamp();
-        assertEquals(tsRow2, tx3.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS2.next() != null);
-
-        tm.commit(tx4);
-
-        tt.close();
-    }
-
-    @Test(timeOut = 60_000)
-    public void testScanSecondResult() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] famName1 = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put put1 = new Put(rowName1);
-        put1.add(famName1, colName1, dataValue1);
-        tt.put(tx1, put1);
-
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        Put put2 = new Put(rowName1);
-        put2.add(famName1, colName1, dataValue1);
-        tt.put(tx2, put2);
-
-        Transaction tx3 = tm.begin();
-
-        ResultScanner iterableRS = tt.getScanner(tx3, new Scan().setStartRow(rowName1).setStopRow(rowName1));
-        Result result = iterableRS.next();
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS.next() != null);
-
-        tm.commit(tx3);
-
-        tt.close();
-    }
-
-    @Test (timeOut = 60_000)
-    public void testScanFewResults() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] rowName2 = Bytes.toBytes("row2");
-        byte[] rowName3 = Bytes.toBytes("row3");
-        byte[] famName = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] colName2 = Bytes.toBytes("col2");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put put1 = new Put(rowName1);
-        put1.add(famName, colName1, dataValue1);
-        tt.put(tx1, put1);
-
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        Put put2 = new Put(rowName2);
-        put2.add(famName, colName2, dataValue2);
-        tt.put(tx2, put2);
-
-        tm.commit(tx2);
-
-        Transaction tx3 = tm.begin();
-
-        ResultScanner iterableRS = tt.getScanner(tx3, new Scan().setStartRow(rowName1).setStopRow(rowName3));
-        Result result = iterableRS.next();
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        result = iterableRS.next();
-        tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx2.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS.next() != null);
-
-        tm.commit(tx3);
-
-        tt.close();
-    }
-
-    @Test (timeOut = 60_000)
-    public void testScanFewResultsDifferentTransaction() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] rowName2 = Bytes.toBytes("row2");
-        byte[] rowName3 = Bytes.toBytes("row3");
-        byte[] famName = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] colName2 = Bytes.toBytes("col2");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put put1 = new Put(rowName1);
-        put1.add(famName, colName1, dataValue1);
-        tt.put(tx1, put1);
-        Put put2 = new Put(rowName2);
-        put2.add(famName, colName2, dataValue2);
-        tt.put(tx1, put2);
-
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        put2 = new Put(rowName2);
-        put2.add(famName, colName2, dataValue2);
-        tt.put(tx2, put2);
-
-        tm.commit(tx2);
-
-        Transaction tx3 = tm.begin();
-
-        ResultScanner iterableRS = tt.getScanner(tx3, new Scan().setStartRow(rowName1).setStopRow(rowName3));
-        Result result = iterableRS.next();
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        result = iterableRS.next();
-        tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx2.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS.next() != null);
-
-        tm.commit(tx3);
-
-        tt.close();
-    }
-
-    @Test (timeOut = 60_000)
-    public void testScanFewResultsSameTransaction() throws Throwable {
-
-        byte[] rowName1 = Bytes.toBytes("row1");
-        byte[] rowName2 = Bytes.toBytes("row2");
-        byte[] rowName3 = Bytes.toBytes("row3");
-        byte[] famName = Bytes.toBytes(TEST_FAMILY);
-        byte[] colName1 = Bytes.toBytes("col1");
-        byte[] colName2 = Bytes.toBytes("col2");
-        byte[] dataValue1 = Bytes.toBytes("testWrite-1");
-        byte[] dataValue2 = Bytes.toBytes("testWrite-2");
-
-        String TEST_TABLE = "testGetFirstResult";
-        createTableIfNotExists(TEST_TABLE, Bytes.toBytes(TEST_FAMILY));
-        TTable tt = new TTable(hbaseConf, TEST_TABLE);
-
-        Transaction tx1 = tm.begin();
-
-        Put put1 = new Put(rowName1);
-        put1.add(famName, colName1, dataValue1);
-        tt.put(tx1, put1);
-        Put put2 = new Put(rowName2);
-        put2.add(famName, colName2, dataValue2);
-        tt.put(tx1, put2);
-
-        tm.commit(tx1);
-
-        Transaction tx2 = tm.begin();
-
-        put2 = new Put(rowName2);
-        put2.add(famName, colName2, dataValue2);
-        tt.put(tx2, put2);
-
-        Transaction tx3 = tm.begin();
-
-        ResultScanner iterableRS = tt.getScanner(tx3, new Scan().setStartRow(rowName1).setStopRow(rowName3));
-        Result result = iterableRS.next();
-        long tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        result = iterableRS.next();
-        tsRow = result.rawCells()[0].getTimestamp();
-        assertEquals(tsRow, tx1.getTransactionId(), "Reading different version");
-
-        assertFalse(iterableRS.next() != null);
-
-        tm.commit(tx3);
-
-        tt.close();
-    }
-}
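
Every test in the class above follows the same shape: write and commit under one transaction, then begin a second transaction and assert that a get or scan only surfaces versions committed before that second transaction started. A tiny in-memory sketch of the visibility rule being asserted (a toy data structure, not the Omid read path):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    // Toy model of snapshot visibility for a single cell: a reader that begins at
    // startTs sees only the latest version committed strictly before startTs.
    public class SnapshotVisibilitySketch {

        public static void main(String[] args) {
            NavigableMap<Long, String> committedVersions = new TreeMap<>();  // commitTs -> value
            committedVersions.put(5L, "written-by-tx1");
            committedVersions.put(12L, "written-by-tx3");

            long readerStartTs = 10L;  // the reader began between the two commits
            String visible = committedVersions.headMap(readerStartTs, false).lastEntry().getValue();
            System.out.println(visible);  // written-by-tx1, matching the tests' assertions
        }
    }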

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/hbase-shims/hbase-0/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-shims/hbase-0/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-shims/hbase-0/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index a419a1d..23742b6 100644
--- a/hbase-shims/hbase-0/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-shims/hbase-0/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 
 import java.io.IOException;
@@ -40,12 +39,6 @@ public class Region {
 
     }
 
-    void put(Put putOperation) throws IOException {
-
-        hRegion.put(putOperation);
-
-    }
-
     HRegionInfo getRegionInfo() {
 
         return hRegion.getRegionInfo();

http://git-wip-us.apache.org/repos/asf/incubator-omid/blob/7a9d7d6f/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransaction.java
----------------------------------------------------------------------
diff --git a/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransaction.java b/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransaction.java
index b22f024..500c1e2 100644
--- a/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransaction.java
+++ b/transaction-client/src/main/java/org/apache/omid/transaction/AbstractTransaction.java
@@ -18,7 +18,6 @@
 package org.apache.omid.transaction;
 
 import com.google.common.base.Optional;
-
 import org.apache.omid.tso.client.CellId;
 
 import java.util.ArrayList;
@@ -38,48 +37,14 @@ import java.util.Set;
  */
 public abstract class AbstractTransaction<T extends CellId> implements Transaction {
 
-    enum VisibilityLevel {
-        // Regular snapshot isolation. Returns the last key, either from the snapshot or from the current transaction
-        // Sets the readTimestamp to be the writeTimestamp
-        SNAPSHOT,
-        // Returns all the versions of a key X written by the transaction, plus the key X from the provided snapshot.
-        SNAPSHOT_ALL,
-        // Returns the last key, either from the snapshot or from the current transaction that was written before the last checkpoint.
-        // Sets the readTimestamp to be the writeTimestamp - 1
-        SNAPSHOT_EXCLUDE_CURRENT;
-
-        public static VisibilityLevel fromInteger(int number) {
-            VisibilityLevel visibilityLevel = SNAPSHOT;
-
-            switch (number) {
-            case 0:
-                visibilityLevel = VisibilityLevel.SNAPSHOT;
-                break;
-            case 1:
-                visibilityLevel =  VisibilityLevel.SNAPSHOT_ALL;
-                break;
-            case 2:
-                visibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
-                break;
-                default:
-                    assert(false);
-            }
-
-            return visibilityLevel;
-        }
-    }
-
     private transient Map<String, Object> metadata = new HashMap<>();
     private final AbstractTransactionManager transactionManager;
     private final long startTimestamp;
-    protected long readTimestamp;
-    protected long writeTimestamp;
     private final long epoch;
     private long commitTimestamp;
     private boolean isRollbackOnly;
     private final Set<T> writeSet;
     private Status status = Status.RUNNING;
-    private VisibilityLevel visibilityLevel;
 
     /**
      * Base constructor
@@ -101,37 +66,10 @@ public abstract class AbstractTransaction<T extends CellId> implements Transacti
                                long epoch,
                                Set<T> writeSet,
                                AbstractTransactionManager transactionManager) {
-        this(transactionId, transactionId, VisibilityLevel.SNAPSHOT, epoch, writeSet, transactionManager);
-    }
-
-    public AbstractTransaction(long transactionId,
-            long readTimestamp,
-            VisibilityLevel visibilityLevel,
-            long epoch,
-            Set<T> writeSet,
-            AbstractTransactionManager transactionManager) {
-        this.startTimestamp = this.writeTimestamp = transactionId;
-        this.readTimestamp = readTimestamp;
+        this.startTimestamp = transactionId;
         this.epoch = epoch;
         this.writeSet = writeSet;
         this.transactionManager = transactionManager;
-        this.visibilityLevel = visibilityLevel;
-    }
-
-    /**
-     * Creates a checkpoint and sets the visibility level to SNAPSHOT_EXCLUDE_CURRENT
-     * The number of checkpoints is bounded by AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN so that checkpointing remains a client-side operation
-     * @throws TransactionException if the number of checkpoints in this transaction would exceed
-     *         AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN - 1
-     */
-    void checkpoint() throws TransactionException {
-
-        setVisibilityLevel(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
-        this.readTimestamp = this.writeTimestamp++;
-
-        if (this.writeTimestamp % AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN == 0) {
-            throw new TransactionException("Error: number of checkpoing cannot exceed " + (AbstractTransactionManager.MAX_CHECKPOINTS_PER_TXN - 1));
-        }
     }
 
     /**
@@ -196,22 +134,6 @@ public abstract class AbstractTransaction<T extends CellId> implements Transacti
     }
 
     /**
-     * Returns the read timestamp for this transaction.
-     * @return read timestamp
-     */
-    public long getReadTimestamp() {
-        return readTimestamp;
-    }
-
-    /**
-     * Returns the write timestamp for this transaction.
-     * @return write timestamp
-     */
-    public long getWriteTimestamp() {
-        return writeTimestamp;
-    }
-
-    /**
      * Returns the commit timestamp for this transaction.
      * @return commit timestamp
      */
@@ -220,14 +142,6 @@ public abstract class AbstractTransaction<T extends CellId> implements Transacti
     }
 
     /**
-     * Returns the visibility level for this transaction.
-     * @return visibility level
-     */
-    public VisibilityLevel getVisibilityLevel() {
-        return visibilityLevel;
-    }
-
-    /**
      * Sets the commit timestamp for this transaction.
      * @param commitTimestamp
      *            the commit timestamp to set
@@ -237,22 +151,6 @@ public abstract class AbstractTransaction<T extends CellId> implements Transacti
     }
 
     /**
-     * Sets the visibility level for this transaction.
-     * @param visibilityLevel
-     *            the {@link VisibilityLevel} to set
-     */
-    public void setVisibilityLevel(VisibilityLevel visibilityLevel) {
-        this.visibilityLevel = visibilityLevel;
-
-        // If we are setting visibility level to either SNAPSHOT or SNAPSHOT_ALL
-        // then we should let readTimestamp equals to writeTimestamp
-        if (this.visibilityLevel == VisibilityLevel.SNAPSHOT ||
-            this.visibilityLevel == VisibilityLevel.SNAPSHOT_ALL) {
-            this.readTimestamp = this.writeTimestamp;
-        }
-    }
-
-    /**
      * Sets the status for this transaction.
      * @param status
      *            the {@link Status} to set
@@ -280,12 +178,10 @@ public abstract class AbstractTransaction<T extends CellId> implements Transacti
 
     @Override
     public String toString() {
-        return String.format("Tx-%s [%s] (ST=%d, RT=%d, WT=%d, CT=%d, Epoch=%d) WriteSet %s",
+        return String.format("Tx-%s [%s] (ST=%d, CT=%d, Epoch=%d) WriteSet %s",
                              Long.toHexString(getTransactionId()),
                              status,
                              startTimestamp,
-                             readTimestamp,
-                             writeTimestamp,
                              commitTimestamp,
                              epoch,
                              writeSet);