Posted to commits@hbase.apache.org by jm...@apache.org on 2015/10/29 19:16:15 UTC

[3/8] hbase git commit: HBASE-14675 Exorcise deprecated Put#add(...) and replace with Put#addColumn(...)
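
For reference, the change applied throughout these tests is a mechanical swap of the removed Put#add(family, qualifier[, ts], value) overloads for Put#addColumn(...) with the same arguments. A minimal sketch of the before/after usage (the "cf"/"q1" names and the 'table' handle below are illustrative, not taken from the patch):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // before (deprecated, removed by HBASE-14675):
    //   put.add(family, qualifier, value);
    //   put.add(family, qualifier, ts, value);

    Put put = new Put(Bytes.toBytes("r1"));
    // same family/qualifier/value arguments, only the method name changes
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // timestamped variant
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), 1234567L, Bytes.toBytes("v2"));
    table.put(put);  // 'table' is assumed to be an open org.apache.hadoop.hbase.client.Table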

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 49f36d6..7b48783 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -386,7 +386,7 @@ public class TestAtomicOperation {
               RowMutations rm = new RowMutations(row);
               if (op) {
                 Put p = new Put(row, ts);
-                p.add(fam1, qual1, value1);
+                p.addColumn(fam1, qual1, value1);
                 p.setDurability(Durability.ASYNC_WAL);
                 rm.add(p);
                 Delete d = new Delete(row);
@@ -399,7 +399,7 @@ public class TestAtomicOperation {
                 d.setDurability(Durability.ASYNC_WAL);
                 rm.add(d);
                 Put p = new Put(row, ts);
-                p.add(fam1, qual2, value2);
+                p.addColumn(fam1, qual2, value2);
                 p.setDurability(Durability.ASYNC_WAL);
                 rm.add(p);
               }
@@ -479,7 +479,7 @@ public class TestAtomicOperation {
               List<Mutation> mrm = new ArrayList<Mutation>();
               if (op) {
                 Put p = new Put(row2, ts);
-                p.add(fam1, qual1, value1);
+                p.addColumn(fam1, qual1, value1);
                 p.setDurability(Durability.ASYNC_WAL);
                 mrm.add(p);
                 Delete d = new Delete(row);
@@ -493,7 +493,7 @@ public class TestAtomicOperation {
                 mrm.add(d);
                 Put p = new Put(row, ts);
                 p.setDurability(Durability.ASYNC_WAL);
-                p.add(fam1, qual1, value2);
+                p.addColumn(fam1, qual1, value2);
                 mrm.add(p);
               }
               region.mutateRowsWithLocks(mrm, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
@@ -581,7 +581,7 @@ public class TestAtomicOperation {
     
     Put[] puts = new Put[1];
     Put put = new Put(Bytes.toBytes("r1"));
-    put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
+    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
     puts[0] = put;
     
     region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
@@ -615,7 +615,7 @@ public class TestAtomicOperation {
     public void doWork() throws Exception {
       Put[] puts = new Put[1];
       Put put = new Put(Bytes.toBytes("r1"));
-      put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50"));
+      put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50"));
       puts[0] = put;
       testStep = TestStep.PUT_STARTED;
       region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
@@ -632,7 +632,7 @@ public class TestAtomicOperation {
     public void doWork() throws Exception {
       Put[] puts = new Put[1];
       Put put = new Put(Bytes.toBytes("r1"));
-      put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11"));
+      put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11"));
       puts[0] = put;
       while (testStep != TestStep.PUT_COMPLETED) {
         Thread.sleep(100);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index 900d4ff..b0a43b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -127,7 +127,7 @@ public class TestBlocksRead  {
     put.setDurability(Durability.SKIP_WAL);
 
     for (long version = versionStart; version <= versionEnd; version++) {
-      put.add(cf, columnBytes, version, genValue(row, col, version));
+      put.addColumn(cf, columnBytes, version, genValue(row, col, version));
     }
     region.put(put);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
index f809a97..22b2163 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
@@ -222,7 +222,7 @@ public class TestCompactionState {
         byte[] row = Bytes.toBytes(random.nextLong());
         Put p = new Put(row);
         for (int j = 0; j < families.length; ++j) {
-          p.add(families[ j ], qualifier, row);
+          p.addColumn(families[j], qualifier, row);
         }
         puts.add(p);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
index 7d1a39c..54dbe9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
@@ -88,7 +88,7 @@ public class TestCorruptedRegionStoreFile {
       while (rowCount < NUM_ROWS) {
         Put put = new Put(Bytes.toBytes(String.format("%010d", rowCount)));
         put.setDurability(Durability.SKIP_WAL);
-        put.add(FAMILY_NAME, q, value);
+        put.addColumn(FAMILY_NAME, q, value);
         table.put(put);
 
         if ((rowCount++ % ROW_PER_FILE) == 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
index 45a95c4..6c66c6d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
@@ -216,7 +216,7 @@ public class TestEncryptionKeyRotation {
     Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
     try {
       table.put(new Put(Bytes.toBytes("testrow"))
-        .add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
+              .addColumn(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
     } finally {
       table.close();
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
index 0a6b2b5..ad7cf2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
@@ -107,7 +107,7 @@ public class TestEncryptionRandomKeying {
     Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
     try {
       table.put(new Put(Bytes.toBytes("testrow"))
-        .add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
+              .addColumn(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
     } finally {
       table.close();
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 06517d7..9b82cc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -287,13 +287,13 @@ public class TestHRegion {
     byte [] value = Bytes.toBytes(name.getMethodName());
     // Make a random put against our cf.
     Put put = new Put(value);
-    put.add(COLUMN_FAMILY_BYTES, null, value);
+    put.addColumn(COLUMN_FAMILY_BYTES, null, value);
     // First put something in current memstore, which will be in snapshot after flusher.prepare()
     region.put(put);
     StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
     storeFlushCtx.prepare();
     // Second put something in current memstore
-    put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
+    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
     region.put(put);
     // Close with something in memstore and something in the snapshot.  Make sure all is cleared.
     region.close();
@@ -339,7 +339,7 @@ public class TestHRegion {
     faultyLog.setStoreFlushCtx(store.createFlushContext(12345));
 
     Put put = new Put(value);
-    put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
+    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
     faultyLog.setFailureType(FaultyFSLog.FailureType.SYNC);
 
     boolean threwIOE = false;
@@ -388,7 +388,7 @@ public class TestHRegion {
     // Put some value and make sure flush could be completed normally
     byte [] value = Bytes.toBytes(name.getMethodName());
     Put put = new Put(value);
-    put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
+    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
     region.put(put);
     long onePutSize = region.getMemstoreSize();
     assertTrue(onePutSize > 0);
@@ -457,7 +457,7 @@ public class TestHRegion {
           Assert.assertEquals(0, size);
           // Put one item into memstore.  Measure the size of one item in memstore.
           Put p1 = new Put(row);
-          p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null));
+          p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
           region.put(p1);
           final long sizeOfOnePut = region.getMemstoreSize();
           // Fail a flush which means the current memstore will hang out as memstore 'snapshot'.
@@ -556,7 +556,7 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
 
     Put put = new Put(Bytes.toBytes("r1"));
-    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
+    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
     region.put(put);
     region.flush(true);
 
@@ -604,10 +604,10 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
 
     Put put = new Put(Bytes.toBytes("r1"));
-    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
+    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
     region.put(put);
     put = new Put(Bytes.toBytes("r2"));
-    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
+    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
     region.put(put);
     region.flush(true);
 
@@ -853,7 +853,7 @@ public class TestHRegion {
 
       for (long i = minSeqId; i < maxSeqId; i++) {
         Put put = new Put(Bytes.toBytes(i));
-        put.add(family, Bytes.toBytes(i), Bytes.toBytes(i));
+        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
         region.put(put);
         region.flush(true);
       }
@@ -955,7 +955,7 @@ public class TestHRegion {
 
       for (long i = minSeqId; i < maxSeqId; i++) {
         Put put = new Put(Bytes.toBytes(i));
-        put.add(family, Bytes.toBytes(i), Bytes.toBytes(i));
+        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
         region.put(put);
         region.flush(true);
       }
@@ -1152,7 +1152,7 @@ public class TestHRegion {
       int i = 0;
       Put put = new Put(Bytes.toBytes(i));
       put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal
-      put.add(family, Bytes.toBytes(i), Bytes.toBytes(i));
+      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
       region.put(put);
 
       // 1. Test case where START_FLUSH throws exception
@@ -1440,11 +1440,11 @@ public class TestHRegion {
       System.out.println(String.format("Saving row: %s, with value %s", row, value));
       Put put = new Put(Bytes.toBytes(row));
       put.setDurability(Durability.SKIP_WAL);
-      put.add(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob"));
-      put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
-      put.add(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999"));
-      put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value));
-      put.add(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId"));
+      put.addColumn(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob"));
+      put.addColumn(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
+      put.addColumn(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999"));
+      put.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value));
+      put.addColumn(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId"));
       r.put(put);
     }
   }
@@ -1457,7 +1457,7 @@ public class TestHRegion {
     try {
       Put p = new Put(b.toBytes());
       byte[] cfwithcolon = Bytes.toBytes(COLUMN_FAMILY + ":");
-      p.add(cfwithcolon, cfwithcolon, cfwithcolon);
+      p.addColumn(cfwithcolon, cfwithcolon, cfwithcolon);
       boolean exception = false;
       try {
         this.region.put(p);
@@ -1486,7 +1486,7 @@ public class TestHRegion {
       final Put[] puts = new Put[10];
       for (int i = 0; i < 10; i++) {
         puts[i] = new Put(Bytes.toBytes("row_" + i));
-        puts[i].add(cf, qual, val);
+        puts[i].addColumn(cf, qual, val);
       }
 
       OperationStatus[] codes = this.region.batchMutate(puts);
@@ -1497,7 +1497,7 @@ public class TestHRegion {
       metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
 
       LOG.info("Next a batch put with one invalid family");
-      puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
+      puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, val);
       codes = this.region.batchMutate(puts);
       assertEquals(10, codes.length);
       for (int i = 0; i < 10; i++) {
@@ -1526,9 +1526,9 @@ public class TestHRegion {
       final Put[] puts = new Put[10];
       for (int i = 0; i < 10; i++) {
         puts[i] = new Put(Bytes.toBytes("row_" + i));
-        puts[i].add(cf, qual, val);
+        puts[i].addColumn(cf, qual, val);
       }
-      puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
+      puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, val);
 
       LOG.info("batchPut will have to break into four batches to avoid row locks");
       RowLock rowLock1 = region.getRowLock(Bytes.toBytes("row_2"));
@@ -1633,7 +1633,7 @@ public class TestHRegion {
       final Put[] puts = new Put[10];
       for (int i = 0; i < 10; i++) {
         puts[i] = new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100);
-        puts[i].add(cf, qual, val);
+        puts[i].addColumn(cf, qual, val);
       }
 
       OperationStatus[] codes = this.region.batchMutate(puts);
@@ -1668,7 +1668,7 @@ public class TestHRegion {
     try {
       // Putting empty data in key
       Put put = new Put(row1);
-      put.add(fam1, qf1, emptyVal);
+      put.addColumn(fam1, qf1, emptyVal);
 
       // checkAndPut with empty value
       boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(
@@ -1677,7 +1677,7 @@ public class TestHRegion {
 
       // Putting data in key
       put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
 
       // checkAndPut with correct value
       res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(emptyVal),
@@ -1696,7 +1696,7 @@ public class TestHRegion {
       assertFalse(res);
 
       put = new Put(row1);
-      put.add(fam1, qf1, val2);
+      put.addColumn(fam1, qf1, val2);
       // checkAndPut with correct value
       res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new BinaryComparator(val1),
           put, true);
@@ -1717,7 +1717,7 @@ public class TestHRegion {
 
       // checkAndPut looking for a null value
       put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
 
       res = region
           .checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, new NullComparator(), put, true);
@@ -1742,7 +1742,7 @@ public class TestHRegion {
     try {
       // Putting data in key
       Put put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
       region.put(put);
 
       // checkAndPut with wrong value
@@ -1775,7 +1775,7 @@ public class TestHRegion {
     try {
       // Putting data in key
       Put put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
       region.put(put);
 
       // checkAndPut with correct value
@@ -1811,7 +1811,7 @@ public class TestHRegion {
     try {
       // Putting val3 in key
       Put put = new Put(row1);
-      put.add(fam1, qf1, val3);
+      put.addColumn(fam1, qf1, val3);
       region.put(put);
 
       // Test CompareOp.LESS: original = val3, compare with val3, fail
@@ -1827,7 +1827,7 @@ public class TestHRegion {
       // Test CompareOp.LESS: original = val3, compare with val2,
       // succeed (now value = val2)
       put = new Put(row1);
-      put.add(fam1, qf1, val2);
+      put.addColumn(fam1, qf1, val2);
       res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS,
           new BinaryComparator(val2), put, true);
       assertEquals(true, res);
@@ -1846,7 +1846,7 @@ public class TestHRegion {
       // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val1,
       // succeed (now value = val3)
       put = new Put(row1);
-      put.add(fam1, qf1, val3);
+      put.addColumn(fam1, qf1, val3);
       res = region.checkAndMutate(row1, fam1, qf1, CompareOp.LESS_OR_EQUAL,
           new BinaryComparator(val1), put, true);
       assertEquals(true, res);
@@ -1864,7 +1864,7 @@ public class TestHRegion {
       // Test CompareOp.GREATER: original = val3, compare with val4,
       // succeed (now value = val2)
       put = new Put(row1);
-      put.add(fam1, qf1, val2);
+      put.addColumn(fam1, qf1, val2);
       res = region.checkAndMutate(row1, fam1, qf1, CompareOp.GREATER,
           new BinaryComparator(val4), put, true);
       assertEquals(true, res);
@@ -1907,7 +1907,7 @@ public class TestHRegion {
     try {
       // Putting data in the key to check
       Put put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
       region.put(put);
 
       // Creating put to add
@@ -1943,7 +1943,7 @@ public class TestHRegion {
     this.region = initHRegion(tableName, this.getName(), CONF, COLUMNS);
     try {
       Put put = new Put(row2);
-      put.add(fam1, qual1, value1);
+      put.addColumn(fam1, qual1, value1);
       try {
         region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL,
             new BinaryComparator(value2), put, false);
@@ -1978,16 +1978,16 @@ public class TestHRegion {
     try {
       // Put content
       Put put = new Put(row1);
-      put.add(fam1, qf1, val1);
+      put.addColumn(fam1, qf1, val1);
       region.put(put);
       Threads.sleep(2);
 
       put = new Put(row1);
-      put.add(fam1, qf1, val2);
-      put.add(fam2, qf1, val3);
-      put.add(fam2, qf2, val2);
-      put.add(fam2, qf3, val1);
-      put.add(fam1, qf3, val1);
+      put.addColumn(fam1, qf1, val2);
+      put.addColumn(fam2, qf1, val3);
+      put.addColumn(fam2, qf2, val2);
+      put.addColumn(fam2, qf3, val1);
+      put.addColumn(fam1, qf3, val1);
       region.put(put);
 
       // Multi-column delete
@@ -2045,8 +2045,8 @@ public class TestHRegion {
     byte[] value = Bytes.toBytes("value");
 
     Put put = new Put(row1);
-    put.add(fam1, qual, 1, value);
-    put.add(fam1, qual, 2, value);
+    put.addColumn(fam1, qual, (long) 1, value);
+    put.addColumn(fam1, qual, (long) 2, value);
 
     String method = this.getName();
     this.region = initHRegion(tableName, method, CONF, fam1);
@@ -2130,15 +2130,15 @@ public class TestHRegion {
 
       // add some data:
       Put put = new Put(row);
-      put.add(fam, splitA, Bytes.toBytes("reference_A"));
+      put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
       region.put(put);
 
       put = new Put(row);
-      put.add(fam, splitB, Bytes.toBytes("reference_B"));
+      put.addColumn(fam, splitB, Bytes.toBytes("reference_B"));
       region.put(put);
 
       put = new Put(row);
-      put.add(fam, serverinfo, Bytes.toBytes("ip_address"));
+      put.addColumn(fam, serverinfo, Bytes.toBytes("ip_address"));
       region.put(put);
 
       // ok now delete a split:
@@ -2161,7 +2161,7 @@ public class TestHRegion {
 
       // Assert that after a delete, I can put.
       put = new Put(row);
-      put.add(fam, splitA, Bytes.toBytes("reference_A"));
+      put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
       region.put(put);
       get = new Get(row);
       result = region.get(get);
@@ -2172,7 +2172,7 @@ public class TestHRegion {
       region.delete(delete);
       assertEquals(0, region.get(get).size());
 
-      region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
+      region.put(new Put(row).addColumn(fam, splitA, Bytes.toBytes("reference_A")));
       result = region.get(get);
       assertEquals(1, result.size());
     } finally {
@@ -2194,7 +2194,7 @@ public class TestHRegion {
 
       // add data in the far future
       Put put = new Put(row);
-      put.add(fam, serverinfo, HConstants.LATEST_TIMESTAMP - 5, Bytes.toBytes("value"));
+      put.addColumn(fam, serverinfo, HConstants.LATEST_TIMESTAMP - 5, Bytes.toBytes("value"));
       region.put(put);
 
       // now delete something in the present
@@ -2237,7 +2237,7 @@ public class TestHRegion {
 
       // add data with LATEST_TIMESTAMP, put without WAL
       Put put = new Put(row);
-      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
+      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
       region.put(put);
 
       // Make sure it shows up with an actual timestamp
@@ -2253,7 +2253,7 @@ public class TestHRegion {
       // code paths, so check both)
       row = Bytes.toBytes("row2");
       put = new Put(row);
-      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
+      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
       region.put(put);
 
       // Make sure it shows up with an actual timestamp
@@ -2289,10 +2289,10 @@ public class TestHRegion {
     try {
       try {
         // no TS specified == use latest. should not error
-        region.put(new Put(row).add(fam, Bytes.toBytes("qual"), Bytes.toBytes("value")));
+        region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("value")));
         // TS out of range. should error
-        region.put(new Put(row).add(fam, Bytes.toBytes("qual"), System.currentTimeMillis() + 2000,
-            Bytes.toBytes("value")));
+        region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"),
+            System.currentTimeMillis() + 2000, Bytes.toBytes("value")));
         fail("Expected IOE for TS out of configured timerange");
       } catch (FailedSanityCheckException ioe) {
         LOG.debug("Received expected exception", ioe);
@@ -2323,12 +2323,12 @@ public class TestHRegion {
 
       // now create data.
       Put put = new Put(rowA);
-      put.add(fam2, null, value);
+      put.addColumn(fam2, null, value);
       region.put(put);
 
       put = new Put(rowB);
-      put.add(fam1, null, value);
-      put.add(fam2, null, value);
+      put.addColumn(fam1, null, value);
+      put.addColumn(fam2, null, value);
       region.put(put);
 
       Scan scan = new Scan();
@@ -2367,7 +2367,7 @@ public class TestHRegion {
     try {
       EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
       Put put = new Put(row);
-      put.add(fam1, qual1, value1);
+      put.addColumn(fam1, qual1, value1);
       region.put(put);
 
       // now delete the value:
@@ -2375,7 +2375,7 @@ public class TestHRegion {
 
       // ok put data:
       put = new Put(row);
-      put.add(fam1, qual1, value2);
+      put.addColumn(fam1, qual1, value2);
       region.put(put);
 
       // ok get:
@@ -2493,11 +2493,11 @@ public class TestHRegion {
     try {
       // Add to memstore
       Put put = new Put(row1);
-      put.add(fam1, col1, null);
-      put.add(fam1, col2, null);
-      put.add(fam1, col3, null);
-      put.add(fam1, col4, null);
-      put.add(fam1, col5, null);
+      put.addColumn(fam1, col1, null);
+      put.addColumn(fam1, col2, null);
+      put.addColumn(fam1, col3, null);
+      put.addColumn(fam1, col4, null);
+      put.addColumn(fam1, col5, null);
       region.put(put);
 
       Get get = new Get(row1);
@@ -2708,10 +2708,10 @@ public class TestHRegion {
 
       // Putting data in Region
       Put put = new Put(row1);
-      put.add(fam1, null, null);
-      put.add(fam2, null, null);
-      put.add(fam3, null, null);
-      put.add(fam4, null, null);
+      put.addColumn(fam1, null, null);
+      put.addColumn(fam2, null, null);
+      put.addColumn(fam3, null, null);
+      put.addColumn(fam4, null, null);
       region.put(put);
 
       Scan scan = null;
@@ -2791,17 +2791,17 @@ public class TestHRegion {
       // Putting data in Region
       Put put = null;
       put = new Put(row1);
-      put.add(fam1, (byte[]) null, ts, null);
-      put.add(fam2, (byte[]) null, ts, null);
-      put.add(fam3, (byte[]) null, ts, null);
-      put.add(fam4, (byte[]) null, ts, null);
+      put.addColumn(fam1, (byte[]) null, ts, null);
+      put.addColumn(fam2, (byte[]) null, ts, null);
+      put.addColumn(fam3, (byte[]) null, ts, null);
+      put.addColumn(fam4, (byte[]) null, ts, null);
       region.put(put);
 
       put = new Put(row2);
-      put.add(fam1, (byte[]) null, ts, null);
-      put.add(fam2, (byte[]) null, ts, null);
-      put.add(fam3, (byte[]) null, ts, null);
-      put.add(fam4, (byte[]) null, ts, null);
+      put.addColumn(fam1, (byte[]) null, ts, null);
+      put.addColumn(fam2, (byte[]) null, ts, null);
+      put.addColumn(fam3, (byte[]) null, ts, null);
+      put.addColumn(fam4, (byte[]) null, ts, null);
       region.put(put);
 
       Scan scan = new Scan();
@@ -3179,23 +3179,23 @@ public class TestHRegion {
       byte[] col2 = Bytes.toBytes("Pub222");
 
       Put put = new Put(row1);
-      put.add(family, col1, Bytes.toBytes(10L));
+      put.addColumn(family, col1, Bytes.toBytes(10L));
       region.put(put);
 
       put = new Put(row2);
-      put.add(family, col1, Bytes.toBytes(15L));
+      put.addColumn(family, col1, Bytes.toBytes(15L));
       region.put(put);
 
       put = new Put(row3);
-      put.add(family, col2, Bytes.toBytes(20L));
+      put.addColumn(family, col2, Bytes.toBytes(20L));
       region.put(put);
 
       put = new Put(row4);
-      put.add(family, col2, Bytes.toBytes(30L));
+      put.addColumn(family, col2, Bytes.toBytes(30L));
       region.put(put);
 
       put = new Put(row5);
-      put.add(family, col1, Bytes.toBytes(40L));
+      put.addColumn(family, col1, Bytes.toBytes(40L));
       region.put(put);
 
       Scan scan = new Scan(row3, row4);
@@ -3314,19 +3314,19 @@ public class TestHRegion {
       byte[] filtered_val = Bytes.toBytes(3);
 
       Put put = new Put(row1);
-      put.add(cf_essential, col_normal, Bytes.toBytes(1));
-      put.add(cf_joined, col_alpha, Bytes.toBytes(1));
+      put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
+      put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));
       region.put(put);
 
       put = new Put(row2);
-      put.add(cf_essential, col_alpha, Bytes.toBytes(2));
-      put.add(cf_joined, col_normal, Bytes.toBytes(2));
-      put.add(cf_alpha, col_alpha, Bytes.toBytes(2));
+      put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
+      put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
+      put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));
       region.put(put);
 
       put = new Put(row3);
-      put.add(cf_essential, col_normal, filtered_val);
-      put.add(cf_joined, col_normal, filtered_val);
+      put.addColumn(cf_essential, col_normal, filtered_val);
+      put.addColumn(cf_joined, col_normal, filtered_val);
       region.put(put);
 
       // Check two things:
@@ -3379,11 +3379,11 @@ public class TestHRegion {
 
       for (int i = 0; i < 10; i++) {
         put = new Put(Bytes.toBytes("r" + Integer.toString(i)));
-        put.add(cf_first, col_a, Bytes.toBytes(i));
+        put.addColumn(cf_first, col_a, Bytes.toBytes(i));
         if (i < 5) {
-          put.add(cf_first, col_b, Bytes.toBytes(i));
-          put.add(cf_second, col_a, Bytes.toBytes(i));
-          put.add(cf_second, col_b, Bytes.toBytes(i));
+          put.addColumn(cf_first, col_b, Bytes.toBytes(i));
+          put.addColumn(cf_second, col_a, Bytes.toBytes(i));
+          put.addColumn(cf_second, col_b, Bytes.toBytes(i));
         }
         region.put(put);
       }
@@ -3717,7 +3717,7 @@ public class TestHRegion {
       for (long i = 0; i < numRows; i++) {
         Put put = new Put(Bytes.toBytes(i));
         put.setDurability(Durability.SKIP_WAL);
-        put.add(family, qual1, Bytes.toBytes(i % 10));
+        put.addColumn(family, qual1, Bytes.toBytes(i % 10));
         region.put(put);
 
         if (i != 0 && i % compactInterval == 0) {
@@ -3951,7 +3951,7 @@ public class TestHRegion {
             byte[] value = Bytes.toBytes(String.valueOf(numPutsFinished));
             for (byte[] family : families) {
               for (byte[] qualifier : qualifiers) {
-                put.add(family, qualifier, (long) numPutsFinished, value);
+                put.addColumn(family, qualifier, (long) numPutsFinished, value);
               }
             }
             region.put(put);
@@ -4128,7 +4128,7 @@ public class TestHRegion {
     this.region = initHRegion(tableName, method, CONF, family);
     try {
       Put put = new Put(Bytes.toBytes(1L));
-      put.add(family, qual1, 1L, Bytes.toBytes(1L));
+      put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));
       region.put(put);
 
       region.flush(true);
@@ -4137,7 +4137,7 @@ public class TestHRegion {
       region.delete(delete);
 
       put = new Put(Bytes.toBytes(2L));
-      put.add(family, qual1, 2L, Bytes.toBytes(2L));
+      put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));
       region.put(put);
 
       Scan idxScan = new Scan();
@@ -4185,7 +4185,8 @@ public class TestHRegion {
           for (int j = 0; j < num_unique_rows; j++) {
             Put put = new Put(Bytes.toBytes("row" + j));
             put.setDurability(Durability.SKIP_WAL);
-            put.add(fam1, qf1, version++, val1);
+            long ts = version++;
+            put.addColumn(fam1, qf1, ts, val1);
             region.put(put);
           }
         }
@@ -4238,7 +4239,7 @@ public class TestHRegion {
       Put put = new Put(row);
       put.setDurability(Durability.SKIP_WAL);
       for (long idx = 1; idx <= 4; idx++) {
-        put.add(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx));
+        put.addColumn(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx));
       }
       region.put(put);
 
@@ -4285,7 +4286,7 @@ public class TestHRegion {
       byte col[] = Bytes.toBytes("col1");
 
       Put put = new Put(row);
-      put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
+      put.addColumn(familyName, col, (long) 1, Bytes.toBytes("SomeRandomValue"));
       region.put(put);
       region.flush(true);
 
@@ -4332,8 +4333,8 @@ public class TestHRegion {
       byte col[] = Bytes.toBytes("col1");
 
       Put put = new Put(row);
-      put.add(fam1, col, 1, Bytes.toBytes("test1"));
-      put.add(fam2, col, 1, Bytes.toBytes("test2"));
+      put.addColumn(fam1, col, (long) 1, Bytes.toBytes("test1"));
+      put.addColumn(fam2, col, (long) 1, Bytes.toBytes("test2"));
       ht.put(put);
 
       HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
@@ -4662,7 +4663,7 @@ public class TestHRegion {
 
     put = new Put(row);
     value = Bytes.toBytes("value0");
-    put.add(family, qualifier, 1234567l, value);
+    put.addColumn(family, qualifier, 1234567l, value);
     region.put(put);
     get = new Get(row);
     get.addColumn(family, qualifier);
@@ -4683,7 +4684,7 @@ public class TestHRegion {
 
     put = new Put(row);
     value = Bytes.toBytes("value1");
-    put.add(family, qualifier, 1234567l, value);
+    put.addColumn(family, qualifier, 1234567l, value);
     region.put(put);
     get = new Get(row);
     get.addColumn(family, qualifier);
@@ -4771,7 +4772,7 @@ public class TestHRegion {
         new byte[][] { family });
 
     Put put = new Put(Bytes.toBytes("r1"));
-    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
+    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
     put.setDurability(mutationDurability);
     region.put(put);
 
@@ -4987,7 +4988,7 @@ public class TestHRegion {
       Put put = new Put(Bytes.toBytes("" + i));
       put.setDurability(durability);
       for (byte[] family : families) {
-        put.add(family, qf, null);
+        put.addColumn(family, qf, null);
       }
       region.put(put);
     }
@@ -5081,7 +5082,7 @@ public class TestHRegion {
 
     // Flush enough files to get up to the threshold, doesn't need compactions
     for (int i = 0; i < 2; i++) {
-      Put put = new Put(tableName.toBytes()).add(family, family, tableName.toBytes());
+      Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
       region.put(put);
       fr = region.flush(true);
       assertTrue(fr.isFlushSucceeded());
@@ -5090,7 +5091,7 @@ public class TestHRegion {
 
     // Two flushes after the threshold, compactions are needed
     for (int i = 0; i < 2; i++) {
-      Put put = new Put(tableName.toBytes()).add(family, family, tableName.toBytes());
+      Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
       region.put(put);
       fr = region.flush(true);
       assertTrue(fr.isFlushSucceeded());
@@ -5793,11 +5794,11 @@ public class TestHRegion {
     try {
       // setup with one storefile and one memstore, to create scanner and get an earlier readPt
       Put put = new Put(Bytes.toBytes("19998"));
-      put.add(cf1, col, Bytes.toBytes("val"));
+      put.addColumn(cf1, col, Bytes.toBytes("val"));
       region.put(put);
       region.flushcache(true, true);
       Put put2 = new Put(Bytes.toBytes("19997"));
-      put2.add(cf1, col, Bytes.toBytes("val"));
+      put2.addColumn(cf1, col, Bytes.toBytes("val"));
       region.put(put2);
 
       Scan scan = new Scan(Bytes.toBytes("19998"));
@@ -5808,7 +5809,7 @@ public class TestHRegion {
       // to check StoreFileScanner.seekToPreviousRow
       for (int i = 10000; i < 20000; i++) {
         Put p = new Put(Bytes.toBytes(""+i));
-        p.add(cf1, col, Bytes.toBytes(""+i));
+        p.addColumn(cf1, col, Bytes.toBytes("" + i));
         region.put(p);
       }
       region.flushcache(true, true);
@@ -5817,7 +5818,7 @@ public class TestHRegion {
       // to check MemStoreScanner.seekToPreviousRow
       for (int i = 10000; i < 20000; i++) {
         Put p = new Put(Bytes.toBytes(""+i));
-        p.add(cf1, col, Bytes.toBytes(""+i));
+        p.addColumn(cf1, col, Bytes.toBytes("" + i));
         region.put(p);
       }
 
@@ -5944,7 +5945,7 @@ public class TestHRegion {
     Assert.assertEquals(0L, region.getWriteRequestsCount());
 
     Put put = new Put(row);
-    put.add(fam, fam, fam);
+    put.addColumn(fam, fam, fam);
 
     Assert.assertEquals(0L, region.getWriteRequestsCount());
     region.put(put);
@@ -5981,7 +5982,7 @@ public class TestHRegion {
     assertNotNull(region);
 
     // create a file in fam1 for the region before opening in OpenRegionHandler
-    region.put(new Put(Bytes.toBytes("a")).add(fam1, fam1, fam1));
+    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
     region.flush(true);
     HBaseTestingUtility.closeRegionAndWAL(region);
 
@@ -6086,7 +6087,7 @@ public class TestHRegion {
     assertNotNull(region);
 
     // create a file in fam1 for the region before opening in OpenRegionHandler
-    region.put(new Put(Bytes.toBytes("a")).add(fam1, fam1, fam1));
+    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
     region.flush(true);
     HBaseTestingUtility.closeRegionAndWAL(region);
 
@@ -6319,14 +6320,14 @@ public class TestHRegion {
           // TTL tags specify ts in milliseconds
           new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire after 10 seconds via family setting
-      region.put(new Put(row).add(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
+      region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
       // Add a cell that will expire in 15 seconds via cell TTL
       region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
         HConstants.EMPTY_BYTE_ARRAY, new Tag[] {
           // TTL tags specify ts in milliseconds
           new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } )));
       // Add a cell that will expire in 20 seconds via family setting
-      region.put(new Put(row).add(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
+      region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
 
       // Flush so we are sure store scanning gets this right
       region.flush(true);
@@ -6377,7 +6378,7 @@ public class TestHRegion {
       // Fun with disappearing increments
 
       // Start at 1
-      region.put(new Put(row).add(fam1, q1, Bytes.toBytes(1L)));
+      region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
       r = region.get(new Get(row));
       byte[] val = r.getValue(fam1, q1);
       assertNotNull(val);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
index aa45ab9..98d98aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
@@ -145,7 +145,7 @@ public class TestHRegionOnCluster {
       String value, int verifyNum) throws IOException {
     System.out.println("=========Putting data :" + row);
     Put put = new Put(Bytes.toBytes(row));
-    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
+    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
     table.put(put);
     ResultScanner resultScanner = table.getScanner(new Scan());
     List<Result> results = new ArrayList<Result>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 04e9b56..0c2e01c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -1078,7 +1078,7 @@ public class TestHRegionReplayEvents {
       long readPoint = region.getMVCC().getReadPoint();
       long origSeqId = readPoint + 100;
 
-      Put put = new Put(row).add(family, row, row);
+      Put put = new Put(row).addColumn(family, row, row);
       put.setDurability(Durability.SKIP_WAL); // we replay with skip wal
       replay(region, put, origSeqId);
 
@@ -1091,7 +1091,7 @@ public class TestHRegionReplayEvents {
       // replay an entry that is smaller than current read point
       // caution: adding an entry below current read point might cause partial dirty reads. Normal
       // replay does not allow reads while replay is going on.
-      put = new Put(row2).add(family, row2, row2);
+      put = new Put(row2).addColumn(family, row2, row2);
       put.setDurability(Durability.SKIP_WAL);
       replay(region, put, origSeqId - 50);
 
@@ -1628,7 +1628,7 @@ public class TestHRegionReplayEvents {
       Put put = new Put(Bytes.toBytes("" + i));
       put.setDurability(Durability.SKIP_WAL);
       for (byte[] family : families) {
-        put.add(family, qf, EnvironmentEdgeManager.currentTime(), null);
+        put.addColumn(family, qf, EnvironmentEdgeManager.currentTime(), null);
       }
       replay(region, put, i+1);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
index 896cd5d..67c5f51 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
@@ -113,11 +113,11 @@ public class TestJoinedScanners {
       for (long i = 0; i < rows_to_insert; i++) {
         Put put = new Put(Bytes.toBytes(Long.toString (i)));
         if (rand.nextInt(100) <= selectionRatio) {
-          put.add(cf_essential, col_name, flag_yes);
+          put.addColumn(cf_essential, col_name, flag_yes);
         } else {
-          put.add(cf_essential, col_name, flag_no);
+          put.addColumn(cf_essential, col_name, flag_no);
         }
-        put.add(cf_joined, col_name, val_large);
+        put.addColumn(cf_joined, col_name, val_large);
         puts.add(put);
         if (puts.size() >= insert_batch) {
           ht.put(puts);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
index d19d709..bb72b1d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
@@ -103,16 +103,16 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
     p = new Put(T1, ts+1);
-    p.add(c0, c0, T2);
+    p.addColumn(c0, c0, T2);
     region.put(p);
     p = new Put(T1, ts+2);
-    p.add(c0, c0, T3);
+    p.addColumn(c0, c0, T3);
     region.put(p);
     p = new Put(T1, ts+4);
-    p.add(c0, c0, T4);
+    p.addColumn(c0, c0, T4);
     region.put(p);
 
     // now place a delete marker at ts+2
@@ -156,16 +156,16 @@ public class TestKeepDeletes {
 
     // two more puts, this will expire the older puts.
     p = new Put(T1, ts+5);
-    p.add(c0, c0, T5);
+    p.addColumn(c0, c0, T5);
     region.put(p);
     p = new Put(T1, ts+6);
-    p.add(c0, c0, T6);
+    p.addColumn(c0, c0, T6);
     region.put(p);
 
     // also add an old put again
     // (which is past the max versions)
     p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
     r = region.get(g);
     assertTrue(r.isEmpty());
@@ -200,7 +200,7 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     Delete d = new Delete(T1, ts);
@@ -245,7 +245,7 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     Get gOne = new Get(T1);
@@ -322,13 +322,13 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
     p = new Put(T1, ts+2);
-    p.add(c0, c0, T2);
+    p.addColumn(c0, c0, T2);
     region.put(p);
     p = new Put(T1, ts+4);
-    p.add(c0, c0, T3);
+    p.addColumn(c0, c0, T3);
     region.put(p);
 
     Delete d = new Delete(T1, ts+1);
@@ -456,12 +456,12 @@ public class TestKeepDeletes {
     long ts = EnvironmentEdgeManager.currentTime();
 
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     // a put into another store (CF) should have no effect
     p = new Put(T1, ts-10);
-    p.add(c1, c0, T1);
+    p.addColumn(c1, c0, T1);
     region.put(p);
 
     // all the following deletes affect the put
@@ -491,7 +491,7 @@ public class TestKeepDeletes {
 
     // another put will push out the earlier put...
     p = new Put(T1, ts+3);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     region.flush(true);
@@ -519,12 +519,12 @@ public class TestKeepDeletes {
     long ts = EnvironmentEdgeManager.currentTime();
 
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     // a put another (older) row in the same store
     p = new Put(T2, ts-10);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     // all the following deletes affect the put
@@ -554,7 +554,7 @@ public class TestKeepDeletes {
 
     // another put will push out the earlier put...
     p = new Put(T1, ts+3);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     region.flush(true);
@@ -569,7 +569,7 @@ public class TestKeepDeletes {
 
     // another put will push out the earlier put...
     p = new Put(T1, ts+4);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     // this pushed out the column and version marker
@@ -596,31 +596,31 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
-    p.add(c0, c1, T1);
-    p.add(c1, c0, T1);
-    p.add(c1, c1, T1);
+    p.addColumn(c0, c0, T1);
+    p.addColumn(c0, c1, T1);
+    p.addColumn(c1, c0, T1);
+    p.addColumn(c1, c1, T1);
     region.put(p);
 
     p = new Put(T2, ts);
-    p.add(c0, c0, T1);
-    p.add(c0, c1, T1);
-    p.add(c1, c0, T1);
-    p.add(c1, c1, T1);
+    p.addColumn(c0, c0, T1);
+    p.addColumn(c0, c1, T1);
+    p.addColumn(c1, c0, T1);
+    p.addColumn(c1, c1, T1);
     region.put(p);
 
     p = new Put(T1, ts+1);
-    p.add(c0, c0, T2);
-    p.add(c0, c1, T2);
-    p.add(c1, c0, T2);
-    p.add(c1, c1, T2);
+    p.addColumn(c0, c0, T2);
+    p.addColumn(c0, c1, T2);
+    p.addColumn(c1, c0, T2);
+    p.addColumn(c1, c1, T2);
     region.put(p);
 
     p = new Put(T2, ts+1);
-    p.add(c0, c0, T2);
-    p.add(c0, c1, T2);
-    p.add(c1, c0, T2);
-    p.add(c1, c1, T2);
+    p.addColumn(c0, c0, T2);
+    p.addColumn(c0, c1, T2);
+    p.addColumn(c1, c0, T2);
+    p.addColumn(c1, c1, T2);
     region.put(p);
 
     Delete d = new Delete(T1, ts+2);
@@ -678,13 +678,13 @@ public class TestKeepDeletes {
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     // this prevents marker collection based on earliestPut
     // (cannot keep earliest put per column in the store file)
     p = new Put(T1, ts-10);
-    p.add(c0, c1, T1);
+    p.addColumn(c0, c1, T1);
     region.put(p);
 
     Delete d = new Delete(T1, ts);
@@ -709,14 +709,14 @@ public class TestKeepDeletes {
     // the 2nd put (and all delete markers following)
     // will be removed.
     p = new Put(T1, ts+2);
-    p.add(c0, c0, T2);
+    p.addColumn(c0, c0, T2);
     region.put(p);
 
     // delete, put, delete, delete, put
     assertEquals(3, countDeleteMarkers(region));
 
     p = new Put(T1, ts+3);
-    p.add(c0, c0, T3);
+    p.addColumn(c0, c0, T3);
     region.put(p);
 
     // This is potentially questionable behavior.
@@ -746,7 +746,7 @@ public class TestKeepDeletes {
 
     // add one more put
     p = new Put(T1, ts+4);
-    p.add(c0, c0, T4);
+    p.addColumn(c0, c0, T4);
     region.put(p);
 
     region.flush(true);
@@ -771,17 +771,17 @@ public class TestKeepDeletes {
     long ts = EnvironmentEdgeManager.currentTime();
 
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
-    p.add(c0, c1, T1);
-    p.add(c1, c0, T1);
-    p.add(c1, c1, T1);
+    p.addColumn(c0, c0, T1);
+    p.addColumn(c0, c1, T1);
+    p.addColumn(c1, c0, T1);
+    p.addColumn(c1, c1, T1);
     region.put(p);
 
     p = new Put(T2, ts+1);
-    p.add(c0, c0, T2);
-    p.add(c0, c1, T2);
-    p.add(c1, c0, T2);
-    p.add(c1, c1, T2);
+    p.addColumn(c0, c0, T2);
+    p.addColumn(c0, c1, T2);
+    p.addColumn(c1, c0, T2);
+    p.addColumn(c1, c1, T2);
     region.put(p);
 
     // family markers are each family
@@ -823,16 +823,16 @@ public class TestKeepDeletes {
     long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T3);
+    p.addColumn(c0, c0, T3);
     region.put(p);
     p = new Put(T1, ts-1);
-    p.add(c0, c0, T2);
+    p.addColumn(c0, c0, T2);
     region.put(p);
     p = new Put(T1, ts-3);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
     p = new Put(T1, ts-4);
-    p.add(c0, c0, T0);
+    p.addColumn(c0, c0, T0);
     region.put(p);
 
     // all puts now are just retained because of min versions = 3
@@ -861,7 +861,7 @@ public class TestKeepDeletes {
     r = region.get(g);
     checkResult(r, c0, c0, T1);
     p = new Put(T1, ts+1);
-    p.add(c0, c0, T4);
+    p.addColumn(c0, c0, T4);
     region.put(p);
     region.flush(true);
 
@@ -873,7 +873,7 @@ public class TestKeepDeletes {
     // this will push out the last put before
     // family delete marker
     p = new Put(T1, ts+2);
-    p.add(c0, c0, T5);
+    p.addColumn(c0, c0, T5);
     region.put(p);
 
     region.flush(true);
@@ -902,12 +902,12 @@ public class TestKeepDeletes {
     long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
     Put p = new Put(T1, ts);
-    p.add(c0, c0, T3);
+    p.addColumn(c0, c0, T3);
     region.put(p);
 
     // place an old row, to make the family marker expires anyway
     p = new Put(T2, ts-10);
-    p.add(c0, c0, T1);
+    p.addColumn(c0, c0, T1);
     region.put(p);
 
     checkGet(region, T1, c0, c0, ts+1, T3);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
index cd84eac..661583e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
@@ -75,15 +75,15 @@ public class TestMinVersions {
       long ts = EnvironmentEdgeManager.currentTime() - 2000;
 
       Put p = new Put(T1, ts);
-      p.add(c0, c0, T1);
+      p.addColumn(c0, c0, T1);
       region.put(p);
 
       p = new Put(T1, ts+1);
-      p.add(c0, c0, T4);
+      p.addColumn(c0, c0, T4);
       region.put(p);
 
       p = new Put(T3, ts);
-      p.add(c0, c0, T3);
+      p.addColumn(c0, c0, T3);
       region.put(p);
 
       // now make sure that getClosestBefore(...) get can
@@ -124,11 +124,11 @@ public class TestMinVersions {
 
     try {
       Put p = new Put(T1, ts-1);
-      p.add(c0, c0, T2);
+      p.addColumn(c0, c0, T2);
       region.put(p);
 
       p = new Put(T1, ts-3);
-      p.add(c0, c0, T0);
+      p.addColumn(c0, c0, T0);
       region.put(p);
 
       // now flush/compact
@@ -136,15 +136,15 @@ public class TestMinVersions {
       region.compact(true);
 
       p = new Put(T1, ts);
-      p.add(c0, c0, T3);
+      p.addColumn(c0, c0, T3);
       region.put(p);
 
       p = new Put(T1, ts-2);
-      p.add(c0, c0, T1);
+      p.addColumn(c0, c0, T1);
       region.put(p);
 
       p = new Put(T1, ts-3);
-      p.add(c0, c0, T0);
+      p.addColumn(c0, c0, T0);
       region.put(p);
 
       // newest version in the memstore
@@ -180,15 +180,15 @@ public class TestMinVersions {
 
     try {
       Put p = new Put(T1, ts-2);
-      p.add(c0, c0, T1);
+      p.addColumn(c0, c0, T1);
       region.put(p);
 
       p = new Put(T1, ts-1);
-      p.add(c0, c0, T2);
+      p.addColumn(c0, c0, T2);
       region.put(p);
 
       p = new Put(T1, ts);
-      p.add(c0, c0, T3);
+      p.addColumn(c0, c0, T3);
       region.put(p);
 
       Delete d = new Delete(T1, ts-1);
@@ -240,17 +240,17 @@ public class TestMinVersions {
     try {
       // 2nd version
       Put p = new Put(T1, ts-2);
-      p.add(c0, c0, T2);
+      p.addColumn(c0, c0, T2);
       region.put(p);
 
       // 3rd version
       p = new Put(T1, ts-1);
-      p.add(c0, c0, T3);
+      p.addColumn(c0, c0, T3);
       region.put(p);
 
       // 4th version
       p = new Put(T1, ts);
-      p.add(c0, c0, T4);
+      p.addColumn(c0, c0, T4);
       region.put(p);
 
       // now flush/compact
@@ -259,7 +259,7 @@ public class TestMinVersions {
 
       // now put the first version (backdated)
       p = new Put(T1, ts-3);
-      p.add(c0, c0, T1);
+      p.addColumn(c0, c0, T1);
       region.put(p);
 
       // now the latest change is in the memstore,
@@ -280,7 +280,7 @@ public class TestMinVersions {
       checkResult(r, c0, T4,T3);
 
       p = new Put(T1, ts+1);
-      p.add(c0, c0, T5);
+      p.addColumn(c0, c0, T5);
       region.put(p);
 
       // now the latest version is in the memstore
@@ -316,22 +316,22 @@ public class TestMinVersions {
 
        // 1st version
       Put p = new Put(T1, ts-3);
-      p.add(c0, c0, T1);
+      p.addColumn(c0, c0, T1);
       region.put(p);
 
       // 2nd version
       p = new Put(T1, ts-2);
-      p.add(c0, c0, T2);
+      p.addColumn(c0, c0, T2);
       region.put(p);
 
       // 3rd version
       p = new Put(T1, ts-1);
-      p.add(c0, c0, T3);
+      p.addColumn(c0, c0, T3);
       region.put(p);
 
       // 4th version
       p = new Put(T1, ts);
-      p.add(c0, c0, T4);
+      p.addColumn(c0, c0, T4);
       region.put(p);
 
       Result r = region.get(new Get(T1));
@@ -408,23 +408,23 @@ public class TestMinVersions {
     try {
 
       Put p = new Put(T1, ts-3);
-      p.add(c0, c0, T0);
-      p.add(c1, c1, T0);
+      p.addColumn(c0, c0, T0);
+      p.addColumn(c1, c1, T0);
       region.put(p);
 
       p = new Put(T1, ts-2);
-      p.add(c0, c0, T1);
-      p.add(c1, c1, T1);
+      p.addColumn(c0, c0, T1);
+      p.addColumn(c1, c1, T1);
       region.put(p);
 
       p = new Put(T1, ts-1);
-      p.add(c0, c0, T2);
-      p.add(c1, c1, T2);
+      p.addColumn(c0, c0, T2);
+      p.addColumn(c1, c1, T2);
       region.put(p);
 
       p = new Put(T1, ts);
-      p.add(c0, c0, T3);
-      p.add(c1, c1, T3);
+      p.addColumn(c0, c0, T3);
+      p.addColumn(c1, c1, T3);
       region.put(p);
 
       List<Long> tss = new ArrayList<Long>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java
index d9453b0..4dc233f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java
@@ -115,7 +115,7 @@ public class TestParallelPut {
     long value = 1L;
 
     Put put = new Put(row);
-    put.add(fam1, qual1, Bytes.toBytes(value));
+    put.addColumn(fam1, qual1, Bytes.toBytes(value));
     region.put(put);
 
     assertGet(this.region, row, fam1, qual1, Bytes.toBytes(value));
@@ -215,7 +215,7 @@ public class TestParallelPut {
         // put the randombytes and verify that we can read it. This is one
         // way of ensuring that rwcc manipulation in HRegion.put() is fine.
         Put put = new Put(rowkey);
-        put.add(fam1, qual1, value);
+        put.addColumn(fam1, qual1, value);
         in[0] = put;
         try {
           OperationStatus[] ret = region.batchMutate(in);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index f238770..bcb8733 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -460,7 +460,7 @@ public class TestRegionMergeTransaction {
             continue;
           }
           Put put = new Put(k);
-          put.add(f, null, k);
+          put.addColumn(f, null, k);
           if (r.getWAL() == null)
             put.setDurability(Durability.SKIP_WAL);
           r.put(put);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 2a949a1..ae3ad1a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -456,7 +456,7 @@ public class TestRegionMergeTransactionOnCluster {
   private void loadData(Table table) throws IOException {
     for (int i = 0; i < ROWSIZE; i++) {
       Put put = new Put(ROWS[i]);
-      put.add(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
+      put.addColumn(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
       table.put(put);
     }
   }
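
For reference, a minimal, self-contained sketch of the substitution these hunks apply throughout the tests: the deprecated Put#add(byte[], byte[], byte[]) call replaced by Put#addColumn(byte[], byte[], byte[]) with the same arguments. The table, family, qualifier, and value names below are hypothetical and chosen only for illustration, not taken from the diffs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAddColumnSketch {
      public static void main(String[] args) throws Exception {
        // Uses the default configuration on the classpath; "example" is a hypothetical table.
        try (Connection connection = ConnectionFactory.createConnection();
             Table table = connection.getTable(TableName.valueOf("example"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          // Deprecated form being removed by this change:
          //   put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
          // Replacement with the same family/qualifier/value semantics:
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
          table.put(put);
        }
      }
    }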

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 96b6122..870e963 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -336,7 +336,7 @@ public class TestRegionReplicas {
             while (running.get()) {
               byte[] data = Bytes.toBytes(String.valueOf(key));
               Put put = new Put(data);
-              put.add(f, null, data);
+              put.addColumn(f, null, data);
               table.put(put);
               key++;
               if (key == endKey) key = startKey;

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 7a9e61f..67fedcd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -119,7 +119,7 @@ public class TestRegionServerMetrics {
     // Do a first put to be sure that the connection is established, meta is there and so on.
     Table table = connection.getTable(tName);
     Put p = new Put(row);
-    p.add(cfName, qualifier, initValue);
+    p.addColumn(cfName, qualifier, initValue);
     table.put(p);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
@@ -199,7 +199,7 @@ public class TestRegionServerMetrics {
     Table t = TEST_UTIL.createTable(tableName, cf);
 
     Put p = new Put(row);
-    p.add(cf, qualifier, val);
+    p.addColumn(cf, qualifier, val);
     p.setDurability(Durability.SKIP_WAL);
 
     t.put(p);
@@ -227,7 +227,7 @@ public class TestRegionServerMetrics {
     //Force a hfile.
     Table t = TEST_UTIL.createTable(tableName, cf);
     Put p = new Put(row);
-    p.add(cf, qualifier, val);
+    p.addColumn(cf, qualifier, val);
     t.put(p);
     TEST_UTIL.getHBaseAdmin().flush(tableName);
 
@@ -251,15 +251,15 @@ public class TestRegionServerMetrics {
 
     Table t = TEST_UTIL.createTable(tableName, cf);
     Put p = new Put(row);
-    p.add(cf, qualifier, valOne);
+    p.addColumn(cf, qualifier, valOne);
     t.put(p);
 
     Put pTwo = new Put(row);
-    pTwo.add(cf, qualifier, valTwo);
+    pTwo.addColumn(cf, qualifier, valTwo);
     t.checkAndPut(row, cf, qualifier, valOne, pTwo);
 
     Put pThree = new Put(row);
-    pThree.add(cf, qualifier, valThree);
+    pThree.addColumn(cf, qualifier, valThree);
     t.checkAndPut(row, cf, qualifier, valOne, pThree);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
@@ -281,7 +281,7 @@ public class TestRegionServerMetrics {
 
     Table t = TEST_UTIL.createTable(tableName, cf);
     Put p = new Put(row);
-    p.add(cf, qualifier, val);
+    p.addColumn(cf, qualifier, val);
     t.put(p);
 
     for(int count = 0; count< 13; count++) {
@@ -308,7 +308,7 @@ public class TestRegionServerMetrics {
 
     Table t = TEST_UTIL.createTable(tableName, cf);
     Put p = new Put(row);
-    p.add(cf, qualifier, val);
+    p.addColumn(cf, qualifier, val);
     t.put(p);
 
     for(int count = 0; count< 73; count++) {
@@ -334,7 +334,7 @@ public class TestRegionServerMetrics {
     List<Put> puts = new ArrayList<>();
     for (int insertCount =0; insertCount < 100; insertCount++) {
       Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
-      p.add(cf, qualifier, val);
+      p.addColumn(cf, qualifier, val);
       puts.add(p);
     }
     try (Table t = TEST_UTIL.createTable(tableName, cf)) {
@@ -384,7 +384,7 @@ public class TestRegionServerMetrics {
     List<Put> puts = new ArrayList<>();
     for (int insertCount =0; insertCount < 100; insertCount++) {
       Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
-      p.add(cf, qualifier, val);
+      p.addColumn(cf, qualifier, val);
       puts.add(p);
     }
     try (Table t = TEST_UTIL.createTable(tableName, cf)) {
@@ -442,7 +442,7 @@ public class TestRegionServerMetrics {
     t.setAutoFlush(true, true);
     for (int insertCount = 0; insertCount < numHfiles; insertCount++) {
       Put p = new Put(Bytes.toBytes(insertCount));
-      p.add(cf, qualifier, val);
+      p.addColumn(cf, qualifier, val);
       t.put(p);
       admin.flush(tableName);
     }
@@ -471,7 +471,7 @@ public class TestRegionServerMetrics {
     for (int insertCount = numHfiles;
         insertCount < 2 * numHfiles - 1; insertCount++) {
       Put p = new Put(Bytes.toBytes(insertCount));
-      p.add(cf, qualifier, val);
+      p.addColumn(cf, qualifier, val);
       t.put(p);
       admin.flush(tableName);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index b2405e8..e8a2134 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -76,7 +76,7 @@ public class TestRegionServerNoMaster {
     // Create table then get the single region for our new table.
     table = HTU.createTable(tableName,HConstants.CATALOG_FAMILY);
     Put p = new Put(row);
-    p.add(HConstants.CATALOG_FAMILY, row, row);
+    p.addColumn(HConstants.CATALOG_FAMILY, row, row);
     table.put(p);
 
     try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
index ab12195..4d3a1c3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
@@ -92,7 +92,8 @@ public class TestRowTooBig {
       for (int i = 0; i < 5 ; i++) {
         Put put = new Put(row1);
 
-        put.add(fam1, Bytes.toBytes("col_" + i ), new byte[5 * 1024 * 1024]);
+        byte[] value = new byte[5 * 1024 * 1024];
+        put.addColumn(fam1, Bytes.toBytes("col_" + i), value);
         region.put(put);
         region.flush(true);
       }
@@ -138,7 +139,8 @@ public class TestRowTooBig {
       for (int i = 0; i < 10; i++) {
         Put put = new Put(row1);
         for (int j = 0; j < 10 * 10000; j++) {
-          put.add(fam1, Bytes.toBytes("col_" + i + "_" + j), new byte[10]);
+          byte[] value = new byte[10];
+          put.addColumn(fam1, Bytes.toBytes("col_" + i + "_" + j), value);
         }
         region.put(put);
         region.flush(true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java
index 0c11978..909ae71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java
@@ -90,27 +90,27 @@ public class TestSCVFWithMiniCluster {
     /* Add a row with 'a:foo' = false */
     Put put = new Put(Bytes.toBytes("1"));
     put.setDurability(Durability.SKIP_WAL);
-    put.add(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
-    put.add(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
+    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
     puts.add(put);
 
     /* Add a row with 'a:foo' = true */
     put = new Put(Bytes.toBytes("2"));
     put.setDurability(Durability.SKIP_WAL);
-    put.add(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
-    put.add(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
+    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
     puts.add(put);
 
     /* Add a row with 'a:foo' qualifier not set */
     put = new Put(Bytes.toBytes("3"));
     put.setDurability(Durability.SKIP_WAL);
-    put.add(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
-    put.add(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
+    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
     puts.add(put);
 
     htable.put(puts);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
index 5dd10b1..e84ed59 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
@@ -135,7 +135,7 @@ public class TestScannerRetriableFailure {
       byte[] row = Bytes.toBytes(String.format("%09d", i));
       Put put = new Put(row);
       put.setDurability(Durability.SKIP_WAL);
-      put.add(FAMILY_NAME, null, row);
+      put.addColumn(FAMILY_NAME, null, row);
       table.put(put);
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
index 03cf8b8..fc3735a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
@@ -309,7 +309,7 @@ public class TestSeekOptimizations {
 
   public void put(String qual, long ts) {
     if (!putTimestamps.contains(ts)) {
-      put.add(FAMILY_BYTES, Bytes.toBytes(qual), ts, createValue(ts));
+      put.addColumn(FAMILY_BYTES, Bytes.toBytes(qual), ts, createValue(ts));
       putTimestamps.add(ts);
     }
     if (VERBOSE) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 1cf06e5..2549a4f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -486,7 +486,7 @@ public class TestSplitTransactionOnCluster {
         String row = "row" + i;
         Put p = new Put(row.getBytes());
         String val = "Val" + i;
-        p.add("col".getBytes(), "ql".getBytes(), val.getBytes());
+        p.addColumn("col".getBytes(), "ql".getBytes(), val.getBytes());
         table.put(p);
         admin.flush(userTableName.getName());
         Delete d = new Delete(row.getBytes());
@@ -500,13 +500,13 @@ public class TestSplitTransactionOnCluster {
           .getRegionsOfTable(userTableName);
       HRegionInfo hRegionInfo = regionsOfTable.get(0);
       Put p = new Put("row6".getBytes());
-      p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
+      p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
       table.put(p);
       p = new Put("row7".getBytes());
-      p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
+      p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
       table.put(p);
       p = new Put("row8".getBytes());
-      p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
+      p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
       table.put(p);
       admin.flush(userTableName.getName());
       admin.split(hRegionInfo.getRegionName(), "row7".getBytes());
@@ -753,16 +753,16 @@ public class TestSplitTransactionOnCluster {
   private void insertData(final TableName tableName, HBaseAdmin admin, Table t) throws IOException,
       InterruptedException {
     Put p = new Put(Bytes.toBytes("row1"));
-    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("1"));
+    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("1"));
     t.put(p);
     p = new Put(Bytes.toBytes("row2"));
-    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("2"));
+    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("2"));
     t.put(p);
     p = new Put(Bytes.toBytes("row3"));
-    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("3"));
+    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("3"));
     t.put(p);
     p = new Put(Bytes.toBytes("row4"));
-    p.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("4"));
+    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("4"));
     t.put(p);
     admin.flush(tableName);
   }
@@ -966,7 +966,7 @@ public class TestSplitTransactionOnCluster {
       Table hTable = connection.getTable(desc.getTableName());
       for(int i = 1; i < 5; i++) {
         Put p1 = new Put(("r"+i).getBytes());
-        p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
+        p1.addColumn(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
         hTable.put(p1);
       }
       admin.flush(desc.getTableName());
@@ -1055,8 +1055,8 @@ public class TestSplitTransactionOnCluster {
       HRegion region = regions.get(0);
       for(int i = 3;i<9;i++) {
         Put p = new Put(Bytes.toBytes("row"+i));
-        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
-        p.add(Bytes.toBytes("i_f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i));
+        p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value" + i));
+        p.addColumn(Bytes.toBytes("i_f"), Bytes.toBytes("q"), Bytes.toBytes("value" + i));
         region.put(p);
       }
       region.flush(true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java
index 92e0558..826d06f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java
@@ -118,7 +118,8 @@ public class TestSplitWalDataLoss {
     Connection conn = testUtil.getConnection();
 
     try (Table table = conn.getTable(tableName)) {
-      table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0")));
+      table.put(new Put(Bytes.toBytes("row0"))
+              .addColumn(family, qualifier, Bytes.toBytes("val0")));
     }
     long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
     Log.info("CHANGE OLDEST " + oldestSeqIdOfStore);
@@ -130,7 +131,8 @@ public class TestSplitWalDataLoss {
       }
     }
     try (Table table = conn.getTable(tableName)) {
-      table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1")));
+      table.put(new Put(Bytes.toBytes("row1"))
+              .addColumn(family, qualifier, Bytes.toBytes("val1")));
     }
     long now = EnvironmentEdgeManager.currentTime();
     rs.tryRegionServerReport(now - 500, now);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
index 18eda70..74b3df9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
@@ -121,7 +121,7 @@ public class TestStoreFileRefresherChore {
       Put put = new Put(Bytes.toBytes("" + i));
       put.setDurability(Durability.SKIP_WAL);
       for (byte[] family : families) {
-        put.add(family, qf, null);
+        put.addColumn(family, qf, null);
       }
       region.put(put);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
index 32e5855..a85e479 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
@@ -120,7 +120,7 @@ public class TestTags {
       byte[] value = Bytes.toBytes("value");
       table = TEST_UTIL.getConnection().getTable(tableName);
       Put put = new Put(row);
-      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
+      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
       put.setAttribute("visibility", Bytes.toBytes("myTag"));
       table.put(put);
       admin.flush(tableName);
@@ -133,7 +133,7 @@ public class TestTags {
 
       Put put1 = new Put(row1);
       byte[] value1 = Bytes.toBytes("1000dfsdf");
-      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
+      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
       // put1.setAttribute("visibility", Bytes.toBytes("myTag3"));
       table.put(put1);
       admin.flush(tableName);
@@ -141,7 +141,7 @@ public class TestTags {
 
       Put put2 = new Put(row2);
       byte[] value2 = Bytes.toBytes("1000dfsdf");
-      put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
+      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
       put2.setAttribute("visibility", Bytes.toBytes("myTag3"));
       table.put(put2);
       admin.flush(tableName);
@@ -187,7 +187,7 @@ public class TestTags {
       table = TEST_UTIL.getConnection().getTable(tableName);
       Put put = new Put(row);
       byte[] value = Bytes.toBytes("value");
-      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
+      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
       table.put(put);
       admin.flush(tableName);
       // We are lacking an API for confirming flush request compaction.
@@ -199,14 +199,14 @@ public class TestTags {
 
       Put put1 = new Put(row1);
       byte[] value1 = Bytes.toBytes("1000dfsdf");
-      put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
+      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
       table.put(put1);
       admin.flush(tableName);
       Thread.sleep(1000);
 
       Put put2 = new Put(row2);
       byte[] value2 = Bytes.toBytes("1000dfsdf");
-      put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
+      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
       table.put(put2);
       admin.flush(tableName);
       Thread.sleep(1000);
@@ -277,13 +277,13 @@ public class TestTags {
         table = TEST_UTIL.getConnection().getTable(tableName);
         Put put = new Put(row);
         byte[] value = Bytes.toBytes("value");
-        put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
+        put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
         int bigTagLen = Short.MAX_VALUE - 5;
         put.setAttribute("visibility", new byte[bigTagLen]);
         table.put(put);
         Put put1 = new Put(row1);
         byte[] value1 = Bytes.toBytes("1000dfsdf");
-        put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
+        put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
         table.put(put1);
         admin.flush(tableName);
         // We are lacking an API for confirming flush request compaction.
@@ -295,18 +295,18 @@ public class TestTags {
 
         put1 = new Put(row2);
         value1 = Bytes.toBytes("1000dfsdf");
-        put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
+        put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
         table.put(put1);
         admin.flush(tableName);
         Thread.sleep(1000);
 
         Put put2 = new Put(rowd);
         byte[] value2 = Bytes.toBytes("1000dfsdf");
-        put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
+        put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
         table.put(put2);
         put2 = new Put(rowe);
         value2 = Bytes.toBytes("1000dfsddfdf");
-        put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
+        put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
         put.setAttribute("visibility", Bytes.toBytes("ram"));
         table.put(put2);
         admin.flush(tableName);
@@ -390,7 +390,7 @@ public class TestTags {
       table = TEST_UTIL.getConnection().getTable(tableName);
       Put put = new Put(row1);
       byte[] v = Bytes.toBytes(2L);
-      put.add(f, q, v);
+      put.addColumn(f, q, v);
       put.setAttribute("visibility", Bytes.toBytes("tag1"));
       table.put(put);
       Increment increment = new Increment(row1);
@@ -430,7 +430,7 @@ public class TestTags {
 
       put = new Put(row2);
       v = Bytes.toBytes(2L);
-      put.add(f, q, v);
+      put.addColumn(f, q, v);
       table.put(put);
       increment = new Increment(row2);
       increment.add(new KeyValue(row2, f, q, 1234L, v));
@@ -452,7 +452,7 @@ public class TestTags {
       // Test Append
       byte[] row3 = Bytes.toBytes("r3");
       put = new Put(row3);
-      put.add(f, q, Bytes.toBytes("a"));
+      put.addColumn(f, q, Bytes.toBytes("a"));
       put.setAttribute("visibility", Bytes.toBytes("tag1"));
       table.put(put);
       Append append = new Append(row3);
@@ -492,7 +492,7 @@ public class TestTags {
 
       byte[] row4 = Bytes.toBytes("r4");
       put = new Put(row4);
-      put.add(f, q, Bytes.toBytes("a"));
+      put.addColumn(f, q, Bytes.toBytes("a"));
       table.put(put);
       append = new Append(row4);
       append.add(new KeyValue(row4, f, q, 1234L, v));

http://git-wip-us.apache.org/repos/asf/hbase/blob/de9555ce/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
index cfa1695..ca7b3b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
@@ -78,7 +78,8 @@ public class TestWideScanner extends HBaseTestCase {
         for (j = 0; j < 100; j++) {
           Put put = new Put(row);
           put.setDurability(Durability.SKIP_WAL);
-          put.add(COLUMNS[rng.nextInt(COLUMNS.length)], b, ++ts, b);
+          long ts1 = ++ts;
+          put.addColumn(COLUMNS[rng.nextInt(COLUMNS.length)], b, ts1, b);
           region.put(put);
           count++;
         }