Posted to commits@hbase.apache.org by ch...@apache.org on 2017/08/24 05:06:42 UTC
[1/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor"

Wrong author information

This reverts commit b03348630c145aa6cc29f0f295442c6deb28a28e.
Repository: hbase
Updated Branches:
refs/heads/master b03348630 -> 25ff9d0bb
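For context before the file-by-file diff: a minimal sketch of the two descriptor styles this revert toggles between (table name "t" and family "f" are placeholder assumptions, not taken from the patch). The mutable HTableDescriptor/HColumnDescriptor API is what the revert restores; the immutable builder API is what HBASE-18503 had moved callers to. Both shapes appear verbatim in the hunks below.

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class DescriptorStyleSketch {
    static void sketch() {
      // Mutable style restored by this revert (deprecated in HBase 2.0):
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
      htd.addFamily(new HColumnDescriptor("f"));

      // Immutable builder style that HBASE-18503 had introduced:
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
          .addColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
          .build();
    }
  }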
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 28d2a24..18b1114 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -467,20 +466,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* @return META table descriptor
- * @deprecated since 2.0 version and will be removed in 3.0 version.
- * use {@link #getMetaDescriptor()}
*/
- @Deprecated
public HTableDescriptor getMetaTableDescriptor() {
- return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
- }
-
- /**
- * @return META table descriptor
- */
- public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
try {
- return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
+ return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
} catch (IOException e) {
throw new RuntimeException("Unable to create META table descriptor", e);
}
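The restored getMetaTableDescriptor() above resolves the hbase:meta descriptor through FSTableDescriptors instead of building it. A hedged usage sketch of that pattern (the Configuration is assumed to point at a test cluster's root directory; in the reverted state the helper returns an HTableDescriptor, as the hunk shows):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSTableDescriptors;

  public class MetaDescriptorSketch {
    static HTableDescriptor metaDescriptor(Configuration conf) throws IOException {
      // Mirrors the restored helper: read hbase:meta's descriptor from the filesystem.
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    }
  }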
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 95997f2..7457f43 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,13 +25,10 @@ import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
@@ -49,9 +46,9 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
- assertTrue("Should create new table descriptor",
- fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(), false));
+ assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@Test
@@ -62,7 +59,7 @@ public class TestFSTableDescriptorForceCreation {
// Cleanup old tests if any detritus laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
fstd.add(htd);
assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@@ -74,7 +71,7 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
fstd.createTableDescriptor(htd, false);
assertTrue("Should create new table descriptor",
fstd.createTableDescriptor(htd, true));
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index d85326f..20cf8bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -24,8 +24,6 @@ import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -154,22 +152,22 @@ public class TestHColumnDescriptorDefaultVersions {
Admin admin = TEST_UTIL.getAdmin();
// Verify descriptor from master
- TableDescriptor htd = admin.listTableDescriptor(tableName);
- ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
+ HTableDescriptor htd = admin.getTableDescriptor(tableName);
+ HColumnDescriptor[] hcds = htd.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+ HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
hcds = td.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
}
- private void verifyHColumnDescriptor(int expected, final ColumnFamilyDescriptor[] hcds,
+ private void verifyHColumnDescriptor(int expected, final HColumnDescriptor[] hcds,
final TableName tableName,
final byte[]... families) {
- for (ColumnFamilyDescriptor hcd : hcds) {
+ for (HColumnDescriptor hcd : hcds) {
assertEquals(expected, hcd.getMaxVersions());
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 121647e..d17c782 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -38,11 +38,14 @@ import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -748,7 +751,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- TableDescriptor td =
+ HTableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 9f4ce35..27c9a5f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -23,14 +23,15 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -74,7 +75,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long createTable(
- final TableDescriptor desc,
+ final HTableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
@@ -83,7 +84,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
- public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
+ public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
return -1;
}
@@ -266,7 +267,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long modifyTable(
final TableName tableName,
- final TableDescriptor descriptor,
+ final HTableDescriptor descriptor,
final long nonceGroup,
final long nonce) throws IOException {
return -1;
@@ -289,13 +290,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
- public long addColumn(final TableName tableName, final ColumnFamilyDescriptor columnDescriptor,
+ public long addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}
@Override
- public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
+ public long modifyColumn(final TableName tableName, final HColumnDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 9101d5e..48386a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -24,19 +24,19 @@ import java.util.NavigableMap;
import java.util.SortedSet;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
@@ -300,36 +300,36 @@ public class MockMasterServices extends MockNoopMasterServices {
public TableDescriptors getTableDescriptors() {
return new TableDescriptors() {
@Override
- public TableDescriptor remove(TableName tablename) throws IOException {
+ public HTableDescriptor remove(TableName tablename) throws IOException {
// noop
return null;
}
@Override
- public Map<String, TableDescriptor> getAll() throws IOException {
+ public Map<String, HTableDescriptor> getAll() throws IOException {
// noop
return null;
}
- @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
+ @Override public Map<String, HTableDescriptor> getAllDescriptors() throws IOException {
// noop
return null;
}
@Override
- public TableDescriptor get(TableName tablename) throws IOException {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tablename);
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(DEFAULT_COLUMN_FAMILY_NAME));
- return builder.build();
+ public HTableDescriptor get(TableName tablename) throws IOException {
+ HTableDescriptor htd = new HTableDescriptor(tablename);
+ htd.addFamily(new HColumnDescriptor(DEFAULT_COLUMN_FAMILY_NAME));
+ return htd;
}
@Override
- public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
+ public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
return null;
}
@Override
- public void add(TableDescriptor htd) throws IOException {
+ public void add(HTableDescriptor htd) throws IOException {
// noop
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 226f9f1..6dfcad1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -33,23 +33,20 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterMetaBootstrap;
@@ -64,7 +61,6 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-@InterfaceAudience.Private
public class MasterProcedureTestingUtility {
private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
@@ -140,17 +136,17 @@ public class MasterProcedureTestingUtility {
// ==========================================================================
// Table Helpers
// ==========================================================================
- public static TableDescriptor createHTD(final TableName tableName, final String... family) {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+ public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
for (int i = 0; i < family.length; ++i) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family[i]));
+ htd.addFamily(new HColumnDescriptor(family[i]));
}
- return builder.build();
+ return htd;
}
public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
- TableDescriptor htd = createHTD(tableName, family);
+ HTableDescriptor htd = createHTD(tableName, family);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = ProcedureTestingUtility.submitAndWait(procExec,
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -198,12 +194,12 @@ public class MasterProcedureTestingUtility {
assertEquals(regions.length, countMetaRegions(master, tableName));
// check htd
- TableDescriptor htd = master.getTableDescriptors().get(tableName);
+ HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue("table descriptor not found", htd != null);
for (int i = 0; i < family.length; ++i) {
- assertTrue("family not found " + family[i], htd.getColumnFamily(Bytes.toBytes(family[i])) != null);
+ assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
}
- assertEquals(family.length, htd.getColumnFamilyCount());
+ assertEquals(family.length, htd.getFamilies().size());
}
public static void validateTableDeletion(
@@ -271,18 +267,18 @@ public class MasterProcedureTestingUtility {
public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
final String family) throws IOException {
- TableDescriptor htd = master.getTableDescriptors().get(tableName);
+ HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- assertTrue(htd.hasColumnFamily(family.getBytes()));
+ assertTrue(htd.hasFamily(family.getBytes()));
}
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
final String family) throws IOException {
// verify htd
- TableDescriptor htd = master.getTableDescriptors().get(tableName);
+ HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- assertFalse(htd.hasColumnFamily(family.getBytes()));
+ assertFalse(htd.hasFamily(family.getBytes()));
// verify fs
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -294,13 +290,13 @@ public class MasterProcedureTestingUtility {
}
public static void validateColumnFamilyModification(final HMaster master,
- final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor)
+ final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
throws IOException {
- TableDescriptor htd = master.getTableDescriptors().get(tableName);
+ HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- ColumnFamilyDescriptor hcfd = htd.getColumnFamily(family.getBytes());
- assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
+ HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
+ assertTrue(hcfd.equals(columnDescriptor));
}
public static void loadData(final Connection connection, final TableName tableName,
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index eda7fcd..177d862 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -22,11 +22,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -78,11 +76,10 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
final TableName tableName = TableName.valueOf(name.getMethodName());
// create table with 0 families will fail
- final TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName));
+ final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName);
// disable sanity check
- builder.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
- TableDescriptor htd = builder.build();
+ htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
long procId =
@@ -99,7 +96,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
public void testCreateExisting() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
- final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
+ final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
// create the table
@@ -128,7 +125,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
- TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -141,21 +138,18 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
@Test(timeout=90000)
public void testRollbackAndDoubleExecution() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
- testRollbackAndDoubleExecution(TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName, F1, F2)));
+ testRollbackAndDoubleExecution(MasterProcedureTestingUtility.createHTD(tableName, F1, F2));
}
@Test(timeout=90000)
public void testRollbackAndDoubleExecutionOnMobTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
- TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd)
- .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(Bytes.toBytes(F1)))
- .setMobEnabled(true)
- .build());
- testRollbackAndDoubleExecution(builder);
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
+ htd.getFamily(Bytes.toBytes(F1)).setMobEnabled(true);
+ testRollbackAndDoubleExecution(htd);
}
- private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) throws Exception {
+ private void testRollbackAndDoubleExecution(HTableDescriptor htd) throws Exception {
// create the table
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
@@ -164,8 +158,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
- builder.setRegionReplication(3);
- TableDescriptor htd = builder.build();
+ htd.setRegionReplication(3);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -188,9 +181,9 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
splitKeys[i] = Bytes.toBytes(String.format("%08d", i));
}
- final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(
+ final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(
TableName.valueOf("TestMRegions"), F1, F2);
- UTIL.getAdmin().createTableAsync(htd, splitKeys)
+ UTIL.getHBaseAdmin().createTableAsync(htd, splitKeys)
.get(10, java.util.concurrent.TimeUnit.HOURS);
LOG.info("TABLE CREATED");
}
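One detail worth noting from this hunk: with the mutable API, the zero-families test disables the master's per-table sanity checks by setting a configuration key directly on the descriptor. A minimal sketch, assuming a placeholder table name "t":

  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;

  public class SanityCheckSketch {
    static HTableDescriptor emptyTableDescriptor() {
      // Deliberately no column families; creation would normally be rejected.
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
      // Turn off sanity checks so the empty descriptor is accepted.
      htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
      return htd;
    }
  }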
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index db5eafa..d2df2bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -112,7 +112,7 @@ public class TestMasterFailoverWithProcedures {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
- TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
index 68013fb..a75cbc1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -135,7 +135,7 @@ public class TestMasterProcedureWalLease {
backupStore3.recoverLease();
// Try to trigger a command on the master (WAL lease expired on the active one)
- TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
LOG.debug("submit proc");
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 9d60bd8..77e1fc9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -275,14 +274,14 @@ public class TestTableDescriptorModificationFromClient {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- TableDescriptor td =
+ HTableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}
- private void verifyTableDescriptor(final TableDescriptor htd,
+ private void verifyTableDescriptor(final HTableDescriptor htd,
final TableName tableName, final byte[]... families) {
- Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
+ Set<byte[]> htdFamilies = htd.getFamiliesKeys();
assertEquals(tableName, htd.getTableName());
assertEquals(families.length, htdFamilies.size());
for (byte[] familyName: families) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
index 2fe8085..f93ce98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -515,7 +514,7 @@ public class TestPartitionedMobCompactor {
CacheConfig cacheConfig = null;
MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- ColumnFamilyDescriptor column, ExecutorService pool, final int delPartitionSize,
+ HColumnDescriptor column, ExecutorService pool, final int delPartitionSize,
final CacheConfig cacheConf, final int PartitionsIncludeDelFiles)
throws IOException {
super(conf, fs, tableName, column, pool);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 6b01256..570d2d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -80,11 +79,10 @@ public class TestGetClosestAtOrBefore {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = UTIL.getDataTestDirOnTestFS();
// Up flush size else we bind up when we use default catalog flush of 16k.
- TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder()
- .setMemStoreFlushSize(64 * 1024 * 1024);
+ UTIL.getMetaTableDescriptor().setMemStoreFlushSize(64 * 1024 * 1024);
Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
- rootdir, this.conf, metaBuilder.build());
+ rootdir, this.conf, UTIL.getMetaTableDescriptor());
try {
// Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index e40bb43..11c985d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -39,22 +39,22 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterRpcServices;
@@ -207,7 +207,7 @@ public class TestRegionMergeTransactionOnCluster {
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(MASTER.getConnection(), tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
- TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
+ HTableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
tableName);
Result mergedRegionResult = MetaTableAccessor.getRegionResult(
MASTER.getConnection(), mergedRegionInfo.getRegionName());
@@ -231,11 +231,11 @@ public class TestRegionMergeTransactionOnCluster {
assertTrue(fs.exists(regionAdir));
assertTrue(fs.exists(regionBdir));
- ColumnFamilyDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
+ HColumnDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
HRegionFileSystem hrfs = new HRegionFileSystem(
TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
int count = 0;
- for(ColumnFamilyDescriptor colFamily : columnFamilies) {
+ for(HColumnDescriptor colFamily : columnFamilies) {
count += hrfs.getStoreFiles(colFamily.getName()).size();
}
ADMIN.compactRegion(mergedRegionInfo.getRegionName());
@@ -244,7 +244,7 @@ public class TestRegionMergeTransactionOnCluster {
long timeout = System.currentTimeMillis() + waitTime;
int newcount = 0;
while (System.currentTimeMillis() < timeout) {
- for(ColumnFamilyDescriptor colFamily : columnFamilies) {
+ for(HColumnDescriptor colFamily : columnFamilies) {
newcount += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount > count) {
@@ -263,7 +263,7 @@ public class TestRegionMergeTransactionOnCluster {
}
while (System.currentTimeMillis() < timeout) {
int newcount1 = 0;
- for(ColumnFamilyDescriptor colFamily : columnFamilies) {
+ for(HColumnDescriptor colFamily : columnFamilies) {
newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount1 <= 1) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 3b66a1d..89598ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -26,13 +26,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -261,7 +261,7 @@ public class TestRegionServerNoMaster {
hri.getEncodedNameAsBytes()));
// Let's start the open handler
- TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
+ HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index 126c4e4..18290f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -36,20 +36,20 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.Waiter.Predicate;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -652,30 +652,31 @@ public class SecureTestUtil {
public static Table createTable(HBaseTestingUtility testUtil, TableName tableName,
byte[][] families) throws Exception {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+ HTableDescriptor htd = new HTableDescriptor(tableName);
for (byte[] family : families) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ htd.addFamily(hcd);
}
- createTable(testUtil, testUtil.getAdmin(), builder.build());
- return testUtil.getConnection().getTable(tableName);
+ createTable(testUtil, testUtil.getAdmin(), htd);
+ return testUtil.getConnection().getTable(htd.getTableName());
}
- public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd)
+ public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd)
throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd);
}
- public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd,
+ public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd,
byte[][] splitKeys) throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd, splitKeys);
}
- public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd)
+ public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd)
throws Exception {
createTable(testUtil, admin, htd, null);
}
- public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd,
+ public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd,
byte[][] splitKeys) throws Exception {
// NOTE: We need a latch because admin is not sync,
// so the postOp coprocessor method may be called after the admin operation returned.
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index 1a33f13..3e1abb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@ -24,20 +24,18 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
public class MobSnapshotTestingUtils {
@@ -62,17 +60,15 @@ public class MobSnapshotTestingUtils {
private static void createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[][] splitKeys, int regionReplication,
final byte[]... families) throws IOException, InterruptedException {
- TableDescriptorBuilder builder
- = TableDescriptorBuilder.newBuilder(tableName)
- .setRegionReplication(regionReplication);
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.setRegionReplication(regionReplication);
for (byte[] family : families) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder
- .newBuilder(family)
- .setMobEnabled(true)
- .setMobThreshold(0L)
- .build());
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
}
- util.getAdmin().createTable(builder.build(), splitKeys);
+ util.getAdmin().createTable(htd, splitKeys);
SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
assertEquals((splitKeys.length + 1) * regionReplication, util
.getAdmin().getTableRegions(tableName).size());
@@ -84,29 +80,29 @@ public class MobSnapshotTestingUtils {
* @param util
* @param tableName
* @param families
- * @return An Table instance for the created table.
+ * @return An HTable instance for the created table.
* @throws IOException
*/
public static Table createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[]... families) throws IOException {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+ HTableDescriptor htd = new HTableDescriptor(tableName);
for (byte[] family : families) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
// Disable blooms (they are on by default as of 0.95) but we disable them
// here because
// tests have hard coded counts of what to expect in block cache, etc.,
// and blooms being
// on is interfering.
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
- .setBloomFilterType(BloomType.NONE)
- .setMobEnabled(true)
- .setMobThreshold(0L)
- .build());
+ hcd.setBloomFilterType(BloomType.NONE);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
}
- util.getAdmin().createTable(builder.build());
+ util.getAdmin().createTable(htd);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait
// until they are assigned
- util.waitUntilAllRegionsAssigned(tableName);
- return ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
+ util.waitUntilAllRegionsAssigned(htd.getTableName());
+ return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
}
/**
@@ -150,14 +146,13 @@ public class MobSnapshotTestingUtils {
}
@Override
- public TableDescriptor createHtd(final String tableName) {
- return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
- .addColumnFamily(ColumnFamilyDescriptorBuilder
- .newBuilder(Bytes.toBytes(TEST_FAMILY))
- .setMobEnabled(true)
- .setMobThreshold(0L)
- .build())
- .build();
+ public HTableDescriptor createHtd(final String tableName) {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+ HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ return htd;
}
}
}
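The MOB hunks above show the same API contrast at the column-family level. A short sketch (family name "mobcf" is a placeholder) of configuring a MOB-enabled family in both styles, using only calls that appear in the diff:

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MobFamilySketch {
    static void sketch() {
      // Mutable style (restored): call setters on HColumnDescriptor.
      HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("mobcf"));
      hcd.setMobEnabled(true);
      hcd.setMobThreshold(0L);  // threshold 0 sends every cell to MOB storage

      // Builder style (reverted away): chain the same settings, immutable result.
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("mobcf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .build();
    }
  }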
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 71dac9c..dab55f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -40,35 +40,36 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSVisitor;
@@ -491,7 +492,7 @@ public final class SnapshotTestingUtils {
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final SnapshotProtos.SnapshotDescription desc;
- private final TableDescriptor htd;
+ private final HTableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
private final Path rootDir;
@@ -499,7 +500,7 @@ public final class SnapshotTestingUtils {
private int snapshotted = 0;
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
- final Path rootDir, final TableDescriptor htd,
+ final Path rootDir, final HTableDescriptor htd,
final SnapshotProtos.SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
@@ -513,7 +514,7 @@ public final class SnapshotTestingUtils {
.createTableDescriptorForTableDirectory(snapshotDir, htd, false);
}
- public TableDescriptor getTableDescriptor() {
+ public HTableDescriptor getTableDescriptor() {
return this.htd;
}
@@ -679,11 +680,11 @@ public final class SnapshotTestingUtils {
private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
final int numRegions, final int version) throws IOException {
- TableDescriptor htd = createHtd(tableName);
+ HTableDescriptor htd = createHtd(tableName);
RegionData[] regions = createTable(htd, numRegions);
SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription.newBuilder()
- .setTable(htd.getTableName().getNameAsString())
+ .setTable(htd.getNameAsString())
.setName(snapshotName)
.setVersion(version)
.build();
@@ -693,13 +694,13 @@ public final class SnapshotTestingUtils {
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
}
- public TableDescriptor createHtd(final String tableName) {
- return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
- .addColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY))
- .build();
+ public HTableDescriptor createHtd(final String tableName) {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+ htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
+ return htd;
}
- private RegionData[] createTable(final TableDescriptor htd, final int nregions)
+ private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
@@ -765,15 +766,14 @@ public final class SnapshotTestingUtils {
public static void createTable(final HBaseTestingUtility util, final TableName tableName,
int regionReplication, int nRegions, final byte[]... families)
throws IOException, InterruptedException {
- TableDescriptorBuilder builder
- = TableDescriptorBuilder
- .newBuilder(tableName)
- .setRegionReplication(regionReplication);
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.setRegionReplication(regionReplication);
for (byte[] family : families) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ htd.addFamily(hcd);
}
byte[][] splitKeys = getSplitKeys(nRegions);
- util.createTable(builder.build(), splitKeys);
+ util.createTable(htd, splitKeys);
assertEquals((splitKeys.length + 1) * regionReplication,
util.getAdmin().getTableRegions(tableName).size());
}
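
The hunk above captures the crux of the whole revert: HBASE-18503 had moved callers onto the immutable TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API, and the revert restores the mutable HTableDescriptor/HColumnDescriptor style. A minimal side-by-side sketch of the two styles, using only the calls visible in the hunks (table and family names are illustrative, not from the patch):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorStyles {
  // Immutable builder style introduced by HBASE-18503 (removed by this revert):
  // setters return the builder and build() yields a read-only descriptor.
  static TableDescriptor builderStyle(TableName tn, byte[] family) {
    return TableDescriptorBuilder.newBuilder(tn)
        .setRegionReplication(2)
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
        .build();
  }

  // Mutable legacy style restored by this revert: the descriptor is built up
  // by mutating a single HTableDescriptor instance.
  static HTableDescriptor legacyStyle(TableName tn, byte[] family) {
    HTableDescriptor htd = new HTableDescriptor(tn);
    htd.setRegionReplication(2);
    htd.addFamily(new HColumnDescriptor(family));
    return htd;
  }

  public static void main(String[] args) {
    System.out.println(builderStyle(TableName.valueOf("demo"), Bytes.toBytes("f")));
    System.out.println(legacyStyle(TableName.valueOf("demo"), Bytes.toBytes("f")));
  }
}

The builder variant returns a read-only TableDescriptor, which is why the revert also has to reintroduce in-place setters at call sites throughout the files below.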
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index b7110b2..4b684e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -104,11 +104,11 @@ public class TestRestoreSnapshotHelper {
builder.addRegionV2();
builder.addRegionV1();
Path snapshotDir = builder.commit();
- TableDescriptor htd = builder.getTableDescriptor();
+ HTableDescriptor htd = builder.getTableDescriptor();
SnapshotDescription desc = builder.getSnapshotDescription();
// Test clone a snapshot
- TableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
+ HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
testRestore(snapshotDir, desc, htdClone);
verifyRestore(rootDir, htd, htdClone);
@@ -118,13 +118,13 @@ public class TestRestoreSnapshotHelper {
.setTable("testtb-clone")
.build();
Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
- TableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
+ HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
testRestore(cloneDir, cloneDesc, htdClone2);
verifyRestore(rootDir, htd, htdClone2);
}
- private void verifyRestore(final Path rootDir, final TableDescriptor sourceHtd,
- final TableDescriptor htdClone) throws IOException {
+ private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
+ final HTableDescriptor htdClone) throws IOException {
List<String> files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.size());
@@ -148,7 +148,7 @@ public class TestRestoreSnapshotHelper {
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
private void testRestore(final Path snapshotDir, final SnapshotDescription sd,
- final TableDescriptor htdClone) throws IOException {
+ final HTableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
@@ -164,7 +164,7 @@ public class TestRestoreSnapshotHelper {
* Initialize the restore helper, based on the snapshot and table information provided.
*/
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
- final SnapshotDescription sd, final TableDescriptor htdClone) throws IOException {
+ final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
MonitoredTask status = Mockito.mock(MonitoredTask.class);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
index 8ba4262..0ee28d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
@@ -129,7 +129,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder =
SnapshotRegionManifest.newBuilder();
- for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
+ for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
@@ -150,7 +150,7 @@ public class TestSnapshotManifest {
}
dataManifestBuilder
- .setTableSchema(ProtobufUtil.toTableSchema(builder.getTableDescriptor()));
+ .setTableSchema(ProtobufUtil.convertToTableSchema(builder.getTableDescriptor()));
SnapshotDataManifest dataManifest = dataManifestBuilder.build();
return writeDataManifest(dataManifest);
@@ -163,7 +163,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder();
dataRegionManifestBuilder.setRegionInfo(HRegionInfo.convert(regionInfo));
- for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
+ for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
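
Both TestSnapshotManifest hunks are the same mechanical rename: the manifest code walks the table's families and wraps each family name for the protobuf builder. A sketch of the two iteration styles, assuming a descriptor built as in the earlier sketch:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;

class FamilyIteration {
  // HBASE-18503 style (removed here): getColumnFamilies() returns the
  // read-only ColumnFamilyDescriptor interface.
  static int familyBytesNew(TableDescriptor htd) {
    int n = 0;
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
      n += cfd.getName().length;
    }
    return n;
  }

  // Legacy style (restored here): getFamilies() returns HColumnDescriptor,
  // whose getName() bytes feed the manifest's setFamilyName() via
  // UnsafeByteOperations.unsafeWrap() in the hunks above.
  static int familyBytesOld(HTableDescriptor htd) {
    int n = 0;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      n += hcd.getName().length;
    }
    return n;
  }
}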
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 30a7cd6..8337eb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -40,12 +40,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -79,7 +78,7 @@ public class TestFSTableDescriptors {
@Test
public void testCreateAndUpdate() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
@@ -99,7 +98,7 @@ public class TestFSTableDescriptors {
@Test
public void testSequenceIdAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
Path p0 = fstd.updateTableDescriptor(htd);
@@ -119,7 +118,7 @@ public class TestFSTableDescriptors {
assertTrue(!fs.exists(p2));
int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
assertTrue(i3 == i2 + 1);
- TableDescriptor descriptor = fstd.get(htd.getTableName());
+ HTableDescriptor descriptor = fstd.get(htd.getTableName());
assertEquals(descriptor, htd);
}
@@ -162,7 +161,7 @@ public class TestFSTableDescriptors {
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
htds.add(htd);
assertNotNull(htds.remove(htd.getTableName()));
assertNull(htds.remove(htd.getTableName()));
@@ -171,11 +170,11 @@ public class TestFSTableDescriptors {
@Test public void testReadingHTDFromFS() throws IOException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
fstd.createTableDescriptor(htd);
- TableDescriptor td2 =
+ HTableDescriptor td2 =
FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
assertTrue(htd.equals(td2));
}
@@ -185,25 +184,25 @@ public class TestFSTableDescriptors {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
Path descriptorFile = fstd.updateTableDescriptor(htd);
try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
- out.write(TableDescriptorBuilder.toByteArray(htd));
+ out.write(htd.toByteArray());
}
FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor td2 = fstd2.get(htd.getTableName());
+ HTableDescriptor td2 = fstd2.get(htd.getTableName());
assertEquals(htd, td2);
FileStatus descriptorFile2 =
FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
- byte[] buffer = TableDescriptorBuilder.toByteArray(htd);
+ byte[] buffer = htd.toByteArray();
try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
in.readFully(buffer);
}
- TableDescriptor td3 = TableDescriptorBuilder.parseFrom(buffer);
+ HTableDescriptor td3 = HTableDescriptor.parseFrom(buffer);
assertEquals(htd, td3);
}
- @Test public void testTableDescriptors()
+ @Test public void testHTableDescriptors()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -211,7 +210,7 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
@Override
- public TableDescriptor get(TableName tablename)
+ public HTableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info(tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
@@ -220,7 +219,9 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
- htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
+ HTableDescriptor htd = new HTableDescriptor(
+ new HTableDescriptor(TableName.valueOf(name + i)));
+ htds.createTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
@@ -231,9 +232,9 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
- htds.updateTableDescriptor(builder.build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+ htd.addFamily(new HColumnDescriptor("" + i));
+ htds.updateTableDescriptor(htd);
}
// Wait a while so the mod time we write is sure to differ.
Thread.sleep(100);
@@ -249,7 +250,7 @@ public class TestFSTableDescriptors {
}
@Test
- public void testTableDescriptorsNoCache()
+ public void testHTableDescriptorsNoCache()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -260,7 +261,8 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
- htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+ htds.createTableDescriptor(htd);
}
for (int i = 0; i < 2 * count; i++) {
@@ -268,14 +270,14 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
- htds.updateTableDescriptor(builder.build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+ htd.addFamily(new HColumnDescriptor("" + i));
+ htds.updateTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
assertTrue("Column Family " + i + " missing",
- htds.get(TableName.valueOf(name + i)).hasColumnFamily(Bytes.toBytes("" + i)));
+ htds.get(TableName.valueOf(name + i)).hasFamily(Bytes.toBytes("" + i)));
}
assertEquals(count * 4, htds.invocations);
assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits);
@@ -292,10 +294,12 @@ public class TestFSTableDescriptors {
final int count = 4;
// Write out table infos.
for (int i = 0; i < count; i++) {
- htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+ htds.createTableDescriptor(htd);
}
// add hbase:meta
- htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
+ htds.createTableDescriptor(htd);
assertEquals("getAll() didn't return all TableDescriptors, expected: " +
(count + 1) + " got: " + htds.getAll().size(),
@@ -317,7 +321,8 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos via non-cached FSTableDescriptors
for (int i = 0; i < count; i++) {
- nonchtds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
+ nonchtds.createTableDescriptor(htd);
}
// Calls to getAll() won't increase the cache counter; query per table instead.
@@ -328,15 +333,15 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
- for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
+ for (Map.Entry entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey();
- TableDescriptor nchtd = entry.getValue();
+ HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
assertTrue("expected " + htd.toString() +
" got: " + chtds.get(TableName.valueOf(t)).toString(),
(nchtd.equals(chtds.get(TableName.valueOf(t)))));
@@ -361,7 +366,7 @@ public class TestFSTableDescriptors {
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
htds.add(htd);
htds.add(htd);
htds.add(htd);
@@ -410,14 +415,12 @@ public class TestFSTableDescriptors {
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
- htd = TableDescriptorBuilder.newBuilder(htd)
- .setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"))
- .build();
+ htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDir(htd.getTableName());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
@@ -440,10 +443,10 @@ public class TestFSTableDescriptors {
}
@Override
- public TableDescriptor get(TableName tablename)
+ public HTableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
- " TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
+ " HTableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
}
}
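
TestFSTableDescriptors exercises the descriptor lifecycle this revert keeps touching: create, update, serialize, read back. A condensed sketch of that round trip against the restored HTableDescriptor API (the root dir is illustrative; the reverted builder equivalents were TableDescriptorBuilder.toByteArray() and TableDescriptorBuilder.parseFrom()):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

class DescriptorRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path("/tmp/descriptor-demo");  // illustrative root dir
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    fstd.createTableDescriptor(htd);  // writes the table info file; false if it exists
    fstd.updateTableDescriptor(htd);  // bumps the sequence id, returns the new Path

    byte[] wire = htd.toByteArray();  // pb-serialized schema, as the test writes to FS
    HTableDescriptor copy = HTableDescriptor.parseFrom(wire);
    System.out.println(htd.equals(copy) + " " + fstd.get(htd.getTableName()));
  }
}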
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 979a351..fce4eaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
protected final SnapshotManifest snapshotManifest;
protected final SnapshotManager snapshotManager;
- protected TableDescriptor htd;
+ protected HTableDescriptor htd;
/**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
"Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
}
- private TableDescriptor loadTableDescriptor()
+ private HTableDescriptor loadTableDescriptor()
throws FileNotFoundException, IOException {
- TableDescriptor htd =
+ HTableDescriptor htd =
this.master.getTableDescriptors().get(snapshotTable);
if (htd == null) {
- throw new IOException("TableDescriptor missing for " + snapshotTable);
+ throw new IOException("HTableDescriptor missing for " + snapshotTable);
}
return htd;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index b1d1415..d4a54bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
- public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException {
+ public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) throws IOException {
Configuration conf = getConf();
TableName tn = TableName.valueOf(tableName);
FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
Connection connection = ConnectionFactory.createConnection(getConf());
Admin admin = connection.getAdmin();
try {
- TableDescriptor htd = admin.listTableDescriptor(tn);
- ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
+ HTableDescriptor htd = admin.getTableDescriptor(tn);
+ HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
if (family == null || !family.isMobEnabled()) {
throw new IOException("Column family " + familyName + " is not a MOB column family");
}
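
Here the rename reaches the Admin API: listTableDescriptor()/getColumnFamily() revert to getTableDescriptor()/getFamily(). A sketch of the restored MOB-family validation, mirroring the cleaner code above (the connection setup and helper wrapper are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

class MobFamilyLookup {
  // Looks up a family and insists it is MOB-enabled, as the cleaner does.
  static HColumnDescriptor findMobFamily(String table, String familyName) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf(table));
      HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
      if (family == null || !family.isMobEnabled()) {
        throw new IOException("Column family " + familyName + " is not a MOB column family");
      }
      return family;
    }
  }
}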
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 4273098..80bda28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -44,8 +44,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
@@ -55,7 +57,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -285,7 +286,7 @@ public final class MobUtils {
* @throws IOException
*/
public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
- ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
+ HColumnDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
throws IOException {
long timeToLive = columnDescriptor.getTimeToLive();
if (Integer.MAX_VALUE == timeToLive) {
@@ -518,7 +519,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+ HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@@ -542,7 +543,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
+ HColumnDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
return createWriter(conf, fs, family,
@@ -569,7 +570,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+ HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@@ -595,7 +596,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+ HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext)
throws IOException {
@@ -622,7 +623,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
+ HColumnDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
boolean isCompaction)
throws IOException {
@@ -796,7 +797,7 @@ public final class MobUtils {
* @param allFiles Whether add all mob files into the compaction.
*/
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
+ HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
throws IOException {
String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
PartitionedMobCompactor.class.getName());
@@ -804,7 +805,7 @@ public final class MobUtils {
MobCompactor compactor = null;
try {
compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
- Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class,
+ Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class,
ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
} catch (Exception e) {
throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
@@ -856,9 +857,9 @@ public final class MobUtils {
* @param htd The current table descriptor.
* @return Whether this table has mob-enabled columns.
*/
- public static boolean hasMobColumns(TableDescriptor htd) {
- ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
- for (ColumnFamilyDescriptor hcd : hcds) {
+ public static boolean hasMobColumns(HTableDescriptor htd) {
+ HColumnDescriptor[] hcds = htd.getColumnFamilies();
+ for (HColumnDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
return true;
}
@@ -898,7 +899,7 @@ public final class MobUtils {
* @param fileDate The date string parsed from the mob file name.
* @return True if the mob file is expired.
*/
- public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) {
+ public static boolean isMobFileExpired(HColumnDescriptor column, long current, String fileDate) {
if (column.getMinVersions() > 0) {
return false;
}
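
One subtlety in the doMobCompaction() hunk: the compactor is instantiated reflectively, so the Class[] token array must name the exact constructor parameter types, and a mismatch between the tokens and the MobCompactor constructor only surfaces at runtime. That is why the revert edits the Class[] and the constructor signatures together. A sketch of that factory call, using the arguments from the hunk (the helper wrapper itself is illustrative):

import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mob.compactions.MobCompactor;
import org.apache.hadoop.hbase.util.ReflectionUtils;

class CompactorFactorySketch {
  static MobCompactor newCompactor(String className, Configuration conf, FileSystem fs,
      TableName tableName, HColumnDescriptor hcd, ExecutorService pool) {
    // The token array must match the target constructor exactly, so reverting
    // the parameter type to HColumnDescriptor.class is mandatory, not cosmetic.
    return ReflectionUtils.instantiateWithCustomCtor(className,
        new Class[] { Configuration.class, FileSystem.class, TableName.class,
            HColumnDescriptor.class, ExecutorService.class },
        new Object[] { conf, fs, tableName, hcd, pool });
  }
}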
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
index 7ebdbc7..77de0cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -42,14 +42,14 @@ public abstract class MobCompactor {
protected FileSystem fs;
protected Configuration conf;
protected TableName tableName;
- protected ColumnFamilyDescriptor column;
+ protected HColumnDescriptor column;
protected Path mobTableDir;
protected Path mobFamilyDir;
protected ExecutorService pool;
public MobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- ColumnFamilyDescriptor column, ExecutorService pool) {
+ HColumnDescriptor column, ExecutorService pool) {
this.conf = conf;
this.fs = fs;
this.tableName = tableName;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index da664cd..d37292c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -45,13 +45,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
@@ -109,7 +109,7 @@ public class PartitionedMobCompactor extends MobCompactor {
private Encryption.Context cryptoContext = Encryption.Context.NONE;
public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- ColumnFamilyDescriptor column, ExecutorService pool) throws IOException {
+ HColumnDescriptor column, ExecutorService pool) throws IOException {
super(conf, fs, tableName, column, pool);
mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index eb9a5f7..e1d2ea1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.JobUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -109,13 +109,13 @@ public class CompactionTool extends Configured implements Tool {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
- TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri,
path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
- TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
@@ -127,13 +127,13 @@ public class CompactionTool extends Configured implements Tool {
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
throws IOException {
- TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
compactRegion(tableDir, htd, regionDir, compactOnce, major);
}
}
- private void compactRegion(final Path tableDir, final TableDescriptor htd,
+ private void compactRegion(final Path tableDir, final HTableDescriptor htd,
final Path regionDir, final boolean compactOnce, final boolean major)
throws IOException {
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
@@ -147,7 +147,7 @@ public class CompactionTool extends Configured implements Tool {
* If the compact once flag is not specified, execute the compaction until
* no more compactions are needed. Uses the Configuration settings provided.
*/
- private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
+ private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
final HRegionInfo hri, final String familyName, final boolean compactOnce,
final boolean major) throws IOException {
HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
@@ -177,7 +177,7 @@ public class CompactionTool extends Configured implements Tool {
* the store dir to compact as source.
*/
private static HStore getStore(final Configuration conf, final FileSystem fs,
- final Path tableDir, final TableDescriptor htd, final HRegionInfo hri,
+ final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
final String familyName, final Path tempDir) throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
@Override
@@ -186,7 +186,7 @@ public class CompactionTool extends Configured implements Tool {
}
};
HRegion region = new HRegion(regionFs, null, conf, htd, null);
- return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
+ return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
}
}
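
CompactionTool runs against the filesystem without a live cluster, so it reads the schema straight out of the table directory; the revert only changes the descriptor type that comes back. A sketch of that offline lookup (the path and family arguments are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

class OfflineSchemaLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // args[0]: table dir, e.g. <hbase.rootdir>/data/default/mytable (illustrative)
    Path tableDir = new Path(args[0]);
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
    // args[1]: family name; getFamily() returns null if the family does not exist
    HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(args[1]));
    System.out.println("family " + args[1] + " maxVersions=" + hcd.getMaxVersions());
  }
}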
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 74a2998..9cb1316 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
@@ -340,8 +340,8 @@ public class HRegionFileSystem {
* @return true if region has reference file
* @throws IOException
*/
- public boolean hasReferences(final TableDescriptor htd) throws IOException {
- for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
+ public boolean hasReferences(final HTableDescriptor htd) throws IOException {
+ for (HColumnDescriptor family : htd.getFamilies()) {
if (hasReferences(family.getNameAsString())) {
return true;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ae2f7dd..28b7a43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -50,7 +50,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Function;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
@@ -90,7 +89,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -702,11 +700,7 @@ public class HRegionServer extends HasThread implements
protected TableDescriptors getFsTableDescriptors() throws IOException {
return new FSTableDescriptors(this.conf,
- this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
- }
-
- protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
- return null;
+ this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
}
protected void setInitLatch(CountDownLatch latch) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7fc025a..9a25275 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
@@ -76,7 +77,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
@@ -1859,7 +1859,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
final int regionCount = request.getOpenInfoCount();
- final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
+ final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
final boolean isBulkAssign = regionCount > 1;
try {
checkOpen();
@@ -1898,7 +1898,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
- TableDescriptor htd;
+ HTableDescriptor htd;
try {
String encodedName = region.getEncodedName();
byte[] encodedNameBytes = region.getEncodedNameAsBytes();
@@ -2020,7 +2020,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
RegionInfo regionInfo = request.getRegionInfo();
final HRegionInfo region = HRegionInfo.convert(regionInfo);
- TableDescriptor htd;
+ HTableDescriptor htd;
WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
index 6913ecd..e49b164 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenMetaHandler extends OpenRegionHandler {
public OpenMetaHandler(final Server server,
- final RegionServerServices rsServices, HRegionInfo regionInfo,
- final TableDescriptor htd, long masterSystemTime) {
+ final RegionServerServices rsServices, HRegionInfo regionInfo,
+ final HTableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
index ced9ef2..83d4d3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
@@ -19,10 +19,11 @@
package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
/**
@@ -33,7 +34,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenPriorityRegionHandler extends OpenRegionHandler {
public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices,
- HRegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) {
+ HRegionInfo regionInfo, HTableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime,
EventType.M_RS_OPEN_PRIORITY_REGION);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index bbb084c..8369100 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -25,8 +25,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@@ -48,18 +48,18 @@ public class OpenRegionHandler extends EventHandler {
protected final RegionServerServices rsServices;
private final HRegionInfo regionInfo;
- private final TableDescriptor htd;
+ private final HTableDescriptor htd;
private final long masterSystemTime;
public OpenRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
- TableDescriptor htd, long masterSystemTime) {
+ HTableDescriptor htd, long masterSystemTime) {
this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
}
protected OpenRegionHandler(final Server server,
- final RegionServerServices rsServices, final HRegionInfo regionInfo,
- final TableDescriptor htd, long masterSystemTime, EventType eventType) {
+ final RegionServerServices rsServices, final HRegionInfo regionInfo,
+ final HTableDescriptor htd, long masterSystemTime, EventType eventType) {
super(server, eventType);
this.rsServices = rsServices;
this.regionInfo = regionInfo;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 4ea0434..9f600da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
@@ -399,7 +399,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
if (requiresReplication == null) {
// check if the table requires memstore replication
// some unit tests drop the table, so do a bypass check and always replicate.
- TableDescriptor htd = tableDescriptors.get(tableName);
+ HTableDescriptor htd = tableDescriptors.get(tableName);
requiresReplication = htd == null || htd.hasRegionMemstoreReplication();
memstoreReplicationEnabled.put(tableName, requiresReplication);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index cae4c7e..9875ac0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -34,7 +34,6 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadPoolExecutor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -44,6 +43,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -125,7 +125,7 @@ public class RestoreSnapshotHelper {
private final SnapshotDescription snapshotDesc;
private final TableName snapshotTable;
- private final TableDescriptor tableDesc;
+ private final HTableDescriptor tableDesc;
private final Path rootDir;
private final Path tableDir;
@@ -136,7 +136,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status) {
@@ -146,7 +146,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status,
@@ -265,18 +265,18 @@ public class RestoreSnapshotHelper {
*/
public static class RestoreMetaChanges {
private final Map<String, Pair<String, String> > parentsMap;
- private final TableDescriptor htd;
+ private final HTableDescriptor htd;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
- public RestoreMetaChanges(TableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
+ public RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
this.parentsMap = parentsMap;
this.htd = htd;
}
- public TableDescriptor getTableDescriptor() {
+ public HTableDescriptor getTableDescriptor() {
return htd;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 32cdabf..f70fe9e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -76,7 +76,7 @@ public final class SnapshotManifest {
private List<SnapshotRegionManifest> regionManifests;
private SnapshotDescription desc;
- private TableDescriptor htd;
+ private HTableDescriptor htd;
private final ForeignExceptionSnare monitor;
private final Configuration conf;
@@ -119,7 +119,7 @@ public final class SnapshotManifest {
/**
* Return a SnapshotManifest instance with the information already loaded in-memory.
* SnapshotManifest manifest = SnapshotManifest.open(...)
- * TableDescriptor htd = manifest.getTableDescriptor()
+ * HTableDescriptor htd = manifest.getTableDescriptor()
* for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests())
* hri = regionManifest.getRegionInfo()
* for (regionManifest.getFamilyFiles())
@@ -136,7 +136,7 @@ public final class SnapshotManifest {
/**
* Add the table descriptor to the snapshot manifest
*/
- public void addTableDescriptor(final TableDescriptor htd) throws IOException {
+ public void addTableDescriptor(final HTableDescriptor htd) throws IOException {
this.htd = htd;
}
@@ -182,7 +182,7 @@ public final class SnapshotManifest {
LOG.debug("Creating references for mob files");
Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
// 2.1. build the snapshot reference for the store if it's a mob store
if (!hcd.isMobEnabled()) {
continue;
@@ -377,7 +377,7 @@ public final class SnapshotManifest {
case SnapshotManifestV2.DESCRIPTOR_VERSION: {
SnapshotDataManifest dataManifest = readDataManifest();
if (dataManifest != null) {
- htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema());
+ htd = ProtobufUtil.convertToHTableDesc(dataManifest.getTableSchema());
regionManifests = dataManifest.getRegionManifestsList();
} else {
// Compatibility, load the v1 regions
@@ -429,7 +429,7 @@ public final class SnapshotManifest {
/**
* Get the table descriptor from the Snapshot
*/
- public TableDescriptor getTableDescriptor() {
+ public HTableDescriptor getTableDescriptor() {
return this.htd;
}
@@ -485,7 +485,7 @@ public final class SnapshotManifest {
}
SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
- dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd));
+ dataManifestBuilder.setTableSchema(ProtobufUtil.convertToTableSchema(htd));
if (v1Regions != null && v1Regions.size() > 0) {
dataManifestBuilder.addAllRegionManifests(v1Regions);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index a73883b..eb6b766 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -24,10 +24,11 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
@@ -39,19 +40,17 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -80,14 +79,10 @@ public class FSTableDescriptors implements TableDescriptors {
private volatile boolean usecache;
private volatile boolean fsvisited;
- @VisibleForTesting
- long cachehits = 0;
- @VisibleForTesting
- long invocations = 0;
+ @VisibleForTesting long cachehits = 0;
+ @VisibleForTesting long invocations = 0;
- /**
- * The file name prefix used to store HTD in HDFS
- */
+ /** The file name prefix used to store HTD in HDFS */
static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
static final String TABLEINFO_DIR = ".tabledesc";
static final String TMP_DIR = ".tmp";
@@ -95,12 +90,12 @@ public class FSTableDescriptors implements TableDescriptors {
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
- private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
+ private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
- private final TableDescriptor metaTableDescriptor;
+ private final HTableDescriptor metaTableDescriptor;
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -112,112 +107,91 @@ public class FSTableDescriptors implements TableDescriptors {
}
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
- throws IOException {
+ throws IOException {
this(conf, fs, rootdir, false, true);
}
/**
* @param fsreadonly True if we are read-only when it comes to filesystem
- * operations; i.e. on remove, we do not do delete in fs.
- */
- public FSTableDescriptors(final Configuration conf, final FileSystem fs,
- final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
- this(conf, fs, rootdir, fsreadonly, usecache, null);
- }
-
- /**
- * @param fsreadonly True if we are read-only when it comes to filesystem
- * operations; i.e. on remove, we do not do delete in fs.
- * @param metaObserver Used by HMaster. It needs to modify the META_REPLICAS_NUM for meta table descriptor.
- * see HMaster#finishActiveMasterInitialization
- * TODO: This is a workaround. Should remove this ugly code...
+ * operations; i.e. on remove, we do not do delete in fs.
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
- final Path rootdir, final boolean fsreadonly, final boolean usecache,
- Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+ final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
+ super();
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
- this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
- : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
- }
- @VisibleForTesting
- public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
- return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true)
- .build())
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true)
- .build())
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true)
- .build())
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true)
- .build())
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
- // Ten is arbitrary number. Keep versions to help debugging.
- .setMaxVersions(10)
- .setInMemory(true)
- .setBlocksize(8 * 1024)
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true)
- .build())
- .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
- null, Coprocessor.PRIORITY_SYSTEM, null);
+ this.metaTableDescriptor = createMetaTableDescriptor(conf);
}
@VisibleForTesting
- public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
+ public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
throws IOException {
- return createMetaTableDescriptorBuilder(conf).build();
+ return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ .addColumnFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true))
+ .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true))
+ .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true))
+ .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true))
+ .addColumnFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY)
+ // Ten is arbitrary number. Keep versions to help debugging.
+ .setMaxVersions(10)
+ .setInMemory(true)
+ .setBlocksize(8 * 1024)
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true))
+ .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+ null, Coprocessor.PRIORITY_SYSTEM, null)
+ .build());
}
@Override
@@ -245,7 +219,7 @@ public class FSTableDescriptors implements TableDescriptors {
*/
@Override
@Nullable
- public TableDescriptor get(final TableName tablename)
+ public HTableDescriptor get(final TableName tablename)
throws IOException {
invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
@@ -260,13 +234,13 @@ public class FSTableDescriptors implements TableDescriptors {
if (usecache) {
// Look in cache of descriptors.
- TableDescriptor cachedtdm = this.cache.get(tablename);
+ HTableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
}
- TableDescriptor tdmt = null;
+ HTableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
} catch (NullPointerException e) {
@@ -290,21 +264,21 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
- public Map<String, TableDescriptor> getAllDescriptors()
+ public Map<String, HTableDescriptor> getAllDescriptors()
throws IOException {
- Map<String, TableDescriptor> tds = new TreeMap<>();
+ Map<String, HTableDescriptor> tds = new TreeMap<>();
if (fsvisited && usecache) {
- for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
+ for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
- tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
+ tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
- TableDescriptor htd = null;
+ HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@@ -327,10 +301,10 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
- public Map<String, TableDescriptor> getAll() throws IOException {
- Map<String, TableDescriptor> htds = new TreeMap<>();
- Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
- for (Map.Entry<String, TableDescriptor> entry : allDescriptors
+ public Map<String, HTableDescriptor> getAll() throws IOException {
+ Map<String, HTableDescriptor> htds = new TreeMap<>();
+ Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
+ for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
.entrySet()) {
htds.put(entry.getKey(), entry.getValue());
}
@@ -342,13 +316,13 @@ public class FSTableDescriptors implements TableDescriptors {
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
- public Map<String, TableDescriptor> getByNamespace(String name)
+ public Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException {
- Map<String, TableDescriptor> htds = new TreeMap<>();
+ Map<String, HTableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs =
FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
for (Path d: tableDirs) {
- TableDescriptor htd = null;
+ HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@@ -366,7 +340,7 @@ public class FSTableDescriptors implements TableDescriptors {
* and updates the local cache with it.
*/
@Override
- public void add(TableDescriptor htd) throws IOException {
+ public void add(HTableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
@@ -377,7 +351,7 @@ public class FSTableDescriptors implements TableDescriptors {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: "
- + htd.getTableName().getNameAsString());
+ + htd.getNameAsString());
}
updateTableDescriptor(htd);
}
@@ -388,7 +362,7 @@ public class FSTableDescriptors implements TableDescriptors {
* from the FileSystem.
*/
@Override
- public TableDescriptor remove(final TableName tablename)
+ public HTableDescriptor remove(final TableName tablename)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
@@ -399,7 +373,7 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
- TableDescriptor descriptor = this.cache.remove(tablename);
+ HTableDescriptor descriptor = this.cache.remove(tablename);
return descriptor;
}
@@ -583,7 +557,7 @@ public class FSTableDescriptors implements TableDescriptors {
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
- public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+ public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
@@ -594,7 +568,7 @@ public class FSTableDescriptors implements TableDescriptors {
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
- public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+ public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
@@ -603,7 +577,7 @@ public class FSTableDescriptors implements TableDescriptors {
return readTableDescriptor(fs, status);
}
- private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
+ private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
@@ -613,9 +587,9 @@ public class FSTableDescriptors implements TableDescriptors {
} finally {
fsDataInputStream.close();
}
- TableDescriptor htd = null;
+ HTableDescriptor htd = null;
try {
- htd = TableDescriptorBuilder.parseFrom(content);
+ htd = HTableDescriptor.parseFrom(content);
} catch (DeserializationException e) {
throw new IOException("content=" + Bytes.toShort(content), e);
}
@@ -627,7 +601,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
- @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
+ @VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
@@ -689,7 +663,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
- final TableDescriptor htd, final Path tableDir,
+ final HTableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@@ -744,42 +718,42 @@ public class FSTableDescriptors implements TableDescriptors {
return tableInfoDirPath;
}
- private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
+ private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
// We used to write this file out as a serialized HTD Writable followed by two '\n's and then
// the toString version of HTD. Now we just write out the pb serialization.
- out.write(TableDescriptorBuilder.toByteArray(htd));
+ out.write(htd.toByteArray());
} finally {
out.close();
}
}
/**
- * Create new TableDescriptor in HDFS. Happens when we are creating table.
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
- public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
+ public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
- * Create new TableDescriptor in HDFS. Happens when we are creating table. If
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @return True if we successfully created file.
*/
- public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
+ public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
throws IOException {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
- * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
+ * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
@@ -790,7 +764,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException if a filesystem error occurs
*/
public boolean createTableDescriptorForTableDirectory(Path tableDir,
- TableDescriptor htd, boolean forceCreation) throws IOException {
+ HTableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}
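
The hunk above swaps between the two descriptor-construction styles that coexist in this series. A minimal side-by-side sketch (table and family names are placeholders), assuming only the calls visible in the diff:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorStyles {
  // Immutable builder style (HBASE-18503, removed by the revert).
  static TableDescriptor builderStyle() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
            .setMaxVersions(3)
            .setInMemory(true)
            .build())
        .build();
  }

  // Mutable legacy style (restored by the revert).
  static HTableDescriptor legacyStyle() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    htd.addFamily(new HColumnDescriptor("f1").setMaxVersions(3).setInMemory(true));
    return htd;
  }

  // Serialization moves in step, per the readTableDescriptor/writeTD hunks:
  // TableDescriptorBuilder.toByteArray(td) / TableDescriptorBuilder.parseFrom(bytes)
  // versus htd.toByteArray() / HTableDescriptor.parseFrom(bytes).
}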
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 199ed7d..ff5d482 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,10 +17,6 @@
*/
package org.apache.hadoop.hbase.util;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
@@ -88,9 +84,11 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -941,7 +939,7 @@ public class HBaseFsck extends Configured implements Closeable {
TableName tableName = hi.getTableName();
TableInfo tableInfo = tablesInfo.get(tableName);
Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
- TableDescriptor template = tableInfo.getHTD();
+ HTableDescriptor template = tableInfo.getHTD();
// find min and max key values
Pair<byte[],byte[]> orphanRegionRange = null;
@@ -1202,17 +1200,17 @@ public class HBaseFsck extends Configured implements Closeable {
*/
private void reportTablesInFlux() {
AtomicInteger numSkipped = new AtomicInteger(0);
- TableDescriptor[] allTables = getTables(numSkipped);
+ HTableDescriptor[] allTables = getTables(numSkipped);
errors.print("Number of Tables: " + allTables.length);
if (details) {
if (numSkipped.get() > 0) {
errors.detail("Number of Tables in flux: " + numSkipped.get());
}
- for (TableDescriptor td : allTables) {
+ for (HTableDescriptor td : allTables) {
errors.detail(" Table: " + td.getTableName() + "\t" +
(td.isReadOnly() ? "ro" : "rw") + "\t" +
(td.isMetaRegion() ? "META" : " ") + "\t" +
- " families: " + td.getColumnFamilyCount());
+ " families: " + td.getFamilies().size());
}
}
}
@@ -1316,7 +1314,7 @@ public class HBaseFsck extends Configured implements Closeable {
modTInfo = new TableInfo(tableName);
tablesInfo.put(tableName, modTInfo);
try {
- TableDescriptor htd =
+ HTableDescriptor htd =
FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
modTInfo.htds.add(htd);
} catch (IOException ioe) {
@@ -1363,17 +1361,17 @@ public class HBaseFsck extends Configured implements Closeable {
* To fabricate a .tableinfo file with following contents<br>
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
- * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
+ * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false;
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+ HTableDescriptor htd = new HTableDescriptor(tableName);
for (String columnfamily : columns) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamily));
+ htd.addFamily(new HColumnDescriptor(columnfamily));
}
- fstd.createTableDescriptor(builder.build(), true);
+ fstd.createTableDescriptor(htd, true);
return true;
}
@@ -1398,7 +1396,7 @@ public class HBaseFsck extends Configured implements Closeable {
* 2. else create a default .tableinfo file with following items<br>
* 2.1 the correct tablename <br>
* 2.2 the correct colfamily list<br>
- * 2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
+ * 2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
public void fixOrphanTables() throws IOException {
@@ -1406,7 +1404,7 @@ public class HBaseFsck extends Configured implements Closeable {
List<TableName> tmpList = new ArrayList<>(orphanTableDirs.keySet().size());
tmpList.addAll(orphanTableDirs.keySet());
- TableDescriptor[] htds = getTableDescriptors(tmpList);
+ HTableDescriptor[] htds = getHTableDescriptors(tmpList);
Iterator<Entry<TableName, Set<String>>> iter =
orphanTableDirs.entrySet().iterator();
int j = 0;
@@ -1419,7 +1417,7 @@ public class HBaseFsck extends Configured implements Closeable {
LOG.info("Trying to fix orphan table error: " + tableName);
if (j < htds.length) {
if (tableName.equals(htds[j].getTableName())) {
- TableDescriptor htd = htds[j];
+ HTableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache");
fstd.createTableDescriptor(htd, true);
j++;
@@ -1428,7 +1426,7 @@ public class HBaseFsck extends Configured implements Closeable {
} else {
if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
- LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName);
+ LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
iter.remove();
} else {
LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information");
@@ -1465,7 +1463,7 @@ public class HBaseFsck extends Configured implements Closeable {
Path rootdir = FSUtils.getRootDir(getConf());
Configuration c = getConf();
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
- TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+ HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
@@ -2648,8 +2646,8 @@ public class HBaseFsck extends Configured implements Closeable {
* regions reported for the table, but table dir is there in hdfs
*/
private void loadTableInfosForTablesWithNoRegion() throws IOException {
- Map<String, TableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
- for (TableDescriptor htd : allTables.values()) {
+ Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
+ for (HTableDescriptor htd : allTables.values()) {
if (checkMetaOnly && !htd.isMetaTable()) {
continue;
}
@@ -2772,8 +2770,8 @@ public class HBaseFsck extends Configured implements Closeable {
// region split calculator
final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<>(cmp);
- // Histogram of different TableDescriptors found. Ideally there is only one!
- final Set<TableDescriptor> htds = new HashSet<>();
+ // Histogram of different HTableDescriptors found. Ideally there is only one!
+ final Set<HTableDescriptor> htds = new HashSet<>();
// key = start split, values = set of splits in problem group
final Multimap<byte[], HbckInfo> overlapGroups =
@@ -2790,9 +2788,9 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* @return descriptor common to all regions. null if are none or multiple!
*/
- private TableDescriptor getHTD() {
+ private HTableDescriptor getHTD() {
if (htds.size() == 1) {
- return (TableDescriptor)htds.toArray()[0];
+ return (HTableDescriptor)htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '"
+ tableName + "' regions: " + htds);
@@ -2962,7 +2960,7 @@ public class HBaseFsck extends Configured implements Closeable {
"First region should start with an empty key. Creating a new " +
"region and regioninfo in HDFS to plug the hole.",
getTableInfo(), next);
- TableDescriptor htd = getTableInfo().getHTD();
+ HTableDescriptor htd = getTableInfo().getHTD();
// from special EMPTY_START_ROW to next region's startKey
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
HConstants.EMPTY_START_ROW, next.getStartKey());
@@ -2979,7 +2977,7 @@ public class HBaseFsck extends Configured implements Closeable {
errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
"Last region should end with an empty key. Creating a new "
+ "region and regioninfo in HDFS to plug the hole.", getTableInfo());
- TableDescriptor htd = getTableInfo().getHTD();
+ HTableDescriptor htd = getTableInfo().getHTD();
// from curEndKey to EMPTY_START_ROW
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
HConstants.EMPTY_START_ROW);
@@ -3003,7 +3001,7 @@ public class HBaseFsck extends Configured implements Closeable {
+ Bytes.toStringBinary(holeStopKey)
+ ". Creating a new regioninfo and region "
+ "dir in hdfs to plug the hole.");
- TableDescriptor htd = getTableInfo().getHTD();
+ HTableDescriptor htd = getTableInfo().getHTD();
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
@@ -3204,7 +3202,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
// create new empty container region.
- TableDescriptor htd = getTableInfo().getHTD();
+ HTableDescriptor htd = getTableInfo().getHTD();
// from start key to end Key
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
range.getSecond());
@@ -3505,7 +3503,7 @@ public class HBaseFsck extends Configured implements Closeable {
* @return tables that have not been modified recently
* @throws IOException if an error is encountered
*/
- TableDescriptor[] getTables(AtomicInteger numSkipped) {
+ HTableDescriptor[] getTables(AtomicInteger numSkipped) {
List<TableName> tableNames = new ArrayList<>();
long now = EnvironmentEdgeManager.currentTime();
@@ -3522,19 +3520,19 @@ public class HBaseFsck extends Configured implements Closeable {
}
}
}
- return getTableDescriptors(tableNames);
+ return getHTableDescriptors(tableNames);
}
- TableDescriptor[] getTableDescriptors(List<TableName> tableNames) {
- LOG.info("getTableDescriptors == tableNames => " + tableNames);
+ HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
+ HTableDescriptor[] htd = new HTableDescriptor[0];
+ LOG.info("getHTableDescriptors == tableNames => " + tableNames);
try (Connection conn = ConnectionFactory.createConnection(getConf());
Admin admin = conn.getAdmin()) {
- List<TableDescriptor> tds = admin.listTableDescriptors(tableNames);
- return tds.toArray(new TableDescriptor[tds.size()]);
+ htd = admin.getTableDescriptorsByTableName(tableNames);
} catch (IOException e) {
LOG.debug("Exception getting table descriptors", e);
}
- return new TableDescriptor[0];
+ return htd;
}
/**
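
getHTableDescriptors above is the bulk-lookup path HBaseFsck uses; the revert trades Admin.listTableDescriptors for the legacy array-returning call. A minimal sketch of the two lookups (connection setup and table name are placeholders):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    List<TableName> names = Arrays.asList(TableName.valueOf("t1"));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Interface-based API (removed by the revert):
      List<TableDescriptor> tds = admin.listTableDescriptors(names);
      // Legacy API (restored by the revert):
      HTableDescriptor[] htds = admin.getTableDescriptorsByTableName(names);
      System.out.println(tds.size() + " vs " + htds.length + " descriptors");
    }
  }
}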
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 6552b32..961e8a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -188,7 +188,7 @@ public class HBaseFsckRepair {
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf,
- HRegionInfo hri, TableDescriptor htd) throws IOException {
+ HRegionInfo hri, HTableDescriptor htd) throws IOException {
// Create HRegion
Path root = FSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
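
createHDFSRegionDir is how hbck plugs holes in the region chain, as the TableInfo hunks earlier show. A hedged usage sketch (table name and key range are made up; per its javadoc the helper creates, flushes, and closes the new region):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HBaseFsckRepair;

public class PlugRegionHole {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    // An empty region spanning ["a","b") to plug a hole in the region chain.
    HRegionInfo hole = new HRegionInfo(htd.getTableName(),
        Bytes.toBytes("a"), Bytes.toBytes("b"));
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, hole, htd);
    System.out.println("Created and closed region: "
        + region.getRegionInfo().getRegionNameAsString());
  }
}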
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index cc478a5..8ea7012 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -39,8 +39,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
/**
* Utility methods for interacting with the regions.
@@ -60,13 +61,13 @@ public abstract class ModifyRegionUtils {
void editRegion(final HRegionInfo region) throws IOException;
}
- public static HRegionInfo[] createHRegionInfos(TableDescriptor tableDescriptor,
+ public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
byte[][] splitKeys) {
long regionId = System.currentTimeMillis();
HRegionInfo[] hRegionInfos = null;
if (splitKeys == null || splitKeys.length == 0) {
hRegionInfos = new HRegionInfo[]{
- new HRegionInfo(tableDescriptor.getTableName(), null, null, false, regionId)
+ new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
};
} else {
int numRegions = splitKeys.length + 1;
@@ -76,7 +77,7 @@ public abstract class ModifyRegionUtils {
for (int i = 0; i < numRegions; i++) {
endKey = (i == splitKeys.length) ? null : splitKeys[i];
hRegionInfos[i] =
- new HRegionInfo(tableDescriptor.getTableName(), startKey, endKey,
+ new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
false, regionId);
startKey = endKey;
}
@@ -90,20 +91,20 @@ public abstract class ModifyRegionUtils {
*
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param tableDescriptor description of the table
+ * @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
- final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
- "RegionOpenAndInitThread-" + tableDescriptor.getTableName(), regionNumber);
+ "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
try {
- return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
+ return createRegions(exec, conf, rootDir, hTableDescriptor, newRegions, task);
} finally {
exec.shutdownNow();
}
@@ -116,15 +117,15 @@ public abstract class ModifyRegionUtils {
* @param exec Thread Pool Executor
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param tableDescriptor description of the table
+ * @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
- final Configuration conf, final Path rootDir,
- final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
- final RegionFillTask task) throws IOException {
+ final Configuration conf, final Path rootDir,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+ final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
@@ -133,7 +134,7 @@ public abstract class ModifyRegionUtils {
completionService.submit(new Callable<HRegionInfo>() {
@Override
public HRegionInfo call() throws IOException {
- return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
+ return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
}
});
}
@@ -155,20 +156,20 @@ public abstract class ModifyRegionUtils {
* Create new set of regions on the specified file-system.
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param tableDescriptor description of the table
+ * @param hTableDescriptor description of the table
* @param newRegion {@link HRegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
- final TableDescriptor tableDescriptor, final HRegionInfo newRegion,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
final RegionFillTask task) throws IOException {
// 1. Create HRegion
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
- HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
+ HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, hTableDescriptor, null, false);
try {
// 2. Custom user code to interact with the created region
if (task != null) {
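
createHRegionInfos derives the region layout from the split keys: no split keys yields a single region spanning the whole key space, otherwise splitKeys.length + 1 regions. A small sketch against the restored signature (names are placeholders):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public class RegionLayout {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    byte[][] splitKeys = { Bytes.toBytes("m") }; // one split key => two regions
    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
    for (HRegionInfo hri : regions) {
      System.out.println(hri.getRegionNameAsString());
    }
  }
}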
[6/8] hbase git commit: HBASE-18503 Change ***Util and Master to use
TableDescriptor and ColumnFamilyDescriptor
Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index fce4eaa..979a351 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
protected final SnapshotManifest snapshotManifest;
protected final SnapshotManager snapshotManager;
- protected HTableDescriptor htd;
+ protected TableDescriptor htd;
/**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
"Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
}
- private HTableDescriptor loadTableDescriptor()
+ private TableDescriptor loadTableDescriptor()
throws FileNotFoundException, IOException {
- HTableDescriptor htd =
+ TableDescriptor htd =
this.master.getTableDescriptors().get(snapshotTable);
if (htd == null) {
- throw new IOException("HTableDescriptor missing for " + snapshotTable);
+ throw new IOException("TableDescriptor missing for " + snapshotTable);
}
return htd;
}
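
loadTableDescriptor above illustrates the defensive pattern required because the descriptor lookup can return null. A generic sketch of the same guard, assuming the post-HBASE-18503 interface where TableDescriptors.get(...) returns TableDescriptor:

import java.io.IOException;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorGuard {
  static TableDescriptor require(TableDescriptors tds, TableName table) throws IOException {
    TableDescriptor htd = tds.get(table); // may be null if no descriptor exists
    if (htd == null) {
      throw new IOException("TableDescriptor missing for " + table);
    }
    return htd;
  }
}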
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index d4a54bb..b1d1415 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
- public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) throws IOException {
+ public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException {
Configuration conf = getConf();
TableName tn = TableName.valueOf(tableName);
FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
Connection connection = ConnectionFactory.createConnection(getConf());
Admin admin = connection.getAdmin();
try {
- HTableDescriptor htd = admin.getTableDescriptor(tn);
- HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
+ TableDescriptor htd = admin.listTableDescriptor(tn);
+ ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
if (family == null || !family.isMobEnabled()) {
throw new IOException("Column family " + familyName + " is not a MOB column family");
}
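
The cleaner now resolves the family through the interface types. A minimal sketch of the same MOB check extracted from the hunk above (only the wrapper names are placeholders):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyCheck {
  static ColumnFamilyDescriptor requireMobFamily(Admin admin, TableName tn, String familyName)
      throws IOException {
    TableDescriptor htd = admin.listTableDescriptor(tn);
    ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
    if (family == null || !family.isMobEnabled()) {
      throw new IOException("Column family " + familyName + " is not a MOB column family");
    }
    return family;
  }
}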
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 80bda28..4273098 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -44,10 +44,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
@@ -57,6 +55,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -286,7 +285,7 @@ public final class MobUtils {
* @throws IOException
*/
public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
- HColumnDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
+ ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
throws IOException {
long timeToLive = columnDescriptor.getTimeToLive();
if (Integer.MAX_VALUE == timeToLive) {
@@ -519,7 +518,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
+ ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@@ -543,7 +542,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
+ ColumnFamilyDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
return createWriter(conf, fs, family,
@@ -570,7 +569,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
+ ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@@ -596,7 +595,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
+ ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext)
throws IOException {
@@ -623,7 +622,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
+ ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
boolean isCompaction)
throws IOException {
@@ -797,7 +796,7 @@ public final class MobUtils {
* @param allFiles Whether add all mob files into the compaction.
*/
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
+ ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
throws IOException {
String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
PartitionedMobCompactor.class.getName());
@@ -805,7 +804,7 @@ public final class MobUtils {
MobCompactor compactor = null;
try {
compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
- Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class,
+ Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class,
ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
} catch (Exception e) {
throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
@@ -857,9 +856,9 @@ public final class MobUtils {
* @param htd The current table descriptor.
* @return Whether this table has mob-enabled columns.
*/
- public static boolean hasMobColumns(HTableDescriptor htd) {
- HColumnDescriptor[] hcds = htd.getColumnFamilies();
- for (HColumnDescriptor hcd : hcds) {
+ public static boolean hasMobColumns(TableDescriptor htd) {
+ ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
+ for (ColumnFamilyDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
return true;
}
@@ -899,7 +898,7 @@ public final class MobUtils {
* @param fileDate The date string parsed from the mob file name.
* @return True if the mob file is expired.
*/
- public static boolean isMobFileExpired(HColumnDescriptor column, long current, String fileDate) {
+ public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) {
if (column.getMinVersions() > 0) {
return false;
}
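
doMobCompaction above keeps the compactor pluggable: the implementation class comes from configuration and is instantiated reflectively against the ColumnFamilyDescriptor-based constructor. A sketch of that factory pattern, using only calls visible in the hunk:

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.compactions.MobCompactor;
import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;
import org.apache.hadoop.hbase.util.ReflectionUtils;

public class CompactorFactory {
  static MobCompactor create(Configuration conf, FileSystem fs, TableName tableName,
      ColumnFamilyDescriptor hcd, ExecutorService pool) throws IOException {
    // Default to PartitionedMobCompactor unless overridden in the config.
    String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
        PartitionedMobCompactor.class.getName());
    try {
      return ReflectionUtils.instantiateWithCustomCtor(className,
          new Class[] { Configuration.class, FileSystem.class, TableName.class,
              ColumnFamilyDescriptor.class, ExecutorService.class },
          new Object[] { conf, fs, tableName, hcd, pool });
    } catch (Exception e) {
      throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
    }
  }
}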
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
index 77de0cd..7ebdbc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -42,14 +42,14 @@ public abstract class MobCompactor {
protected FileSystem fs;
protected Configuration conf;
protected TableName tableName;
- protected HColumnDescriptor column;
+ protected ColumnFamilyDescriptor column;
protected Path mobTableDir;
protected Path mobFamilyDir;
protected ExecutorService pool;
public MobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool) {
+ ColumnFamilyDescriptor column, ExecutorService pool) {
this.conf = conf;
this.fs = fs;
this.tableName = tableName;
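Since doMobCompaction instantiates the compactor reflectively (see the MobUtils hunk above), a custom implementation only has to expose the new five-argument constructor. A hypothetical no-op compactor, named here purely for illustration:

  import java.io.IOException;
  import java.util.Collections;
  import java.util.List;
  import java.util.concurrent.ExecutorService;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.mob.compactions.MobCompactor;

  public class NoOpMobCompactor extends MobCompactor {
    public NoOpMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
        ColumnFamilyDescriptor column, ExecutorService pool) {
      super(conf, fs, tableName, column, pool);  // matches the Class[] used by ReflectionUtils
    }

    @Override
    public List<Path> compact(List<FileStatus> files, boolean allFiles) throws IOException {
      return Collections.emptyList();  // deliberately compacts nothing
    }
  }

Registering it is then a matter of setting MobConstants.MOB_COMPACTOR_CLASS_KEY to the class name, since the doMobCompaction hunk reads that key.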
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index d37292c..da664cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -45,13 +45,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
@@ -109,7 +109,7 @@ public class PartitionedMobCompactor extends MobCompactor {
private Encryption.Context cryptoContext = Encryption.Context.NONE;
public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool) throws IOException {
+ ColumnFamilyDescriptor column, ExecutorService pool) throws IOException {
super(conf, fs, tableName, column, pool);
mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index e1d2ea1..eb9a5f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.JobUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -109,13 +109,13 @@ public class CompactionTool extends Configured implements Tool {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
- HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri,
path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
- HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
@@ -127,13 +127,13 @@ public class CompactionTool extends Configured implements Tool {
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
throws IOException {
- HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+ TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
compactRegion(tableDir, htd, regionDir, compactOnce, major);
}
}
- private void compactRegion(final Path tableDir, final HTableDescriptor htd,
+ private void compactRegion(final Path tableDir, final TableDescriptor htd,
final Path regionDir, final boolean compactOnce, final boolean major)
throws IOException {
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
@@ -147,7 +147,7 @@ public class CompactionTool extends Configured implements Tool {
* If the compact once flag is not specified, execute the compaction until
* no more compactions are needed. Uses the Configuration settings provided.
*/
- private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
+ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
final HRegionInfo hri, final String familyName, final boolean compactOnce,
final boolean major) throws IOException {
HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
@@ -177,7 +177,7 @@ public class CompactionTool extends Configured implements Tool {
* the store dir to compact as source.
*/
private static HStore getStore(final Configuration conf, final FileSystem fs,
- final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
+ final Path tableDir, final TableDescriptor htd, final HRegionInfo hri,
final String familyName, final Path tempDir) throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
@Override
@@ -186,7 +186,7 @@ public class CompactionTool extends Configured implements Tool {
}
};
HRegion region = new HRegion(regionFs, null, conf, htd, null);
- return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
+ return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
}
}
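For tools built against the old accessor, the rename is mechanical; a short sketch (family name illustrative; htd, region, and conf assumed in scope):

  // before: htd.getFamily(Bytes.toBytes("cf"))
  ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes("cf"));
  HStore store = new HStore(region, family, conf);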
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 9cb1316..74a2998 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
@@ -340,8 +340,8 @@ public class HRegionFileSystem {
* @return true if region has reference file
* @throws IOException
*/
- public boolean hasReferences(final HTableDescriptor htd) throws IOException {
- for (HColumnDescriptor family : htd.getFamilies()) {
+ public boolean hasReferences(final TableDescriptor htd) throws IOException {
+ for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
if (hasReferences(family.getNameAsString())) {
return true;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 28b7a43..ae2f7dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -50,6 +50,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
@@ -89,6 +90,7 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -700,7 +702,11 @@ public class HRegionServer extends HasThread implements
protected TableDescriptors getFsTableDescriptors() throws IOException {
return new FSTableDescriptors(this.conf,
- this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
+ this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
+ }
+
+ protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
+ return null;
}
protected void setInitLatch(CountDownLatch latch) {
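The null observer above is the default; a sketch of what a subclass might plug in, assuming only the Function hook added here (HMaster's real logic lives in finishActiveMasterInitialization and may differ):

  @Override
  protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
    // e.g. size meta's region replication from configuration
    return builder -> builder.setRegionReplication(
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
  }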
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9a25275..7fc025a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
@@ -77,6 +76,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
@@ -1859,7 +1859,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
final int regionCount = request.getOpenInfoCount();
- final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
+ final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
final boolean isBulkAssign = regionCount > 1;
try {
checkOpen();
@@ -1898,7 +1898,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
- HTableDescriptor htd;
+ TableDescriptor htd;
try {
String encodedName = region.getEncodedName();
byte[] encodedNameBytes = region.getEncodedNameAsBytes();
@@ -2020,7 +2020,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
RegionInfo regionInfo = request.getRegionInfo();
final HRegionInfo region = HRegionInfo.convert(regionInfo);
- HTableDescriptor htd;
+ TableDescriptor htd;
WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
try {
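The htds map above is a per-request cache, so a bulk assign touching many regions of one table resolves the descriptor once. A pattern sketch (assuming a regionServer.tableDescriptors field this class uses elsewhere; error handling elided):

  final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
  TableDescriptor htd = htds.get(region.getTable());
  if (htd == null) {
    htd = regionServer.tableDescriptors.get(region.getTable());  // one lookup per table
    htds.put(region.getTable(), htd);
  }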
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
index e49b164..6913ecd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenMetaHandler extends OpenRegionHandler {
public OpenMetaHandler(final Server server,
- final RegionServerServices rsServices, HRegionInfo regionInfo,
- final HTableDescriptor htd, long masterSystemTime) {
+ final RegionServerServices rsServices, HRegionInfo regionInfo,
+ final TableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
index 83d4d3f..ced9ef2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
@@ -19,11 +19,10 @@
package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
/**
@@ -34,7 +33,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenPriorityRegionHandler extends OpenRegionHandler {
public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices,
- HRegionInfo regionInfo, HTableDescriptor htd, long masterSystemTime) {
+ HRegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime,
EventType.M_RS_OPEN_PRIORITY_REGION);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 8369100..bbb084c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -25,8 +25,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@@ -48,18 +48,18 @@ public class OpenRegionHandler extends EventHandler {
protected final RegionServerServices rsServices;
private final HRegionInfo regionInfo;
- private final HTableDescriptor htd;
+ private final TableDescriptor htd;
private final long masterSystemTime;
public OpenRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
- HTableDescriptor htd, long masterSystemTime) {
+ TableDescriptor htd, long masterSystemTime) {
this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
}
protected OpenRegionHandler(final Server server,
- final RegionServerServices rsServices, final HRegionInfo regionInfo,
- final HTableDescriptor htd, long masterSystemTime, EventType eventType) {
+ final RegionServerServices rsServices, final HRegionInfo regionInfo,
+ final TableDescriptor htd, long masterSystemTime, EventType eventType) {
super(server, eventType);
this.rsServices = rsServices;
this.regionInfo = regionInfo;
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 9f600da..4ea0434 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@@ -55,6 +54,7 @@ import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
@@ -399,7 +399,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
if (requiresReplication == null) {
// check if the table requires memstore replication
      // some unit tests drop the table, so we should do a bypass check and always replicate.
- HTableDescriptor htd = tableDescriptors.get(tableName);
+ TableDescriptor htd = tableDescriptors.get(tableName);
requiresReplication = htd == null || htd.hasRegionMemstoreReplication();
memstoreReplicationEnabled.put(tableName, requiresReplication);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 9875ac0..cae4c7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -34,6 +34,7 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadPoolExecutor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -43,7 +44,6 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -125,7 +125,7 @@ public class RestoreSnapshotHelper {
private final SnapshotDescription snapshotDesc;
private final TableName snapshotTable;
- private final HTableDescriptor tableDesc;
+ private final TableDescriptor tableDesc;
private final Path rootDir;
private final Path tableDir;
@@ -136,7 +136,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
- final HTableDescriptor tableDescriptor,
+ final TableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status) {
@@ -146,7 +146,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
- final HTableDescriptor tableDescriptor,
+ final TableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status,
@@ -265,18 +265,18 @@ public class RestoreSnapshotHelper {
*/
public static class RestoreMetaChanges {
private final Map<String, Pair<String, String> > parentsMap;
- private final HTableDescriptor htd;
+ private final TableDescriptor htd;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
- public RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
+ public RestoreMetaChanges(TableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
this.parentsMap = parentsMap;
this.htd = htd;
}
- public HTableDescriptor getTableDescriptor() {
+ public TableDescriptor getTableDescriptor() {
return htd;
}
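A hedged sketch of consuming RestoreMetaChanges after a restore; restoreHdfsRegions() is assumed as the entry point since it is not shown in this hunk:

  RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, manifest,
      tableDescriptor, rootDir, monitor, status);
  RestoreSnapshotHelper.RestoreMetaChanges metaChanges = helper.restoreHdfsRegions();
  TableDescriptor restored = metaChanges.getTableDescriptor();  // TableDescriptor, no longer HTableDescriptor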
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index f70fe9e..32cdabf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -76,7 +76,7 @@ public final class SnapshotManifest {
private List<SnapshotRegionManifest> regionManifests;
private SnapshotDescription desc;
- private HTableDescriptor htd;
+ private TableDescriptor htd;
private final ForeignExceptionSnare monitor;
private final Configuration conf;
@@ -119,7 +119,7 @@ public final class SnapshotManifest {
/**
* Return a SnapshotManifest instance with the information already loaded in-memory.
* SnapshotManifest manifest = SnapshotManifest.open(...)
- * HTableDescriptor htd = manifest.getTableDescriptor()
+ * TableDescriptor htd = manifest.getTableDescriptor()
* for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests())
* hri = regionManifest.getRegionInfo()
* for (regionManifest.getFamilyFiles())
@@ -136,7 +136,7 @@ public final class SnapshotManifest {
/**
* Add the table descriptor to the snapshot manifest
*/
- public void addTableDescriptor(final HTableDescriptor htd) throws IOException {
+ public void addTableDescriptor(final TableDescriptor htd) throws IOException {
this.htd = htd;
}
@@ -182,7 +182,7 @@ public final class SnapshotManifest {
LOG.debug("Creating references for mob files");
Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
// 2.1. build the snapshot reference for the store if it's a mob store
if (!hcd.isMobEnabled()) {
continue;
@@ -377,7 +377,7 @@ public final class SnapshotManifest {
case SnapshotManifestV2.DESCRIPTOR_VERSION: {
SnapshotDataManifest dataManifest = readDataManifest();
if (dataManifest != null) {
- htd = ProtobufUtil.convertToHTableDesc(dataManifest.getTableSchema());
+ htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema());
regionManifests = dataManifest.getRegionManifestsList();
} else {
// Compatibility, load the v1 regions
@@ -429,7 +429,7 @@ public final class SnapshotManifest {
/**
* Get the table descriptor from the Snapshot
*/
- public HTableDescriptor getTableDescriptor() {
+ public TableDescriptor getTableDescriptor() {
return this.htd;
}
@@ -485,7 +485,7 @@ public final class SnapshotManifest {
}
SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
- dataManifestBuilder.setTableSchema(ProtobufUtil.convertToTableSchema(htd));
+ dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd));
if (v1Regions != null && v1Regions.size() > 0) {
dataManifestBuilder.addAllRegionManifests(v1Regions);
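Expanding the javadoc usage example above into statements (open()'s arguments are assumed; the javadoc elides them):

  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
  TableDescriptor htd = manifest.getTableDescriptor();
  for (SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
    HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
    // walk regionManifest's family files per the javadoc sketch
  }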
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index eb6b766..a73883b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -24,11 +24,10 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
@@ -40,17 +39,19 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -79,10 +80,14 @@ public class FSTableDescriptors implements TableDescriptors {
private volatile boolean usecache;
private volatile boolean fsvisited;
- @VisibleForTesting long cachehits = 0;
- @VisibleForTesting long invocations = 0;
+ @VisibleForTesting
+ long cachehits = 0;
+ @VisibleForTesting
+ long invocations = 0;
- /** The file name prefix used to store HTD in HDFS */
+ /**
+ * The file name prefix used to store HTD in HDFS
+ */
static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
static final String TABLEINFO_DIR = ".tabledesc";
static final String TMP_DIR = ".tmp";
@@ -90,12 +95,12 @@ public class FSTableDescriptors implements TableDescriptors {
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
- private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<>();
+ private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
- private final HTableDescriptor metaTableDescriptor;
+ private final TableDescriptor metaTableDescriptor;
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -107,91 +112,112 @@ public class FSTableDescriptors implements TableDescriptors {
}
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
- throws IOException {
+ throws IOException {
this(conf, fs, rootdir, false, true);
}
/**
* @param fsreadonly True if we are read-only when it comes to filesystem
- * operations; i.e. on remove, we do not do delete in fs.
+ * operations; i.e. on remove, we do not do delete in fs.
+ */
+ public FSTableDescriptors(final Configuration conf, final FileSystem fs,
+ final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
+ this(conf, fs, rootdir, fsreadonly, usecache, null);
+ }
+
+ /**
+ * @param fsreadonly True if we are read-only when it comes to filesystem
+ * operations; i.e. on remove, we do not do delete in fs.
+   * @param metaObserver Used by HMaster. It needs to modify the META_REPLICAS_NUM for the meta
+   * table descriptor; see HMaster#finishActiveMasterInitialization.
+   * TODO: This is a workaround. Should remove this ugly code...
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
- final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
- super();
+ final Path rootdir, final boolean fsreadonly, final boolean usecache,
+ Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
+ this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
+ : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+ }
- this.metaTableDescriptor = createMetaTableDescriptor(conf);
+ @VisibleForTesting
+ public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
+ return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true)
+ .build())
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true)
+ .build())
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true)
+ .build())
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY)
+ .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+ HConstants.DEFAULT_HBASE_META_VERSIONS))
+ .setInMemory(true)
+ .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+ HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true)
+ .build())
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
+ // Ten is arbitrary number. Keep versions to help debugging.
+ .setMaxVersions(10)
+ .setInMemory(true)
+ .setBlocksize(8 * 1024)
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
+ // Enable cache of data blocks in L1 if more than one caching tier deployed:
+ // e.g. if using CombinedBlockCache (BucketCache).
+ .setCacheDataInL1(true)
+ .build())
+ .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+ null, Coprocessor.PRIORITY_SYSTEM, null);
}
@VisibleForTesting
- public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
+ public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
throws IOException {
- return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
- .addColumnFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true))
- .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true))
- .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true))
- .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
- .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
- HConstants.DEFAULT_HBASE_META_VERSIONS))
- .setInMemory(true)
- .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
- HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true))
- .addColumnFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY)
- // Ten is arbitrary number. Keep versions to help debugging.
- .setMaxVersions(10)
- .setInMemory(true)
- .setBlocksize(8 * 1024)
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
- // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
- .setBloomFilterType(BloomType.NONE)
- // Enable cache of data blocks in L1 if more than one caching tier deployed:
- // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true))
- .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
- null, Coprocessor.PRIORITY_SYSTEM, null)
- .build());
+ return createMetaTableDescriptorBuilder(conf).build();
}
@Override
@@ -219,7 +245,7 @@ public class FSTableDescriptors implements TableDescriptors {
*/
@Override
@Nullable
- public HTableDescriptor get(final TableName tablename)
+ public TableDescriptor get(final TableName tablename)
throws IOException {
invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
@@ -234,13 +260,13 @@ public class FSTableDescriptors implements TableDescriptors {
if (usecache) {
// Look in cache of descriptors.
- HTableDescriptor cachedtdm = this.cache.get(tablename);
+ TableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
}
- HTableDescriptor tdmt = null;
+ TableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
} catch (NullPointerException e) {
@@ -264,21 +290,21 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
- public Map<String, HTableDescriptor> getAllDescriptors()
+ public Map<String, TableDescriptor> getAllDescriptors()
throws IOException {
- Map<String, HTableDescriptor> tds = new TreeMap<>();
+ Map<String, TableDescriptor> tds = new TreeMap<>();
if (fsvisited && usecache) {
- for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
+ for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
- tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
+ tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
- HTableDescriptor htd = null;
+ TableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@@ -301,10 +327,10 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
- public Map<String, HTableDescriptor> getAll() throws IOException {
- Map<String, HTableDescriptor> htds = new TreeMap<>();
- Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
- for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
+ public Map<String, TableDescriptor> getAll() throws IOException {
+ Map<String, TableDescriptor> htds = new TreeMap<>();
+ Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
+ for (Map.Entry<String, TableDescriptor> entry : allDescriptors
.entrySet()) {
htds.put(entry.getKey(), entry.getValue());
}
@@ -316,13 +342,13 @@ public class FSTableDescriptors implements TableDescriptors {
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
- public Map<String, HTableDescriptor> getByNamespace(String name)
+ public Map<String, TableDescriptor> getByNamespace(String name)
throws IOException {
- Map<String, HTableDescriptor> htds = new TreeMap<>();
+ Map<String, TableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs =
FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
for (Path d: tableDirs) {
- HTableDescriptor htd = null;
+ TableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@@ -340,7 +366,7 @@ public class FSTableDescriptors implements TableDescriptors {
* and updates the local cache with it.
*/
@Override
- public void add(HTableDescriptor htd) throws IOException {
+ public void add(TableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
@@ -351,7 +377,7 @@ public class FSTableDescriptors implements TableDescriptors {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: "
- + htd.getNameAsString());
+ + htd.getTableName().getNameAsString());
}
updateTableDescriptor(htd);
}
@@ -362,7 +388,7 @@ public class FSTableDescriptors implements TableDescriptors {
* from the FileSystem.
*/
@Override
- public HTableDescriptor remove(final TableName tablename)
+ public TableDescriptor remove(final TableName tablename)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
@@ -373,7 +399,7 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
- HTableDescriptor descriptor = this.cache.remove(tablename);
+ TableDescriptor descriptor = this.cache.remove(tablename);
return descriptor;
}
@@ -557,7 +583,7 @@ public class FSTableDescriptors implements TableDescriptors {
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
- public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+ public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
@@ -568,7 +594,7 @@ public class FSTableDescriptors implements TableDescriptors {
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
- public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+ public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
@@ -577,7 +603,7 @@ public class FSTableDescriptors implements TableDescriptors {
return readTableDescriptor(fs, status);
}
- private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
+ private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
@@ -587,9 +613,9 @@ public class FSTableDescriptors implements TableDescriptors {
} finally {
fsDataInputStream.close();
}
- HTableDescriptor htd = null;
+ TableDescriptor htd = null;
try {
- htd = HTableDescriptor.parseFrom(content);
+ htd = TableDescriptorBuilder.parseFrom(content);
} catch (DeserializationException e) {
throw new IOException("content=" + Bytes.toShort(content), e);
}
@@ -601,7 +627,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
- @VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
+ @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
@@ -663,7 +689,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
- final HTableDescriptor htd, final Path tableDir,
+ final TableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@@ -718,42 +744,42 @@ public class FSTableDescriptors implements TableDescriptors {
return tableInfoDirPath;
}
- private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
+ private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
// We used to write this file out as a serialized HTD Writable followed by two '\n's and then
// the toString version of HTD. Now we just write out the pb serialization.
- out.write(htd.toByteArray());
+ out.write(TableDescriptorBuilder.toByteArray(htd));
} finally {
out.close();
}
}
/**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+ * Create new TableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
- public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+ public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+ * Create new TableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @return True if we successfully created file.
*/
- public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
+ public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
throws IOException {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
- * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
+ * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
@@ -764,7 +790,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException if a filesystem error occurs
*/
public boolean createTableDescriptorForTableDirectory(Path tableDir,
- HTableDescriptor htd, boolean forceCreation) throws IOException {
+ TableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}
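A minimal sketch exercising the new six-argument constructor added above; values are illustrative, with conf, fs, and rootdir assumed in scope:

  Function<TableDescriptorBuilder, TableDescriptorBuilder> observer =
      builder -> builder.setRegionReplication(3);  // e.g. three meta replicas
  FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir,
      false /* fsreadonly */, true /* usecache */, observer);
  TableDescriptor meta = fstd.get(TableName.META_TABLE_NAME);  // observer-adjusted meta descriptor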
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index ff5d482..199ed7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
@@ -84,11 +88,9 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -939,7 +941,7 @@ public class HBaseFsck extends Configured implements Closeable {
TableName tableName = hi.getTableName();
TableInfo tableInfo = tablesInfo.get(tableName);
Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
- HTableDescriptor template = tableInfo.getHTD();
+ TableDescriptor template = tableInfo.getHTD();
// find min and max key values
Pair<byte[],byte[]> orphanRegionRange = null;
@@ -1200,17 +1202,17 @@ public class HBaseFsck extends Configured implements Closeable {
*/
private void reportTablesInFlux() {
AtomicInteger numSkipped = new AtomicInteger(0);
- HTableDescriptor[] allTables = getTables(numSkipped);
+ TableDescriptor[] allTables = getTables(numSkipped);
errors.print("Number of Tables: " + allTables.length);
if (details) {
if (numSkipped.get() > 0) {
errors.detail("Number of Tables in flux: " + numSkipped.get());
}
- for (HTableDescriptor td : allTables) {
+ for (TableDescriptor td : allTables) {
errors.detail(" Table: " + td.getTableName() + "\t" +
(td.isReadOnly() ? "ro" : "rw") + "\t" +
(td.isMetaRegion() ? "META" : " ") + "\t" +
- " families: " + td.getFamilies().size());
+ " families: " + td.getColumnFamilyCount());
}
}
}
@@ -1314,7 +1316,7 @@ public class HBaseFsck extends Configured implements Closeable {
modTInfo = new TableInfo(tableName);
tablesInfo.put(tableName, modTInfo);
try {
- HTableDescriptor htd =
+ TableDescriptor htd =
FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
modTInfo.htds.add(htd);
} catch (IOException ioe) {
@@ -1361,17 +1363,17 @@ public class HBaseFsck extends Configured implements Closeable {
* To fabricate a .tableinfo file with following contents<br>
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
- * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
+ * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
* @throws IOException
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false;
- HTableDescriptor htd = new HTableDescriptor(tableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    for (String columnfamily : columns) {
-      htd.addFamily(new HColumnDescriptor(columnfamily));
+      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamily));
}
- fstd.createTableDescriptor(htd, true);
+ fstd.createTableDescriptor(builder.build(), true);
return true;
}
@@ -1396,7 +1398,7 @@ public class HBaseFsck extends Configured implements Closeable {
* 2. else create a default .tableinfo file with following items<br>
* 2.1 the correct tablename <br>
* 2.2 the correct colfamily list<br>
- * 2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
+ * 2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
* @throws IOException
*/
public void fixOrphanTables() throws IOException {
@@ -1404,7 +1406,7 @@ public class HBaseFsck extends Configured implements Closeable {
List<TableName> tmpList = new ArrayList<>(orphanTableDirs.keySet().size());
tmpList.addAll(orphanTableDirs.keySet());
- HTableDescriptor[] htds = getHTableDescriptors(tmpList);
+ TableDescriptor[] htds = getTableDescriptors(tmpList);
Iterator<Entry<TableName, Set<String>>> iter =
orphanTableDirs.entrySet().iterator();
int j = 0;
@@ -1417,7 +1419,7 @@ public class HBaseFsck extends Configured implements Closeable {
LOG.info("Trying to fix orphan table error: " + tableName);
if (j < htds.length) {
if (tableName.equals(htds[j].getTableName())) {
- HTableDescriptor htd = htds[j];
+ TableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache");
fstd.createTableDescriptor(htd, true);
j++;
@@ -1426,7 +1428,7 @@ public class HBaseFsck extends Configured implements Closeable {
} else {
if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
- LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
+ LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName);
iter.remove();
} else {
LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information");
@@ -1463,7 +1465,7 @@ public class HBaseFsck extends Configured implements Closeable {
Path rootdir = FSUtils.getRootDir(getConf());
Configuration c = getConf();
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
- HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+ TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
@@ -2646,8 +2648,8 @@ public class HBaseFsck extends Configured implements Closeable {
* regions reported for the table, but table dir is there in hdfs
*/
private void loadTableInfosForTablesWithNoRegion() throws IOException {
- Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
- for (HTableDescriptor htd : allTables.values()) {
+ Map<String, TableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
+ for (TableDescriptor htd : allTables.values()) {
if (checkMetaOnly && !htd.isMetaTable()) {
continue;
}
@@ -2770,8 +2772,8 @@ public class HBaseFsck extends Configured implements Closeable {
// region split calculator
final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<>(cmp);
- // Histogram of different HTableDescriptors found. Ideally there is only one!
- final Set<HTableDescriptor> htds = new HashSet<>();
+ // Histogram of different TableDescriptors found. Ideally there is only one!
+ final Set<TableDescriptor> htds = new HashSet<>();
// key = start split, values = set of splits in problem group
final Multimap<byte[], HbckInfo> overlapGroups =
@@ -2788,9 +2790,9 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* @return descriptor common to all regions; null if there are none or multiple!
*/
- private HTableDescriptor getHTD() {
+ private TableDescriptor getHTD() {
if (htds.size() == 1) {
- return (HTableDescriptor)htds.toArray()[0];
+ return (TableDescriptor)htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '"
+ tableName + "' regions: " + htds);
@@ -2960,7 +2962,7 @@ public class HBaseFsck extends Configured implements Closeable {
"First region should start with an empty key. Creating a new " +
"region and regioninfo in HDFS to plug the hole.",
getTableInfo(), next);
- HTableDescriptor htd = getTableInfo().getHTD();
+ TableDescriptor htd = getTableInfo().getHTD();
// from special EMPTY_START_ROW to next region's startKey
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
HConstants.EMPTY_START_ROW, next.getStartKey());
@@ -2977,7 +2979,7 @@ public class HBaseFsck extends Configured implements Closeable {
errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
"Last region should end with an empty key. Creating a new "
+ "region and regioninfo in HDFS to plug the hole.", getTableInfo());
- HTableDescriptor htd = getTableInfo().getHTD();
+ TableDescriptor htd = getTableInfo().getHTD();
// from curEndKey to EMPTY_START_ROW
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
HConstants.EMPTY_START_ROW);
@@ -3001,7 +3003,7 @@ public class HBaseFsck extends Configured implements Closeable {
+ Bytes.toStringBinary(holeStopKey)
+ ". Creating a new regioninfo and region "
+ "dir in hdfs to plug the hole.");
- HTableDescriptor htd = getTableInfo().getHTD();
+ TableDescriptor htd = getTableInfo().getHTD();
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
@@ -3202,7 +3204,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
// create new empty container region.
- HTableDescriptor htd = getTableInfo().getHTD();
+ TableDescriptor htd = getTableInfo().getHTD();
// from start key to end Key
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
range.getSecond());
@@ -3503,7 +3505,7 @@ public class HBaseFsck extends Configured implements Closeable {
* @return tables that have not been modified recently
* @throws IOException if an error is encountered
*/
- HTableDescriptor[] getTables(AtomicInteger numSkipped) {
+ TableDescriptor[] getTables(AtomicInteger numSkipped) {
List<TableName> tableNames = new ArrayList<>();
long now = EnvironmentEdgeManager.currentTime();
@@ -3520,19 +3522,19 @@ public class HBaseFsck extends Configured implements Closeable {
}
}
}
- return getHTableDescriptors(tableNames);
+ return getTableDescriptors(tableNames);
}
- HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
- HTableDescriptor[] htd = new HTableDescriptor[0];
- LOG.info("getHTableDescriptors == tableNames => " + tableNames);
+ TableDescriptor[] getTableDescriptors(List<TableName> tableNames) {
+ LOG.info("getTableDescriptors == tableNames => " + tableNames);
try (Connection conn = ConnectionFactory.createConnection(getConf());
Admin admin = conn.getAdmin()) {
- htd = admin.getTableDescriptorsByTableName(tableNames);
+ List<TableDescriptor> tds = admin.listTableDescriptors(tableNames);
+ return tds.toArray(new TableDescriptor[tds.size()]);
} catch (IOException e) {
LOG.debug("Exception getting table descriptors", e);
}
- return htd;
+ return new TableDescriptor[0];
}
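listTableDescriptors returns a List rather than the old array; a minimal sketch of the lookup, assuming an open cluster Configuration (the method wrapper and table name are illustrative):

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

static TableDescriptor[] fetch(Configuration conf) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    // listTableDescriptors replaces the deprecated array-returning call
    List<TableDescriptor> tds =
        admin.listTableDescriptors(Collections.singletonList(TableName.valueOf("t1")));
    return tds.toArray(new TableDescriptor[tds.size()]);
  }
}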
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 961e8a0..6552b32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -188,7 +188,7 @@ public class HBaseFsckRepair {
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf,
- HRegionInfo hri, HTableDescriptor htd) throws IOException {
+ HRegionInfo hri, TableDescriptor htd) throws IOException {
// Create HRegion
Path root = FSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 8ea7012..cc478a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -39,9 +39,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
/**
* Utility methods for interacting with the regions.
@@ -61,13 +60,13 @@ public abstract class ModifyRegionUtils {
void editRegion(final HRegionInfo region) throws IOException;
}
- public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
+ public static HRegionInfo[] createHRegionInfos(TableDescriptor tableDescriptor,
byte[][] splitKeys) {
long regionId = System.currentTimeMillis();
HRegionInfo[] hRegionInfos = null;
if (splitKeys == null || splitKeys.length == 0) {
hRegionInfos = new HRegionInfo[]{
- new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
+ new HRegionInfo(tableDescriptor.getTableName(), null, null, false, regionId)
};
} else {
int numRegions = splitKeys.length + 1;
@@ -77,7 +76,7 @@ public abstract class ModifyRegionUtils {
for (int i = 0; i < numRegions; i++) {
endKey = (i == splitKeys.length) ? null : splitKeys[i];
hRegionInfos[i] =
- new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
+ new HRegionInfo(tableDescriptor.getTableName(), startKey, endKey,
false, regionId);
startKey = endKey;
}
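The split-key handling above always yields splitKeys.length + 1 regions; a small sketch using the TableDescriptor-taking signature shown in this hunk (table name and keys are illustrative):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

static void splitExample() {
  // Two split keys produce three regions: [null,"b"), ["b","m"), ["m",null).
  byte[][] splitKeys = { Bytes.toBytes("b"), Bytes.toBytes("m") };
  HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(
      TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).build(), splitKeys);
  assert regions.length == splitKeys.length + 1;
}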
@@ -91,20 +90,20 @@ public abstract class ModifyRegionUtils {
*
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param hTableDescriptor description of the table
+ * @param tableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
- final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+ final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
- "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
+ "RegionOpenAndInitThread-" + tableDescriptor.getTableName(), regionNumber);
try {
- return createRegions(exec, conf, rootDir, hTableDescriptor, newRegions, task);
+ return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
} finally {
exec.shutdownNow();
}
@@ -117,15 +116,15 @@ public abstract class ModifyRegionUtils {
* @param exec Thread Pool Executor
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param hTableDescriptor description of the table
+ * @param tableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
- final Configuration conf, final Path rootDir,
- final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
- final RegionFillTask task) throws IOException {
+ final Configuration conf, final Path rootDir,
+ final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+ final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
@@ -134,7 +133,7 @@ public abstract class ModifyRegionUtils {
completionService.submit(new Callable<HRegionInfo>() {
@Override
public HRegionInfo call() throws IOException {
- return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
+ return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
}
});
}
@@ -156,20 +155,20 @@ public abstract class ModifyRegionUtils {
* Create new set of regions on the specified file-system.
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
- * @param hTableDescriptor description of the table
+ * @param tableDescriptor description of the table
* @param newRegion {@link HRegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
- final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
+ final TableDescriptor tableDescriptor, final HRegionInfo newRegion,
final RegionFillTask task) throws IOException {
// 1. Create HRegion
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
- HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, hTableDescriptor, null, false);
+ HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
try {
// 2. Custom user code to interact with the created region
if (task != null) {
[4/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master
to use TableDescriptor and ColumnFamilyDescriptor" Wrong author information
This reverts commit b03348630c145aa6cc29f0f295442c6deb28a28e.
Posted by ch...@apache.org.
Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor"
Wrong author information
This reverts commit b03348630c145aa6cc29f0f295442c6deb28a28e.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12f2b02a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12f2b02a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12f2b02a
Branch: refs/heads/master
Commit: 12f2b02a805817bda2800bf1017c6b1c58dba866
Parents: b033486
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Thu Aug 24 13:02:11 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Thu Aug 24 13:02:11 2017 +0800
----------------------------------------------------------------------
.../hadoop/hbase/backup/util/BackupUtils.java | 4 +-
.../hadoop/hbase/backup/util/RestoreTool.java | 48 ++--
.../apache/hadoop/hbase/HColumnDescriptor.java | 11 +-
.../apache/hadoop/hbase/HTableDescriptor.java | 11 +-
.../client/ColumnFamilyDescriptorBuilder.java | 13 +-
.../apache/hadoop/hbase/client/HBaseAdmin.java | 41 ++-
.../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 2 +-
.../hbase/client/TableDescriptorBuilder.java | 20 +-
.../hbase/shaded/protobuf/ProtobufUtil.java | 101 +++++++-
.../hbase/shaded/protobuf/RequestConverter.java | 18 +-
.../apache/hadoop/hbase/TableDescriptors.java | 15 +-
.../hbase/client/ClientSideRegionScanner.java | 3 +-
.../hbase/client/TableSnapshotScanner.java | 3 +-
.../mapreduce/TableSnapshotInputFormatImpl.java | 18 +-
.../hadoop/hbase/master/CatalogJanitor.java | 13 +-
.../master/ExpiredMobFileCleanerChore.java | 10 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 86 ++++---
.../hadoop/hbase/master/MasterFileSystem.java | 24 +-
.../hbase/master/MasterMobCompactionThread.java | 10 +-
.../hadoop/hbase/master/MasterRpcServices.java | 25 +-
.../hadoop/hbase/master/MasterServices.java | 15 +-
.../hadoop/hbase/master/MobCompactionChore.java | 10 +-
.../hadoop/hbase/master/TableStateManager.java | 6 +-
.../assignment/MergeTableRegionsProcedure.java | 10 +-
.../master/assignment/RegionStateStore.java | 12 +-
.../master/balancer/RegionLocationFinder.java | 12 +-
.../master/cleaner/ReplicationMetaCleaner.java | 10 +-
.../procedure/AddColumnFamilyProcedure.java | 50 ++--
.../procedure/CloneSnapshotProcedure.java | 51 ++--
.../master/procedure/CreateTableProcedure.java | 66 ++---
.../procedure/DeleteColumnFamilyProcedure.java | 37 ++-
.../procedure/ModifyColumnFamilyProcedure.java | 43 ++--
.../master/procedure/ModifyTableProcedure.java | 75 +++---
.../procedure/RestoreSnapshotProcedure.java | 34 +--
.../procedure/TruncateTableProcedure.java | 22 +-
.../master/snapshot/MasterSnapshotVerifier.java | 8 +-
.../hbase/master/snapshot/SnapshotManager.java | 29 +--
.../master/snapshot/TakeSnapshotHandler.java | 10 +-
.../hadoop/hbase/mob/ExpiredMobFileCleaner.java | 10 +-
.../org/apache/hadoop/hbase/mob/MobUtils.java | 27 +-
.../hbase/mob/compactions/MobCompactor.java | 6 +-
.../compactions/PartitionedMobCompactor.java | 4 +-
.../hbase/regionserver/CompactionTool.java | 16 +-
.../hbase/regionserver/HRegionFileSystem.java | 8 +-
.../hbase/regionserver/HRegionServer.java | 8 +-
.../hbase/regionserver/RSRpcServices.java | 8 +-
.../regionserver/handler/OpenMetaHandler.java | 6 +-
.../handler/OpenPriorityRegionHandler.java | 5 +-
.../regionserver/handler/OpenRegionHandler.java | 10 +-
.../RegionReplicaReplicationEndpoint.java | 4 +-
.../hbase/snapshot/RestoreSnapshotHelper.java | 14 +-
.../hadoop/hbase/snapshot/SnapshotManifest.java | 18 +-
.../hadoop/hbase/util/FSTableDescriptors.java | 258 +++++++++----------
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 68 +++--
.../hadoop/hbase/util/HBaseFsckRepair.java | 4 +-
.../hadoop/hbase/util/ModifyRegionUtils.java | 33 +--
.../hadoop/hbase/HBaseTestingUtility.java | 13 +-
.../TestFSTableDescriptorForceCreation.java | 13 +-
.../TestHColumnDescriptorDefaultVersions.java | 12 +-
.../hbase/client/TestAsyncTableAdminApi.java | 5 +-
.../hbase/master/MockNoopMasterServices.java | 13 +-
.../master/assignment/MockMasterServices.java | 24 +-
.../MasterProcedureTestingUtility.java | 40 ++-
.../procedure/TestCreateTableProcedure.java | 33 +--
.../TestMasterFailoverWithProcedures.java | 4 +-
.../procedure/TestMasterProcedureWalLease.java | 4 +-
...stTableDescriptorModificationFromClient.java | 7 +-
.../TestPartitionedMobCompactor.java | 3 +-
.../regionserver/TestGetClosestAtOrBefore.java | 6 +-
.../TestRegionMergeTransactionOnCluster.java | 14 +-
.../regionserver/TestRegionServerNoMaster.java | 4 +-
.../hbase/security/access/SecureTestUtil.java | 21 +-
.../hbase/snapshot/MobSnapshotTestingUtils.java | 65 +++--
.../hbase/snapshot/SnapshotTestingUtils.java | 50 ++--
.../snapshot/TestRestoreSnapshotHelper.java | 16 +-
.../hbase/snapshot/TestSnapshotManifest.java | 8 +-
.../hbase/util/TestFSTableDescriptors.java | 81 +++---
77 files changed, 977 insertions(+), 942 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index 11a1a3d..ce77645 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -139,7 +139,7 @@ public final class BackupUtils {
LOG.warn("Table " + table + " does not exists, skipping it.");
continue;
}
- TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
+ HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
// write a copy of descriptor to the target directory
Path target = new Path(backupInfo.getTableBackupDir(table));
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 2e311cf..0cfe099 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -33,17 +33,16 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreJob;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
@@ -123,10 +122,10 @@ public class RestoreTool {
}
- void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
+ void modifyTableSync(Connection conn, HTableDescriptor desc) throws IOException {
try (Admin admin = conn.getAdmin();) {
- admin.modifyTable(desc);
+ admin.modifyTable(desc.getTableName(), desc);
int attempt = 0;
int maxAttempts = 600;
while (!admin.isTableAvailable(desc.getTableName())) {
@@ -173,30 +172,29 @@ public class RestoreTool {
// adjust table schema
for (int i = 0; i < tableNames.length; i++) {
TableName tableName = tableNames[i];
- TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
+ HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
TableName newTableName = newTableNames[i];
- TableDescriptor newTableDescriptor = admin.listTableDescriptor(newTableName);
- List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
- List<ColumnFamilyDescriptor> existingFamilies =
+ HTableDescriptor newTableDescriptor = new HTableDescriptor(admin.getTableDescriptor(newTableName));
+ List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
+ List<HColumnDescriptor> existingFamilies =
Arrays.asList(newTableDescriptor.getColumnFamilies());
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
boolean schemaChangeNeeded = false;
- for (ColumnFamilyDescriptor family : families) {
+ for (HColumnDescriptor family : families) {
if (!existingFamilies.contains(family)) {
- builder.addColumnFamily(family);
+ newTableDescriptor.addFamily(family);
schemaChangeNeeded = true;
}
}
- for (ColumnFamilyDescriptor family : existingFamilies) {
+ for (HColumnDescriptor family : existingFamilies) {
if (!families.contains(family)) {
- builder.removeColumnFamily(family.getName());
+ newTableDescriptor.removeFamily(family.getName());
schemaChangeNeeded = true;
}
}
if (schemaChangeNeeded) {
- modifyTableSync(conn, builder.build());
+ modifyTableSync(conn, newTableDescriptor);
LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
}
}
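modifyTableSync above issues modifyTable, which returns before the change is applied, and then polls isTableAvailable; a trimmed sketch of that loop (only the attempt/maxAttempts bound comes from the hunk above; the poll interval and timeout handling here are assumptions):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;

static void modifyTableSync(Admin admin, HTableDescriptor desc)
    throws IOException, InterruptedException {
  admin.modifyTable(desc.getTableName(), desc);  // returns before the change completes
  int attempt = 0;
  int maxAttempts = 600;
  while (!admin.isTableAvailable(desc.getTableName())) {
    if (++attempt > maxAttempts) {
      throw new IOException("Timeout while waiting for " + desc.getTableName());
    }
    Thread.sleep(100);  // assumed poll interval
  }
}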
@@ -255,24 +253,24 @@ public class RestoreTool {
/**
* Get table descriptor
* @param tableName is the table backed up
- * @return {@link TableDescriptor} saved in backup image of the table
+ * @return {@link HTableDescriptor} saved in backup image of the table
*/
- TableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
+ HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
Path tableInfoPath = this.getTableInfoPath(tableName);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
- TableDescriptor tableDescriptor = manifest.getTableDescriptor();
+ HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ tableInfoPath.toString());
- LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
+ LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
+ " under tableInfoPath: " + tableInfoPath.toString());
}
return tableDescriptor;
}
- private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
+ private HTableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
String lastIncrBackupId) throws IOException {
if (lastIncrBackupId != null) {
String target =
@@ -291,7 +289,7 @@ public class RestoreTool {
FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
// get table descriptor first
- TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
+ HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
if (tableDescriptor != null) {
LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
}
@@ -327,7 +325,7 @@ public class RestoreTool {
LOG.debug("find table descriptor but no archive dir for table " + tableName
+ ", will only create table");
}
- tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
+ tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
truncateIfExists);
return;
@@ -338,9 +336,9 @@ public class RestoreTool {
}
if (tableDescriptor == null) {
- tableDescriptor = TableDescriptorBuilder.newBuilder(newTableName).build();
+ tableDescriptor = new HTableDescriptor(newTableName);
} else {
- tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
+ tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
}
// record all region dirs:
@@ -472,7 +470,7 @@ public class RestoreTool {
* @throws IOException exception
*/
private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
- TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
+ TableName targetTableName, ArrayList<Path> regionDirList, HTableDescriptor htd,
boolean truncateIfExists) throws IOException {
try (Admin admin = conn.getAdmin();) {
boolean createNew = false;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 8802553..507bf49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -639,10 +639,13 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
if (this == obj) {
return true;
}
- if (obj instanceof HColumnDescriptor) {
- return delegatee.equals(((HColumnDescriptor) obj).delegatee);
+ if (obj == null) {
+ return false;
}
- return false;
+ if (!(obj instanceof HColumnDescriptor)) {
+ return false;
+ }
+ return compareTo((HColumnDescriptor)obj) == 0;
}
/**
@@ -655,7 +658,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
@Override
public int compareTo(HColumnDescriptor other) {
- return COMPARATOR.compare(this, other);
+ return delegatee.compareTo(other.delegatee);
}
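The restored equals on HColumnDescriptor is defined through compareTo, keeping the two consistent; a sketch of the implied contract (family name is illustrative):

import org.apache.hadoop.hbase.HColumnDescriptor;

static void equalsContract() {
  HColumnDescriptor a = new HColumnDescriptor("f");
  HColumnDescriptor b = new HColumnDescriptor("f");
  // equals delegates to compareTo, so the two always agree:
  assert a.equals(b) == (a.compareTo(b) == 0);
}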
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 86ba287..a0f23c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -495,10 +495,13 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
if (this == obj) {
return true;
}
- if (obj instanceof HTableDescriptor) {
- return delegatee.equals(((HTableDescriptor) obj).delegatee);
+ if (obj == null) {
+ return false;
}
- return false;
+ if (!(obj instanceof HTableDescriptor)) {
+ return false;
+ }
+ return compareTo((HTableDescriptor)obj) == 0;
}
/**
@@ -520,7 +523,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
*/
@Override
public int compareTo(final HTableDescriptor other) {
- return TableDescriptor.COMPARATOR.compare(this, other);
+ return delegatee.compareTo(other.delegatee);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 67d2c56..b3abaca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -1160,10 +1160,13 @@ public class ColumnFamilyDescriptorBuilder {
if (this == obj) {
return true;
}
- if (obj instanceof ModifyableColumnFamilyDescriptor) {
- return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0;
+ if (obj == null) {
+ return false;
}
- return false;
+ if (!(obj instanceof ModifyableColumnFamilyDescriptor)) {
+ return false;
+ }
+ return compareTo((ModifyableColumnFamilyDescriptor) obj) == 0;
}
@Override
@@ -1185,7 +1188,7 @@ public class ColumnFamilyDescriptorBuilder {
* @see #parseFrom(byte[])
*/
private byte[] toByteArray() {
- return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this)
+ return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this)
.toByteArray());
}
@@ -1210,7 +1213,7 @@ public class ColumnFamilyDescriptorBuilder {
} catch (IOException e) {
throw new DeserializationException(e);
}
- return ProtobufUtil.toColumnFamilyDescriptor(cfs);
+ return ProtobufUtil.convertToColumnDesc(cfs);
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index a2fa7e0..11f3273 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -378,7 +378,7 @@ public class HBaseAdmin implements Admin {
.setNamespaceName(Bytes.toString(name)).build())
.getTableSchemaList()
.stream()
- .map(ProtobufUtil::toTableDescriptor)
+ .map(ProtobufUtil::convertToTableDesc)
.collect(Collectors.toList());
}
});
@@ -459,8 +459,8 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
- return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
- req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
+ return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(getRpcController(),
+ req));
}
});
}
@@ -525,7 +525,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
- return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
+ return ProtobufUtil.convertToTableDesc(htds.getTableSchemaList().get(0));
}
return null;
}
@@ -554,7 +554,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
- return new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
+ return ProtobufUtil.convertToHTableDesc(htds.getTableSchemaList().get(0));
}
return null;
}
@@ -2300,7 +2300,7 @@ public class HBaseAdmin implements Admin {
.build()).getTableSchemaList();
HTableDescriptor[] res = new HTableDescriptor[list.size()];
for(int i=0; i < list.size(); i++) {
- res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i)));
+ res[i] = new ImmutableHTableDescriptor(ProtobufUtil.convertToHTableDesc(list.get(i)));
}
return res;
}
@@ -2419,14 +2419,33 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
- return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req))
- .stream()
- .map(ImmutableHTableDescriptor::new)
- .toArray(HTableDescriptor[]::new);
+ return ProtobufUtil.
+ getHTableDescriptorArray(master.getTableDescriptors(getRpcController(), req));
}
});
}
+ /**
+ * Get the table descriptor.
+ * @param tableName one table name
+ * @return the HTableDescriptor, or null if the table does not exist
+ * @throws IOException if a remote or network exception occurs
+ */
+ private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
+ throws IOException {
+ List<TableName> tableNames = new ArrayList<>(1);
+ tableNames.add(tableName);
+
+ HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
+
+ if (htdl == null || htdl.length == 0) {
+ return null;
+ }
+ else {
+ return htdl[0];
+ }
+ }
+
@Override
public HTableDescriptor[] getTableDescriptors(List<String> names)
throws IOException {
@@ -3690,7 +3709,7 @@ public class HBaseAdmin implements Admin {
* @return the table descriptor
*/
protected TableDescriptor getTableDescriptor() throws IOException {
- return getAdmin().listTableDescriptor(getTableName());
+ return getAdmin().getTableDescriptorByTableName(getTableName());
}
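For comparison, the two descriptor-lookup shapes this commit toggles between on Admin (the wrapper method and table name are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

static void lookupBothWays(Admin admin) throws IOException {
  TableName tn = TableName.valueOf("t1");
  HTableDescriptor htd = admin.getTableDescriptor(tn);  // pre-2.0: mutable HTableDescriptor
  TableDescriptor td = admin.listTableDescriptor(tn);   // 2.0: immutable TableDescriptor
}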
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 19bc2f4..ba68a96 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -453,7 +453,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
return;
}
if (!tableSchemas.isEmpty()) {
- future.complete(ProtobufUtil.toTableDescriptor(tableSchemas.get(0)));
+ future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0)));
} else {
future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index a710077..44d5c99 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -38,7 +38,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -253,14 +252,10 @@ public class TableDescriptorBuilder {
return new TableDescriptorBuilder(name);
}
- public static TableDescriptor copy(TableDescriptor desc) {
+ public static TableDescriptor copy(TableDescriptor desc) throws DeserializationException {
return new ModifyableTableDescriptor(desc);
}
- public static TableDescriptor copy(TableName name, TableDescriptor desc) {
- return new ModifyableTableDescriptor(name, desc);
- }
-
/**
* Copy all configuration, values, families, and name from the input.
* @param desc The desciptor to copy
@@ -1017,10 +1012,13 @@ public class TableDescriptorBuilder {
if (this == obj) {
return true;
}
- if (obj instanceof ModifyableTableDescriptor) {
- return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
+ if (obj == null) {
+ return false;
}
- return false;
+ if (!(obj instanceof ModifyableTableDescriptor)) {
+ return false;
+ }
+ return compareTo((ModifyableTableDescriptor) obj) == 0;
}
/**
@@ -1397,7 +1395,7 @@ public class TableDescriptorBuilder {
* @return the bytes in pb format
*/
private byte[] toByteArray() {
- return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
+ return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
}
/**
@@ -1417,7 +1415,7 @@ public class TableDescriptorBuilder {
HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
- return ProtobufUtil.toTableDescriptor(builder.build());
+ return ProtobufUtil.convertToTableDesc(builder.build());
} catch (IOException e) {
throw new DeserializationException(e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index abcc5e2..a527883 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,12 +17,15 @@
*/
package org.apache.hadoop.hbase.shaded.protobuf;
+import java.awt.image.BandCombineOp;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -47,8 +50,10 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@@ -69,6 +74,7 @@ import org.apache.hadoop.hbase.client.Cursor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
@@ -96,6 +102,7 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
@@ -175,6 +182,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DynamicClassLoader;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Methods;
@@ -417,6 +425,24 @@ public final class ProtobufUtil {
}
/**
+ * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
+ *
+ * @param proto the GetTableDescriptorsResponse
+ * @return an immutable HTableDescriptor array
+ * @deprecated Use {@link #toTableDescriptorList} after removing the HTableDescriptor
+ */
+ @Deprecated
+ public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
+ if (proto == null) return null;
+
+ HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()];
+ for (int i = 0; i < proto.getTableSchemaCount(); ++i) {
+ ret[i] = new ImmutableHTableDescriptor(convertToHTableDesc(proto.getTableSchema(i)));
+ }
+ return ret;
+ }
+
+ /**
* Get a list of TableDescriptor from GetTableDescriptorsResponse protobuf
*
* @param proto the GetTableDescriptorsResponse
@@ -424,7 +450,7 @@ public final class ProtobufUtil {
*/
public static List<TableDescriptor> toTableDescriptorList(GetTableDescriptorsResponse proto) {
if (proto == null) return new ArrayList<>();
- return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor)
+ return proto.getTableSchemaList().stream().map(ProtobufUtil::convertToTableDesc)
.collect(Collectors.toList());
}
@@ -2815,11 +2841,11 @@ public final class ProtobufUtil {
}
/**
- * Converts a ColumnFamilyDescriptor to ColumnFamilySchema
- * @param hcd the ColumnFamilyDescriptor
+ * Converts an HColumnDescriptor to ColumnFamilySchema
+ * @param hcd the HColumnDescriptor
* @return Convert this instance to the pb column family type
*/
- public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd) {
+ public static ColumnFamilySchema convertToColumnFamilySchema(ColumnFamilyDescriptor hcd) {
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
builder.setName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
for (Map.Entry<Bytes, Bytes> e : hcd.getValues().entrySet()) {
@@ -2838,11 +2864,31 @@ public final class ProtobufUtil {
}
/**
- * Converts a ColumnFamilySchema to ColumnFamilyDescriptor
+ * Converts a ColumnFamilySchema to HColumnDescriptor
+ * @param cfs the ColumnFamilySchema
+ * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
+ */
+ @Deprecated
+ public static HColumnDescriptor convertToHColumnDesc(final ColumnFamilySchema cfs) {
+ // Use the empty constructor so we preserve the initial values set on construction for things
+ // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
+ // unrelated-looking test failures that are hard to trace back to here.
+ HColumnDescriptor hcd = new HColumnDescriptor(cfs.getName().toByteArray());
+ for (BytesBytesPair a: cfs.getAttributesList()) {
+ hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+ }
+ for (NameStringPair a: cfs.getConfigurationList()) {
+ hcd.setConfiguration(a.getName(), a.getValue());
+ }
+ return hcd;
+ }
+
+ /**
+ * Converts a ColumnFamilySchema to HColumnDescriptor
* @param cfs the ColumnFamilySchema
- * @return A {@link ColumnFamilyDescriptor} made from the passed in <code>cfs</code>
+ * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
*/
- public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) {
+ public static ColumnFamilyDescriptor convertToColumnDesc(final ColumnFamilySchema cfs) {
// Use the empty constructor so we preserve the initial values set on construction for things
// like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
// unrelated-looking test failures that are hard to trace back to here.
@@ -2854,11 +2900,11 @@ public final class ProtobufUtil {
}
/**
- * Converts a TableDescriptor to TableSchema
- * @param htd the TableDescriptor
- * @return Convert the current {@link TableDescriptor} into a pb TableSchema instance.
+ * Converts an HTableDescriptor to TableSchema
+ * @param htd the HTableDescriptor
+ * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
*/
- public static TableSchema toTableSchema(TableDescriptor htd) {
+ public static TableSchema convertToTableSchema(TableDescriptor htd) {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setTableName(toProtoTableName(htd.getTableName()));
for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
@@ -2868,7 +2914,7 @@ public final class ProtobufUtil {
builder.addAttributes(aBuilder.build());
}
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
- builder.addColumnFamilies(toColumnFamilySchema(hcd));
+ builder.addColumnFamilies(convertToColumnFamilySchema(hcd));
}
for (Map.Entry<String, String> e : htd.getConfiguration().entrySet()) {
NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
@@ -2880,16 +2926,43 @@ public final class ProtobufUtil {
}
/**
+ * Converts a TableSchema to HTableDescriptor
+ * @param ts A pb TableSchema instance.
+ * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
+ * @deprecated Use {@link #convertToTableDesc} after removing the HTableDescriptor
+ */
+ @Deprecated
+ public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
+ List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
+ HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
+ int index = 0;
+ for (ColumnFamilySchema cfs: list) {
+ hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
+ }
+ HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
+ for (HColumnDescriptor hcd : hcds) {
+ htd.addFamily(hcd);
+ }
+ for (BytesBytesPair a: ts.getAttributesList()) {
+ htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+ }
+ for (NameStringPair a: ts.getConfigurationList()) {
+ htd.setConfiguration(a.getName(), a.getValue());
+ }
+ return htd;
+ }
+
+ /**
* Converts a TableSchema to TableDescriptor
* @param ts A pb TableSchema instance.
* @return A {@link TableDescriptor} made from the passed in pb <code>ts</code>.
*/
- public static TableDescriptor toTableDescriptor(final TableSchema ts) {
+ public static TableDescriptor convertToTableDesc(final TableSchema ts) {
TableDescriptorBuilder builder
= TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName()));
ts.getColumnFamiliesList()
.stream()
- .map(ProtobufUtil::toColumnFamilyDescriptor)
+ .map(ProtobufUtil::convertToColumnDesc)
.forEach(builder::addColumnFamily);
ts.getAttributesList()
.forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray()));
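Under either naming scheme a descriptor round-trips through the pb TableSchema; a sketch using the convertTo* names restored here (table name is illustrative; the generated-protos import path is an assumption):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

static void roundTrip() {
  TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).build();
  TableSchema pb = ProtobufUtil.convertToTableSchema(td);      // descriptor -> pb
  TableDescriptor back = ProtobufUtil.convertToTableDesc(pb);  // pb -> descriptor
  assert td.getTableName().equals(back.getTableName());
}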
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index a8a56c7..08ed3dc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -1080,7 +1080,7 @@ public final class RequestConverter {
final long nonce) {
AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
- builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
+ builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@@ -1120,7 +1120,7 @@ public final class RequestConverter {
final long nonce) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
- builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
+ builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@@ -1306,28 +1306,28 @@ public final class RequestConverter {
/**
* Creates a protocol buffer CreateTableRequest
*
- * @param tableDescriptor
+ * @param hTableDesc
* @param splitKeys
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(
- final TableDescriptor tableDescriptor,
+ final TableDescriptor hTableDesc,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) {
- return buildCreateTableRequest(tableDescriptor, Optional.ofNullable(splitKeys), nonceGroup, nonce);
+ return buildCreateTableRequest(hTableDesc, Optional.ofNullable(splitKeys), nonceGroup, nonce);
}
/**
* Creates a protocol buffer CreateTableRequest
- * @param tableDescriptor
+ * @param hTableDesc
* @param splitKeys
* @return a CreateTableRequest
*/
- public static CreateTableRequest buildCreateTableRequest(TableDescriptor tableDescriptor,
+ public static CreateTableRequest buildCreateTableRequest(TableDescriptor hTableDesc,
Optional<byte[][]> splitKeys, long nonceGroup, long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
- builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
+ builder.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDesc));
splitKeys.ifPresent(keys -> Arrays.stream(keys).forEach(
key -> builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key))));
builder.setNonceGroup(nonceGroup);
@@ -1349,7 +1349,7 @@ public final class RequestConverter {
final long nonce) {
ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
- builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc));
+ builder.setTableSchema(ProtobufUtil.convertToTableSchema(tableDesc));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
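buildCreateTableRequest carries the split keys as an Optional, with empty meaning a single-region table; a minimal sketch (td is a TableDescriptor as built earlier; the nonce values and the MasterProtos import path are assumptions):

import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;

static CreateTableRequest buildRequest(TableDescriptor td) {
  // Empty split keys -> a single-region table; nonce values are illustrative.
  return RequestConverter.buildCreateTableRequest(td, Optional.empty(), 0L, 0L);
}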
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 58b28e4..7de2629 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
/**
* Get, remove and modify table descriptors.
@@ -34,7 +33,7 @@ public interface TableDescriptors {
* @return TableDescriptor for tablename
* @throws IOException
*/
- TableDescriptor get(final TableName tableName)
+ HTableDescriptor get(final TableName tableName)
throws IOException;
/**
@@ -42,16 +41,16 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, TableDescriptor> getByNamespace(String name)
+ Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException;
/**
- * Get Map of all TableDescriptors. Populates the descriptor cache as a
+ * Get Map of all HTableDescriptors. Populates the descriptor cache as a
* side effect.
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, TableDescriptor> getAll()
+ Map<String, HTableDescriptor> getAll()
throws IOException;
/**
@@ -60,7 +59,7 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, TableDescriptor> getAllDescriptors()
+ Map<String, HTableDescriptor> getAllDescriptors()
throws IOException;
/**
@@ -68,7 +67,7 @@ public interface TableDescriptors {
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
- void add(final TableDescriptor htd)
+ void add(final HTableDescriptor htd)
throws IOException;
/**
@@ -76,7 +75,7 @@ public interface TableDescriptors {
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
- TableDescriptor remove(final TableName tablename)
+ HTableDescriptor remove(final TableName tablename)
throws IOException;
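TableDescriptors is implemented by the filesystem-backed FSTableDescriptors used throughout this commit; a minimal get (the method wrapper is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

static HTableDescriptor metaDescriptor(Configuration conf) throws IOException {
  TableDescriptors tds = new FSTableDescriptors(conf);  // filesystem-backed implementation
  return tds.get(TableName.META_TABLE_NAME);            // reads the on-disk .tableinfo
}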
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 1d0d57b..7ae0537 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -49,7 +50,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
List<Cell> values;
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
- Path rootDir, TableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
+ Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
throws IOException {
// region is immutable, set isolation level
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index bcd433c..b861969 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -74,7 +75,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
private Path restoreDir;
private Scan scan;
private ArrayList<HRegionInfo> regions;
- private TableDescriptor htd;
+ private HTableDescriptor htd;
private ClientSideRegionScanner currentRegionScanner = null;
private int currentRegion = -1;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index bf11473..2f6955e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.mapreduce;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
@@ -81,7 +81,7 @@ public class TableSnapshotInputFormatImpl {
*/
public static class InputSplit implements Writable {
- private TableDescriptor htd;
+ private HTableDescriptor htd;
private HRegionInfo regionInfo;
private String[] locations;
private String scan;
@@ -90,7 +90,7 @@ public class TableSnapshotInputFormatImpl {
// constructor for mapreduce framework / Writable
public InputSplit() {}
- public InputSplit(TableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
+ public InputSplit(HTableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
Scan scan, Path restoreDir) {
this.htd = htd;
this.regionInfo = regionInfo;
@@ -108,7 +108,7 @@ public class TableSnapshotInputFormatImpl {
this.restoreDir = restoreDir.toString();
}
- public TableDescriptor getHtd() {
+ public HTableDescriptor getHtd() {
return htd;
}
@@ -129,7 +129,7 @@ public class TableSnapshotInputFormatImpl {
return locations;
}
- public TableDescriptor getTableDescriptor() {
+ public HTableDescriptor getTableDescriptor() {
return htd;
}
@@ -142,7 +142,7 @@ public class TableSnapshotInputFormatImpl {
@Override
public void write(DataOutput out) throws IOException {
TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
- .setTable(ProtobufUtil.toTableSchema(htd))
+ .setTable(ProtobufUtil.convertToTableSchema(htd))
.setRegion(HRegionInfo.convert(regionInfo));
for (String location : locations) {
@@ -169,7 +169,7 @@ public class TableSnapshotInputFormatImpl {
byte[] buf = new byte[len];
in.readFully(buf);
TableSnapshotRegionSplit split = TableSnapshotRegionSplit.PARSER.parseFrom(buf);
- this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
+ this.htd = ProtobufUtil.convertToHTableDesc(split.getTable());
this.regionInfo = HRegionInfo.convert(split.getRegion());
List<String> locationsList = split.getLocationsList();
this.locations = locationsList.toArray(new String[locationsList.size()]);
@@ -196,7 +196,7 @@ public class TableSnapshotInputFormatImpl {
public void initialize(InputSplit split, Configuration conf) throws IOException {
this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
this.split = split;
- TableDescriptor htd = split.htd;
+ HTableDescriptor htd = split.htd;
HRegionInfo hri = this.split.getRegionInfo();
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
@@ -311,7 +311,7 @@ public class TableSnapshotInputFormatImpl {
public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest,
List<HRegionInfo> regionManifests, Path restoreDir, Configuration conf) throws IOException {
// load table descriptor
- TableDescriptor htd = manifest.getTableDescriptor();
+ HTableDescriptor htd = manifest.getTableDescriptor();
Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
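The write()/readFields() pair above round-trips the split through the shaded TableSnapshotRegionSplit protobuf. A hedged sketch of that Writable round trip (the input split is assumed to come from getSplits as in the diff):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.InputSplit;

    final class SplitRoundTripSketch {
      static InputSplit roundTrip(InputSplit split) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        split.write(new DataOutputStream(bos));   // serializes htd via convertToTableSchema
        InputSplit copy = new InputSplit();       // no-arg constructor kept for Writable
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        return copy;                              // htd rebuilt via convertToHTableDesc
      }
    }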
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index bcda145..8daa7db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -31,20 +31,21 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.GCMergedRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
@@ -205,7 +206,7 @@ public class CatalogJanitor extends ScheduledChore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
- TableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
+ HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
HRegionFileSystem regionFs = null;
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
@@ -413,12 +414,12 @@ public class CatalogJanitor extends ScheduledChore {
}
boolean references = false;
- TableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
+ HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
this.services.getConfiguration(), fs, tabledir, daughter, true);
- for (ColumnFamilyDescriptor family: parentDescriptor.getColumnFamilies()) {
+ for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
if ((references = regionFs.hasReferences(family.getNameAsString()))) {
break;
}
@@ -431,7 +432,7 @@ public class CatalogJanitor extends ScheduledChore {
return new Pair<>(Boolean.TRUE, Boolean.valueOf(references));
}
- private TableDescriptor getTableDescriptor(final TableName tableName)
+ private HTableDescriptor getTableDescriptor(final TableName tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(tableName);
}
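The loop above short-circuits on the first family whose store still holds reference files. The same check as a standalone predicate (a sketch; regionFs is opened exactly as in the diff):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

    final class DaughterReferenceCheck {
      static boolean hasAnyReferences(HRegionFileSystem regionFs, HTableDescriptor parent)
          throws IOException {
        for (HColumnDescriptor family : parent.getFamilies()) {
          if (regionFs.hasReferences(family.getNameAsString())) {
            return true;   // daughter still references a parent store file
          }
        }
        return false;      // safe for the janitor to consider cleanup
      }
    }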
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
index c4438bb..faa4f0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
@@ -23,11 +23,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
@@ -61,9 +61,9 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
- Map<String, TableDescriptor> map = htds.getAll();
- for (TableDescriptor htd : map.values()) {
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ Map<String, HTableDescriptor> map = htds.getAll();
+ for (HTableDescriptor htd : map.values()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
// clean only for mob-enabled column.
// obtain a read table lock before cleaning, synchronize with MobFileCompactionChore.
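The chore's selection rule above (mob-enabled family with minVersions == 0) reads as a small predicate; a sketch under the reverted HColumnDescriptor API (the helper class is illustrative only):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;

    final class MobCleanableFamilies {
      // Families eligible for expired-mob-file cleaning, per the chore above.
      static List<HColumnDescriptor> of(HTableDescriptor htd) {
        List<HColumnDescriptor> out = new ArrayList<>();
        for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
          if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
            out.add(hcd);
          }
        }
        return out;
      }
    }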
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 93624de..6b4d4e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -41,7 +41,6 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
import java.util.regex.Pattern;
import javax.servlet.ServletException;
@@ -61,8 +60,10 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -76,12 +77,9 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -592,9 +590,11 @@ public class HMaster extends HRegionServer implements MasterServices {
return connector.getLocalPort();
}
- protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
- return builder -> builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+ @Override
+ protected TableDescriptors getFsTableDescriptors() throws IOException {
+ return super.getFsTableDescriptors();
}
+
/**
* For compatibility, if failed with regionserver credentials, try the master one
*/
@@ -761,7 +761,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// enable table descriptors cache
this.tableDescriptors.setCacheOn();
-
+ // set the META's descriptor to the correct replication
+ this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
+ conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
status.setStatus("Pre-loading table descriptors");
@@ -1499,7 +1501,7 @@ public class HMaster extends HRegionServer implements MasterServices {
return false;
}
- TableDescriptor tblDesc = getTableDescriptors().get(table);
+ HTableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
@@ -1710,34 +1712,34 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long createTable(
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
checkInitialized();
- String namespace = tableDescriptor.getTableName().getNamespaceAsString();
+ String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
this.clusterSchemaService.getNamespace(namespace);
- HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, splitKeys);
- sanityCheckTableDescriptor(tableDescriptor);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
+ sanityCheckTableDescriptor(hTableDescriptor);
return MasterProcedureUtil.submitProcedure(
new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
- getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions);
+ getMaster().getMasterCoprocessorHost().preCreateTable(hTableDescriptor, newRegions);
- LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
+ LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
// TODO: We can handle/merge duplicate requests, and differentiate the case of
// TableExistsException by saying if the schema is the same or not.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
submitProcedure(new CreateTableProcedure(
- procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch));
+ procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch));
latch.await();
- getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions);
+ getMaster().getMasterCoprocessorHost().postCreateTable(hTableDescriptor, newRegions);
}
@Override
@@ -1748,25 +1750,25 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
- public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
+ public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
if (isStopped()) {
throw new MasterNotRunningException();
}
- TableName tableName = tableDescriptor.getTableName();
+ TableName tableName = hTableDescriptor.getTableName();
if (!(tableName.isSystemTable())) {
throw new IllegalArgumentException(
"Only system table creation can use this createSystemTable API");
}
- HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, null);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null);
- LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
+ LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
// This special create table is called locally to master. Therefore, no RPC means no need
// to use nonce to detect duplicated RPC call.
long procId = this.procedureExecutor.submitProcedure(
- new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
+ new CreateTableProcedure(procedureExecutor.getEnvironment(), hTableDescriptor, newRegions));
return procId;
}
@@ -1776,7 +1778,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* values (compression, etc) work. Throws an exception if something is wrong.
* @throws IOException
*/
- private void sanityCheckTableDescriptor(final TableDescriptor htd) throws IOException {
+ private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
final String CONF_KEY = "hbase.table.sanity.checks";
boolean logWarn = false;
if (!conf.getBoolean(CONF_KEY, true)) {
@@ -1846,7 +1848,7 @@ public class HMaster extends HRegionServer implements MasterServices {
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.getTimeToLive() <= 0) {
String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
@@ -1867,7 +1869,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
// max versions already being checked
- // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
+ // HBASE-13776 Setting illegal versions for HColumnDescriptor
// does not throw IllegalArgumentException
// check minVersions <= maxVersions
if (hcd.getMinVersions() > hcd.getMaxVersions()) {
@@ -1891,7 +1893,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
- private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException{
+ private void checkReplicationScope(HColumnDescriptor hcd) throws IOException{
// check replication scope
WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
if (scop == null) {
@@ -1903,7 +1905,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
- private void checkCompactionPolicy(Configuration conf, TableDescriptor htd)
+ private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd)
throws IOException {
// FIFO compaction has some requirements
// Actually FCP ignores periodic major compactions
@@ -1923,7 +1925,7 @@ public class HMaster extends HRegionServer implements MasterServices {
blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
}
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
String compactionPolicy =
hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
if (compactionPolicy == null) {
@@ -1936,7 +1938,7 @@ public class HMaster extends HRegionServer implements MasterServices {
String message = null;
// 1. Check TTL
- if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
+ if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
message = "Default TTL is not supported for FIFO compaction";
throw new IOException(message);
}
@@ -2038,36 +2040,36 @@ public class HMaster extends HRegionServer implements MasterServices {
}, getServerName().toShortString() + ".masterManager"));
}
- private void checkCompression(final TableDescriptor htd)
+ private void checkCompression(final HTableDescriptor htd)
throws IOException {
if (!this.masterCheckCompression) return;
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
checkCompression(hcd);
}
}
- private void checkCompression(final ColumnFamilyDescriptor hcd)
+ private void checkCompression(final HColumnDescriptor hcd)
throws IOException {
if (!this.masterCheckCompression) return;
CompressionTest.testCompression(hcd.getCompressionType());
CompressionTest.testCompression(hcd.getCompactionCompressionType());
}
- private void checkEncryption(final Configuration conf, final TableDescriptor htd)
+ private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
throws IOException {
if (!this.masterCheckEncryption) return;
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
checkEncryption(conf, hcd);
}
}
- private void checkEncryption(final Configuration conf, final ColumnFamilyDescriptor hcd)
+ private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
throws IOException {
if (!this.masterCheckEncryption) return;
EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
}
- private void checkClassLoading(final Configuration conf, final TableDescriptor htd)
+ private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
throws IOException {
RegionSplitPolicy.getSplitPolicyClass(htd, conf);
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
@@ -2141,7 +2143,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long addColumn(
final TableName tableName,
- final ColumnFamilyDescriptor columnDescriptor,
+ final HColumnDescriptor columnDescriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@@ -2177,7 +2179,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long modifyColumn(
final TableName tableName,
- final ColumnFamilyDescriptor descriptor,
+ final HColumnDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@@ -2371,7 +2373,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
- public long modifyTable(final TableName tableName, final TableDescriptor descriptor,
+ public long modifyTable(final TableName tableName, final HTableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
checkInitialized();
sanityCheckTableDescriptor(descriptor);
@@ -3125,7 +3127,7 @@ public class HMaster extends HRegionServer implements MasterServices {
throws IOException {
if (tableNameList == null || tableNameList.isEmpty()) {
// request for all TableDescriptors
- Collection<TableDescriptor> allHtds;
+ Collection<HTableDescriptor> allHtds;
if (namespace != null && namespace.length() > 0) {
// Do a check on the namespace existence. Will fail if does not exist.
this.clusterSchemaService.getNamespace(namespace);
@@ -3133,7 +3135,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
allHtds = tableDescriptors.getAll().values();
}
- for (TableDescriptor desc: allHtds) {
+ for (HTableDescriptor desc: allHtds) {
if (tableStateManager.isTablePresent(desc.getTableName())
&& (includeSysTables || !desc.getTableName().isSystemTable())) {
htds.add(desc);
@@ -3142,7 +3144,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
for (TableName s: tableNameList) {
if (tableStateManager.isTablePresent(s)) {
- TableDescriptor desc = tableDescriptors.get(s);
+ HTableDescriptor desc = tableDescriptors.get(s);
if (desc != null) {
htds.add(desc);
}
@@ -3247,7 +3249,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(TableName tableName,
- List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
+ List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
mobCompactThread.requestMobCompaction(conf, fs, tableName, columns, allFiles);
}
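For orientation, the restored createTable signature (also visible in the MasterServices diff below) takes the descriptor, split keys, and a nonce pair. A hedged in-process caller sketch (master is assumed to be a live MasterServices instance; table and family names are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.MasterServices;

    final class CreateTableCallSketch {
      static long createSimpleTable(MasterServices master) throws IOException {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
        htd.addFamily(new HColumnDescriptor("f1"));
        // null splitKeys => a single region; NO_NONCE since this is not a client RPC retry.
        return master.createTable(htd, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
      }
    }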
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index f9d47e0..ee195cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -28,15 +28,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
@@ -389,8 +387,10 @@ public class MasterFileSystem {
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
- TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
- HRegion meta = HRegion.createHRegion(metaHRI, rd, c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
+ HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+ setInfoFamilyCachingForMeta(metaDescriptor, false);
+ HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
+ setInfoFamilyCachingForMeta(metaDescriptor, true);
meta.close();
} catch (IOException e) {
e = e instanceof RemoteException ?
@@ -403,17 +403,13 @@ public class MasterFileSystem {
/**
* Enable in memory caching for hbase:meta
*/
- public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor, final boolean b) {
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
- for (ColumnFamilyDescriptor hcd: metaDescriptor.getColumnFamilies()) {
+ public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
+ for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
- builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
- .setBlockCacheEnabled(b)
- .setInMemory(b)
- .build());
+ hcd.setBlockCacheEnabled(b);
+ hcd.setInMemory(b);
}
}
- return builder.build();
}
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
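Worth noting: the revert swaps the immutable builder round trip for in-place mutation, so bootstrap flips caching off on the shared META descriptor, creates the region, then flips it back on. A sketch of that pattern using the names from the diff:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MetaCachingToggleSketch {
      // Same shape as setInfoFamilyCachingForMeta: mutate the catalog family in place.
      static void setInfoFamilyCaching(HTableDescriptor metaDescriptor, boolean b) {
        for (HColumnDescriptor hcd : metaDescriptor.getColumnFamilies()) {
          if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
            hcd.setBlockCacheEnabled(b);
            hcd.setInMemory(b);
          }
        }
      }
      // Bootstrap usage restored above: off -> HRegion.createHRegion(...) -> on -> close().
    }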
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index d092efe..2b1232a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -31,9 +31,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
@@ -79,7 +79,7 @@ public class MasterMobCompactionThread {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
+ List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
master.reportMobCompactionStart(tableName);
try {
masterMobPool.execute(new CompactionRunner(fs, tableName, columns,
@@ -102,11 +102,11 @@ public class MasterMobCompactionThread {
private class CompactionRunner implements Runnable {
private FileSystem fs;
private TableName tableName;
- private List<ColumnFamilyDescriptor> hcds;
+ private List<HColumnDescriptor> hcds;
private boolean allFiles;
private ExecutorService pool;
- public CompactionRunner(FileSystem fs, TableName tableName, List<ColumnFamilyDescriptor> hcds,
+ public CompactionRunner(FileSystem fs, TableName tableName, List<HColumnDescriptor> hcds,
boolean allFiles, ExecutorService pool) {
super();
this.fs = fs;
@@ -123,7 +123,7 @@ public class MasterMobCompactionThread {
MobUtils.getTableLockName(tableName), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
try {
- for (ColumnFamilyDescriptor hcd : hcds) {
+ for (HColumnDescriptor hcd : hcds) {
MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock);
}
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 3ec2c45..6e9b1e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -31,8 +31,10 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
@@ -362,7 +363,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.addColumn(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
+ ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@@ -438,11 +439,11 @@ public class MasterRpcServices extends RSRpcServices
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException {
- TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
+ HTableDescriptor hTableDescriptor = ProtobufUtil.convertToHTableDesc(req.getTableSchema());
byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
try {
long procId =
- master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
+ master.createTable(hTableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
return CreateTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
@@ -864,7 +865,7 @@ public class MasterRpcServices extends RSRpcServices
if (descriptors != null && descriptors.size() > 0) {
// Add the table descriptors to the response
for (TableDescriptor htd: descriptors) {
- builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
+ builder.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
}
}
return builder.build();
@@ -1117,7 +1118,7 @@ public class MasterRpcServices extends RSRpcServices
ListTableDescriptorsByNamespaceResponse.newBuilder();
for (TableDescriptor htd : master
.listTableDescriptorsByNamespace(request.getNamespaceName())) {
- b.addTableSchema(ProtobufUtil.toTableSchema(htd));
+ b.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
}
return b.build();
} catch (IOException e) {
@@ -1146,7 +1147,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyColumn(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
+ ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@@ -1180,7 +1181,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyTable(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.toTableDescriptor(req.getTableSchema()),
+ ProtobufUtil.convertToHTableDesc(req.getTableSchema()),
req.getNonceGroup(),
req.getNonce());
return ModifyTableResponse.newBuilder().setProcId(procId).build();
@@ -1531,12 +1532,12 @@ public class MasterRpcServices extends RSRpcServices
throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
}
boolean allFiles = false;
- List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
- ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
+ List<HColumnDescriptor> compactedColumns = new ArrayList<>();
+ HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
byte[] family = null;
if (request.hasFamily()) {
family = request.getFamily().toByteArray();
- for (ColumnFamilyDescriptor hcd : hcds) {
+ for (HColumnDescriptor hcd : hcds) {
if (Bytes.equals(family, hcd.getName())) {
if (!hcd.isMobEnabled()) {
LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
@@ -1547,7 +1548,7 @@ public class MasterRpcServices extends RSRpcServices
}
}
} else {
- for (ColumnFamilyDescriptor hcd : hcds) {
+ for (HColumnDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
compactedColumns.add(hcd);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index cde9e34..6e97bf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
@@ -30,7 +32,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -158,17 +159,17 @@ public interface MasterServices extends Server {
* a single region is created.
*/
long createTable(
- final TableDescriptor desc,
+ final HTableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException;
/**
* Create a system table using the given table definition.
- * @param tableDescriptor The system table definition
+ * @param hTableDescriptor The system table definition
* a single region is created.
*/
- long createSystemTable(final TableDescriptor tableDescriptor) throws IOException;
+ long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException;
/**
* Delete a table
@@ -206,7 +207,7 @@ public interface MasterServices extends Server {
*/
long modifyTable(
final TableName tableName,
- final TableDescriptor descriptor,
+ final HTableDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
@@ -246,7 +247,7 @@ public interface MasterServices extends Server {
*/
long addColumn(
final TableName tableName,
- final ColumnFamilyDescriptor column,
+ final HColumnDescriptor column,
final long nonceGroup,
final long nonce)
throws IOException;
@@ -261,7 +262,7 @@ public interface MasterServices extends Server {
*/
long modifyColumn(
final TableName tableName,
- final ColumnFamilyDescriptor descriptor,
+ final HColumnDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 476c65c..42a5445 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
- Map<String, TableDescriptor> map = htds.getAll();
- for (TableDescriptor htd : map.values()) {
+ Map<String, HTableDescriptor> map = htds.getAll();
+ for (HTableDescriptor htd : map.values()) {
if (!master.getTableStateManager().isTableState(htd.getTableName(),
TableState.State.ENABLED)) {
continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
- for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+ for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (!hcd.isMobEnabled()) {
continue;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index fb83971..18f6856 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
throws IOException {
- final Map<String, TableDescriptor> allDescriptors =
+ final Map<String, HTableDescriptor> allDescriptors =
tableDescriptors.getAllDescriptors();
final Map<String, TableState> states = new HashMap<>();
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
return true;
}
});
- for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
+ for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
String table = entry.getKey();
if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
continue;
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c398c9a..9aaf297 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -31,18 +31,18 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.master.CatalogJanitor;
@@ -603,10 +603,10 @@ public class MergeTableRegionsProcedure
throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Configuration conf = env.getMasterConfiguration();
- final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
+ final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
for (String family: regionFs.getFamilies()) {
- final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes());
+ final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
if (storeFiles != null && storeFiles.size() > 0) {
@@ -682,7 +682,7 @@ public class MergeTableRegionsProcedure
}
private int getRegionReplication(final MasterProcedureEnv env) throws IOException {
- final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
+ final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
return htd.getRegionReplication();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 072800b..627eb57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
@@ -221,7 +221,7 @@ public class RegionStateStore {
// ============================================================================================
public void splitRegion(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
- final TableDescriptor htd = getTableDescriptor(parent.getTable());
+ final HTableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.splitRegion(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), hasSerialReplicationScope(htd));
}
@@ -231,7 +231,7 @@ public class RegionStateStore {
// ============================================================================================
public void mergeRegions(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
- final TableDescriptor htd = getTableDescriptor(parent.getTable());
+ final HTableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), EnvironmentEdgeManager.currentTime(),
hasSerialReplicationScope(htd));
@@ -255,15 +255,15 @@ public class RegionStateStore {
return hasSerialReplicationScope(getTableDescriptor(tableName));
}
- private boolean hasSerialReplicationScope(final TableDescriptor htd) {
+ private boolean hasSerialReplicationScope(final HTableDescriptor htd) {
return (htd != null)? htd.hasSerialReplicationScope(): false;
}
- private int getRegionReplication(final TableDescriptor htd) {
+ private int getRegionReplication(final HTableDescriptor htd) {
return (htd != null) ? htd.getRegionReplication() : 1;
}
- private TableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
+ private HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
return master.getTableDescriptors().get(tableName);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
index 5e3d8c4..d9a1ab8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -194,7 +194,7 @@ class RegionLocationFinder {
*/
protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) {
try {
- TableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
+ HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
if (tableDescriptor != null) {
HDFSBlocksDistribution blocksDistribution =
HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
@@ -209,14 +209,14 @@ class RegionLocationFinder {
}
/**
- * return TableDescriptor for a given tableName
+ * return HTableDescriptor for a given tableName
*
* @param tableName the table name
- * @return TableDescriptor
+ * @return HTableDescriptor
* @throws IOException
*/
- protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException {
- TableDescriptor tableDescriptor = null;
+ protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException {
+ HTableDescriptor tableDescriptor = null;
try {
if (this.services != null && this.services.getTableDescriptors() != null) {
tableDescriptor = this.services.getTableDescriptors().get(tableName);
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
index f1ff936..45b2401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
@@ -27,17 +27,17 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.util.Bytes;
@@ -63,11 +63,11 @@ public class ReplicationMetaCleaner extends ScheduledChore {
@Override
protected void chore() {
try {
- Map<String, TableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
+ Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
Map<String, Set<String>> serialTables = new HashMap<>();
- for (Map.Entry<String, TableDescriptor> entry : tables.entrySet()) {
+ for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
boolean hasSerialScope = false;
- for (ColumnFamilyDescriptor column : entry.getValue().getColumnFamilies()) {
+ for (HColumnDescriptor column : entry.getValue().getFamilies()) {
if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
hasSerialScope = true;
break;
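The chore above classifies a table as serial-scoped if any family carries REPLICATION_SCOPE_SERIAL; the same test as a standalone predicate (a sketch against the reverted API; the class name is illustrative):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;

    final class SerialScopeCheck {
      static boolean hasSerialScope(HTableDescriptor htd) {
        for (HColumnDescriptor column : htd.getFamilies()) {
          if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
            return true;   // one serial family is enough, mirroring the chore's break
          }
        }
        return false;
      }
    }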
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index f19195e..34c1853 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -25,13 +25,12 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -46,30 +45,30 @@ public class AddColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class);
private TableName tableName;
- private TableDescriptor unmodifiedTableDescriptor;
- private ColumnFamilyDescriptor cfDescriptor;
+ private HTableDescriptor unmodifiedHTableDescriptor;
+ private HColumnDescriptor cfDescriptor;
private List<HRegionInfo> regionInfoList;
private Boolean traceEnabled;
public AddColumnFamilyProcedure() {
super();
- this.unmodifiedTableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final ColumnFamilyDescriptor cfDescriptor) throws IOException {
+ final HColumnDescriptor cfDescriptor) throws IOException {
this(env, tableName, cfDescriptor, null);
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
+ final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
- this.unmodifiedTableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -173,10 +172,10 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
- .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
- if (unmodifiedTableDescriptor != null) {
+ .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
+ if (unmodifiedHTableDescriptor != null) {
addCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
addCFMsg.build().writeDelimitedTo(stream);
@@ -190,9 +189,9 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(addCFMsg.getTableName());
- cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(addCFMsg.getColumnfamilySchema());
+ cfDescriptor = ProtobufUtil.convertToHColumnDesc(addCFMsg.getColumnfamilySchema());
if (addCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(addCFMsg.getUnmodifiedTableSchema());
+ unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(addCFMsg.getUnmodifiedTableSchema());
}
}
@@ -230,11 +229,11 @@ public class AddColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedTableDescriptor == null) {
- throw new IOException("TableDescriptor missing for " + tableName);
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
}
- if (unmodifiedTableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
+ if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName()
+ "' in table '" + tableName + "' already exists so cannot be added");
}
@@ -259,18 +258,17 @@ public class AddColumnFamilyProcedure
// Update table descriptor
LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString());
- TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (htd.hasColumnFamily(cfDescriptor.getName())) {
+ if (htd.hasFamily(cfDescriptor.getName())) {
// It is possible to reach this situation, as we could already add the column family
// to table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problem.
return;
}
- env.getMasterServices().getTableDescriptors().add(
- TableDescriptorBuilder.newBuilder(htd)
- .addColumnFamily(cfDescriptor).build());
+ htd.addFamily(cfDescriptor);
+ env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@@ -279,14 +277,14 @@ public class AddColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (htd.hasColumnFamily(cfDescriptor.getName())) {
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (htd.hasFamily(cfDescriptor.getName())) {
// Remove the column family from file system and update the table descriptor to
// the before-add-column-family-state
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName,
getRegionInfoList(env), cfDescriptor.getName(), cfDescriptor.isMobEnabled());
- env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
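As reverted, AddColumnFamilyProcedure mutates the HTableDescriptor in place instead of rebuilding it through TableDescriptorBuilder, and the update stays idempotent so the state can replay after a master failover. A condensed sketch of that flow, assuming a TableDescriptors store named descriptors (names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

static void addFamily(TableDescriptors descriptors, TableName table,
    HColumnDescriptor cf) throws IOException {
  HTableDescriptor htd = descriptors.get(table);
  if (htd.hasFamily(cf.getName())) {
    return; // already applied, e.g. replaying the state after a failover
  }
  htd.addFamily(cf); // HTableDescriptor is mutable, no builder round-trip
  descriptors.add(htd); // persist the updated schema
}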
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index cc39f53..afe72e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -33,12 +33,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -68,7 +67,7 @@ public class CloneSnapshotProcedure
extends AbstractStateMachineTableProcedure<CloneSnapshotState> {
private static final Log LOG = LogFactory.getLog(CloneSnapshotProcedure.class);
- private TableDescriptor tableDescriptor;
+ private HTableDescriptor hTableDescriptor;
private SnapshotDescription snapshot;
private boolean restoreAcl;
private List<HRegionInfo> newRegions = null;
@@ -86,21 +85,21 @@ public class CloneSnapshotProcedure
}
public CloneSnapshotProcedure(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
- this(env, tableDescriptor, snapshot, false);
+ final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
+ this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
- * @param tableDescriptor the table to operate on
+ * @param hTableDescriptor the table to operate on
* @param snapshot snapshot to clone from
*/
public CloneSnapshotProcedure(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
- this.tableDescriptor = tableDescriptor;
+ this.hTableDescriptor = hTableDescriptor;
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@@ -122,7 +121,7 @@ public class CloneSnapshotProcedure
Configuration conf = env.getMasterServices().getConfiguration();
if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
- RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf);
+ RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, hTableDescriptor.getTableName(), conf);
}
}
@@ -142,7 +141,7 @@ public class CloneSnapshotProcedure
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
break;
case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
- newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
+ newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions);
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
break;
case CLONE_SNAPSHOT_ADD_TO_META:
@@ -225,7 +224,7 @@ public class CloneSnapshotProcedure
@Override
public TableName getTableName() {
- return tableDescriptor.getTableName();
+ return hTableDescriptor.getTableName();
}
@Override
@@ -251,7 +250,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
- .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
+ .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
@@ -282,7 +281,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo()));
snapshot = cloneSnapshotMsg.getSnapshot();
- tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema());
+ hTableDescriptor = ProtobufUtil.convertToHTableDesc(cloneSnapshotMsg.getTableSchema());
if (cloneSnapshotMsg.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@@ -342,7 +341,7 @@ public class CloneSnapshotProcedure
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
- cpHost.preCreateTableAction(tableDescriptor, null, getUser());
+ cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
}
}
@@ -358,7 +357,7 @@ public class CloneSnapshotProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
+ cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@@ -369,9 +368,9 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFilesystemLayout(
final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> newRegions) throws IOException {
- return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
+ return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(
final MasterProcedureEnv env,
@@ -391,7 +390,7 @@ public class CloneSnapshotProcedure
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
- conf, fs, manifest, tableDescriptor, tableRootDir, monitorException, monitorStatus);
+ conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
// Clone operation should not have stuff to restore or remove
@@ -430,7 +429,7 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFsLayout(
final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
@@ -438,17 +437,17 @@ public class CloneSnapshotProcedure
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
- final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
+ HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
- .createTableDescriptorForTableDirectory(tempTableDir,
- TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
+ .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(
- env, tempdir, tableDescriptor.getTableName(), newRegions);
+ env, tempdir, hTableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
- CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
+ CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
return newRegions;
}
@@ -459,11 +458,11 @@ public class CloneSnapshotProcedure
* @throws IOException
*/
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
- newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
+ newRegions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, newRegions);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
- tableDescriptor, parentsToChildrenPairMap);
+ hTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
}
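Note the defensive copy in the reverted createFsLayout: because HTableDescriptor is mutable, the descriptor written into the temp table directory is a copy, so later changes to the caller's instance cannot leak into the on-disk schema. A fragment-level sketch, assuming fsTableDescriptors, tempTableDir and hTableDescriptor are in scope as in the patch:

import org.apache.hadoop.hbase.HTableDescriptor;

// Copy first: the table is created enabling first, and the copy keeps
// mutations of the original descriptor out of what lands on disk.
HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
fsTableDescriptors.createTableDescriptorForTableDirectory(
    tempTableDir, underConstruction, false);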
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 14604fd..cf55463 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -30,12 +30,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -55,7 +55,7 @@ public class CreateTableProcedure
extends AbstractStateMachineTableProcedure<CreateTableState> {
private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class);
- private TableDescriptor tableDescriptor;
+ private HTableDescriptor hTableDescriptor;
private List<HRegionInfo> newRegions;
public CreateTableProcedure() {
@@ -64,15 +64,15 @@ public class CreateTableProcedure
}
public CreateTableProcedure(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions) {
- this(env, tableDescriptor, newRegions, null);
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) {
+ this(env, hTableDescriptor, newRegions, null);
}
public CreateTableProcedure(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
final ProcedurePrepareLatch syncLatch) {
super(env, syncLatch);
- this.tableDescriptor = tableDescriptor;
+ this.hTableDescriptor = hTableDescriptor;
this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null;
}
@@ -98,11 +98,11 @@ public class CreateTableProcedure
setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT);
break;
case CREATE_TABLE_WRITE_FS_LAYOUT:
- newRegions = createFsLayout(env, tableDescriptor, newRegions);
+ newRegions = createFsLayout(env, hTableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
break;
case CREATE_TABLE_ADD_TO_META:
- newRegions = addTableToMeta(env, tableDescriptor, newRegions);
+ newRegions = addTableToMeta(env, hTableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS);
break;
case CREATE_TABLE_ASSIGN_REGIONS:
@@ -174,7 +174,7 @@ public class CreateTableProcedure
@Override
public TableName getTableName() {
- return tableDescriptor.getTableName();
+ return hTableDescriptor.getTableName();
}
@Override
@@ -189,7 +189,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData.Builder state =
MasterProcedureProtos.CreateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
- .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
+ .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
state.addRegionInfo(HRegionInfo.convert(hri));
@@ -205,7 +205,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData state =
MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
- tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
+ hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
if (state.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@@ -235,7 +235,7 @@ public class CreateTableProcedure
}
// check that we have at least 1 CF
- if (tableDescriptor.getColumnFamilyCount() == 0) {
+ if (hTableDescriptor.getColumnFamilyCount() == 0) {
setFailure("master-create-table", new DoNotRetryIOException("Table " +
getTableName().toString() + " should have at least one column family."));
return false;
@@ -256,7 +256,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = newRegions == null ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.preCreateTableAction(tableDescriptor, regions, getUser());
+ cpHost.preCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@@ -266,7 +266,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
+ cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@@ -277,9 +277,9 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final List<HRegionInfo> newRegions)
+ final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
throws IOException {
- return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
+ return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
final Path tableRootDir, final TableName tableName,
@@ -287,40 +287,40 @@ public class CreateTableProcedure
HRegionInfo[] regions = newRegions != null ?
newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
- tableRootDir, tableDescriptor, regions, null);
+ tableRootDir, hTableDescriptor, regions, null);
}
});
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, List<HRegionInfo> newRegions,
+ final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tempdir = mfs.getTempDir();
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
- final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(
- tempTableDir, tableDescriptor, false);
+ tempTableDir, hTableDescriptor, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
- tableDescriptor.getTableName(), newRegions);
+ hTableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
- moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
+ moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
return newRegions;
}
protected static void moveTempDirectoryToHBaseRoot(
final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final Path tempTableDir) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
- final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
FileSystem fs = mfs.getFileSystem();
if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
throw new IOException("Couldn't delete " + tableDir);
@@ -332,20 +332,20 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regions) throws IOException {
assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
ProcedureSyncWait.waitMetaRegions(env);
// Add replicas if needed
- List<HRegionInfo> newRegions = addReplicas(env, tableDescriptor, regions);
+ List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions);
// Add regions to META
- addRegionsToMeta(env, tableDescriptor, newRegions);
+ addRegionsToMeta(env, hTableDescriptor, newRegions);
// Setup replication for region replicas if needed
- if (tableDescriptor.getRegionReplication() > 1) {
+ if (hTableDescriptor.getRegionReplication() > 1) {
ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
}
return newRegions;
@@ -354,14 +354,14 @@ public class CreateTableProcedure
/**
* Create any replicas for the regions (the default replicas that was
* already created is passed to the method)
- * @param tableDescriptor descriptor to use
+ * @param hTableDescriptor descriptor to use
* @param regions default replicas
* @return the combined list of default and non-default replicas
*/
private static List<HRegionInfo> addReplicas(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regions) {
- int numRegionReplicas = tableDescriptor.getRegionReplication() - 1;
+ int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
if (numRegionReplicas <= 0) {
return regions;
}
@@ -394,10 +394,10 @@ public class CreateTableProcedure
* Add the specified set of regions to the hbase:meta table.
*/
private static void addRegionsToMeta(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regionInfos) throws IOException {
MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
- regionInfos, tableDescriptor.getRegionReplication());
+ regionInfos, hTableDescriptor.getRegionReplication());
}
protected static void updateTableDescCache(final MasterProcedureEnv env,
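addReplicas derives the extra replica count from the descriptor: region replication n means each default HRegionInfo gains n - 1 replica infos. A condensed sketch of that expansion using RegionReplicaUtil (a simplified reimplementation, not the procedure's exact loop):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

static List<HRegionInfo> addReplicas(HTableDescriptor htd, List<HRegionInfo> regions) {
  int numRegionReplicas = htd.getRegionReplication() - 1;
  if (numRegionReplicas <= 0) {
    return regions; // replication of 1: the default replicas are all there is
  }
  List<HRegionInfo> out = new ArrayList<>(regions.size() * (numRegionReplicas + 1));
  for (HRegionInfo hri : regions) {
    out.add(hri); // default replica, replicaId 0
    for (int replicaId = 1; replicaId <= numRegionReplicas; replicaId++) {
      out.add(RegionReplicaUtil.getRegionInfoForReplica(hri, replicaId));
    }
  }
  return out;
}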
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index 9ec814a..78bd715 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -26,11 +26,10 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -46,7 +45,7 @@ public class DeleteColumnFamilyProcedure
extends AbstractStateMachineTableProcedure<DeleteColumnFamilyState> {
private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class);
- private TableDescriptor unmodifiedTableDescriptor;
+ private HTableDescriptor unmodifiedHTableDescriptor;
private TableName tableName;
private byte [] familyName;
private boolean hasMob;
@@ -56,7 +55,7 @@ public class DeleteColumnFamilyProcedure
public DeleteColumnFamilyProcedure() {
super();
- this.unmodifiedTableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -71,7 +70,7 @@ public class DeleteColumnFamilyProcedure
super(env, latch);
this.tableName = tableName;
this.familyName = familyName;
- this.unmodifiedTableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -180,9 +179,9 @@ public class DeleteColumnFamilyProcedure
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setColumnfamilyName(UnsafeByteOperations.unsafeWrap(familyName));
- if (unmodifiedTableDescriptor != null) {
+ if (unmodifiedHTableDescriptor != null) {
deleteCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
deleteCFMsg.build().writeDelimitedTo(stream);
@@ -198,7 +197,7 @@ public class DeleteColumnFamilyProcedure
familyName = deleteCFMsg.getColumnfamilyName().toByteArray();
if (deleteCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(deleteCFMsg.getUnmodifiedTableSchema());
+ unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(deleteCFMsg.getUnmodifiedTableSchema());
}
}
@@ -236,22 +235,22 @@ public class DeleteColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedTableDescriptor == null) {
- throw new IOException("TableDescriptor missing for " + tableName);
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
}
- if (!unmodifiedTableDescriptor.hasColumnFamily(familyName)) {
+ if (!unmodifiedHTableDescriptor.hasFamily(familyName)) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be deleted");
}
- if (unmodifiedTableDescriptor.getColumnFamilyCount() == 1) {
+ if (unmodifiedHTableDescriptor.getColumnFamilyCount() == 1) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' is the only column family in the table, so it cannot be deleted");
}
// whether mob family
- hasMob = unmodifiedTableDescriptor.getColumnFamily(familyName).isMobEnabled();
+ hasMob = unmodifiedHTableDescriptor.getFamily(familyName).isMobEnabled();
}
/**
@@ -273,17 +272,17 @@ public class DeleteColumnFamilyProcedure
// Update table descriptor
LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName());
- TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (!htd.hasColumnFamily(familyName)) {
+ if (!htd.hasFamily(familyName)) {
// It is possible to reach this situation, as we could already delete the column family
// from table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problem.
return;
}
- env.getMasterServices().getTableDescriptors().add(
- TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(familyName).build());
+ htd.removeFamily(familyName);
+ env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@@ -292,7 +291,7 @@ public class DeleteColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
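prepareDelete validates everything against the old descriptor before any state changes: the family must exist and must not be the last one left in the table. A condensed sketch of those guards (helper name illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.util.Bytes;

static void checkDeletable(HTableDescriptor htd, byte[] familyName) throws IOException {
  if (htd == null) {
    throw new IOException("HTableDescriptor missing");
  }
  if (!htd.hasFamily(familyName)) {
    throw new InvalidFamilyOperationException("Family '" + Bytes.toString(familyName)
        + "' does not exist, so it cannot be deleted");
  }
  if (htd.getColumnFamilyCount() == 1) {
    throw new InvalidFamilyOperationException("Family '" + Bytes.toString(familyName)
        + "' is the only column family in the table, so it cannot be deleted");
  }
}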
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index ac86dab..622c19f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -24,12 +24,11 @@ import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -44,28 +43,28 @@ public class ModifyColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class);
private TableName tableName;
- private TableDescriptor unmodifiedtableDescriptor;
- private ColumnFamilyDescriptor cfDescriptor;
+ private HTableDescriptor unmodifiedHTableDescriptor;
+ private HColumnDescriptor cfDescriptor;
private Boolean traceEnabled;
public ModifyColumnFamilyProcedure() {
super();
- this.unmodifiedtableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.traceEnabled = null;
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final ColumnFamilyDescriptor cfDescriptor) {
+ final HColumnDescriptor cfDescriptor) {
this(env, tableName, cfDescriptor, null);
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
+ final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
- this.unmodifiedtableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.traceEnabled = null;
}
@@ -166,10 +165,10 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
- .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
- if (unmodifiedtableDescriptor != null) {
+ .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
+ if (unmodifiedHTableDescriptor != null) {
modifyCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedtableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
modifyCFMsg.build().writeDelimitedTo(stream);
@@ -183,9 +182,9 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
- cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(modifyCFMsg.getColumnfamilySchema());
+ cfDescriptor = ProtobufUtil.convertToHColumnDesc(modifyCFMsg.getColumnfamilySchema());
if (modifyCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedtableDescriptor = ProtobufUtil.toTableDescriptor(modifyCFMsg.getUnmodifiedTableSchema());
+ unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyCFMsg.getUnmodifiedTableSchema());
}
}
@@ -222,11 +221,11 @@ public class ModifyColumnFamilyProcedure
// Checks whether the table is allowed to be modified.
checkTableModifiable(env);
- unmodifiedtableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedtableDescriptor == null) {
- throw new IOException("TableDescriptor missing for " + tableName);
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
}
- if (!unmodifiedtableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
+ if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be modified");
}
@@ -251,9 +250,9 @@ public class ModifyColumnFamilyProcedure
// Update table descriptor
LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString());
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMasterServices().getTableDescriptors().get(tableName));
- builder.modifyColumnFamily(cfDescriptor);
- env.getMasterServices().getTableDescriptors().add(builder.build());
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ htd.modifyFamily(cfDescriptor);
+ env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@@ -262,7 +261,7 @@ public class ModifyColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedtableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
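The modify path follows the same pattern as add: fetch the mutable HTableDescriptor, change the family in place, persist. A minimal sketch, again assuming a TableDescriptors store named descriptors:

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

static void modifyFamily(TableDescriptors descriptors, TableName table,
    HColumnDescriptor cf) throws IOException {
  HTableDescriptor htd = descriptors.get(table);
  htd.modifyFamily(cf); // replaces the stored schema for cf.getName()
  descriptors.add(htd); // persist the updated descriptor
}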
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 9741faa..20a6a03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -52,8 +52,8 @@ public class ModifyTableProcedure
extends AbstractStateMachineTableProcedure<ModifyTableState> {
private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class);
- private TableDescriptor unmodifiedTableDescriptor = null;
- private TableDescriptor modifiedTableDescriptor;
+ private HTableDescriptor unmodifiedHTableDescriptor = null;
+ private HTableDescriptor modifiedHTableDescriptor;
private boolean deleteColumnFamilyInModify;
private List<HRegionInfo> regionInfoList;
@@ -64,19 +64,19 @@ public class ModifyTableProcedure
initilize();
}
- public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd) {
+ public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd) {
this(env, htd, null);
}
- public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd,
+ public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd,
final ProcedurePrepareLatch latch) {
super(env, latch);
initilize();
- this.modifiedTableDescriptor = htd;
+ this.modifiedHTableDescriptor = htd;
}
private void initilize() {
- this.unmodifiedTableDescriptor = null;
+ this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
this.deleteColumnFamilyInModify = false;
@@ -104,7 +104,7 @@ public class ModifyTableProcedure
setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
break;
case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
- updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
+ updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
if (deleteColumnFamilyInModify) {
setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
} else {
@@ -112,7 +112,7 @@ public class ModifyTableProcedure
}
break;
case MODIFY_TABLE_DELETE_FS_LAYOUT:
- deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
+ deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
break;
case MODIFY_TABLE_POST_OPERATION:
@@ -191,12 +191,12 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
- .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor))
+ .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor))
.setDeleteColumnFamilyInModify(deleteColumnFamilyInModify);
- if (unmodifiedTableDescriptor != null) {
+ if (unmodifiedHTableDescriptor != null) {
modifyTableMsg
- .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
modifyTableMsg.build().writeDelimitedTo(stream);
@@ -209,18 +209,18 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo()));
- modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(modifyTableMsg.getModifiedTableSchema());
+ modifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyTableMsg.getModifiedTableSchema());
deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();
if (modifyTableMsg.hasUnmodifiedTableSchema()) {
- unmodifiedTableDescriptor =
- ProtobufUtil.toTableDescriptor(modifyTableMsg.getUnmodifiedTableSchema());
+ unmodifiedHTableDescriptor =
+ ProtobufUtil.convertToHTableDesc(modifyTableMsg.getUnmodifiedTableSchema());
}
}
@Override
public TableName getTableName() {
- return modifiedTableDescriptor.getTableName();
+ return modifiedHTableDescriptor.getTableName();
}
@Override
@@ -240,27 +240,27 @@ public class ModifyTableProcedure
}
// check that we have at least 1 CF
- if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
+ if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- this.unmodifiedTableDescriptor =
+ this.unmodifiedHTableDescriptor =
env.getMasterServices().getTableDescriptors().get(getTableName());
if (env.getMasterServices().getTableStateManager()
.isTableState(getTableName(), TableState.State.ENABLED)) {
- if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor
+ if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
.getRegionReplication()) {
throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
}
}
- // Find out whether all column families in unmodifiedTableDescriptor also exists in
- // the modifiedTableDescriptor. This is to determine whether we are safe to rollback.
- final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames();
- final Set<byte[]> newFamilies = modifiedTableDescriptor.getColumnFamilyNames();
+ // Find out whether all column families in unmodifiedHTableDescriptor also exists in
+ // the modifiedHTableDescriptor. This is to determine whether we are safe to rollback.
+ final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
this.deleteColumnFamilyInModify = true;
@@ -287,7 +287,7 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
}
/**
@@ -296,10 +296,10 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
- // delete any new column families from the modifiedTableDescriptor.
- deleteFromFs(env, modifiedTableDescriptor, unmodifiedTableDescriptor);
+ // delete any new column families from the modifiedHTableDescriptor.
+ deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
@@ -312,17 +312,18 @@ public class ModifyTableProcedure
* @throws IOException
*/
private void deleteFromFs(final MasterProcedureEnv env,
- final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor)
+ final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor)
throws IOException {
- final Set<byte[]> oldFamilies = oldTableDescriptor.getColumnFamilyNames();
- final Set<byte[]> newFamilies = newTableDescriptor.getColumnFamilyNames();
+ final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
env,
getTableName(),
getRegionInfoList(env),
- familyName, oldTableDescriptor.getColumnFamily(familyName).isMobEnabled());
+ familyName,
+ oldHTableDescriptor.getFamily(familyName).isMobEnabled());
}
}
}
@@ -334,10 +335,10 @@ public class ModifyTableProcedure
*/
private void updateReplicaColumnsIfNeeded(
final MasterProcedureEnv env,
- final TableDescriptor oldTableDescriptor,
- final TableDescriptor newTableDescriptor) throws IOException {
- final int oldReplicaCount = oldTableDescriptor.getRegionReplication();
- final int newReplicaCount = newTableDescriptor.getRegionReplication();
+ final HTableDescriptor oldHTableDescriptor,
+ final HTableDescriptor newHTableDescriptor) throws IOException {
+ final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
+ final int newReplicaCount = newHTableDescriptor.getRegionReplication();
if (newReplicaCount < oldReplicaCount) {
Set<byte[]> tableRows = new HashSet<>();
@@ -401,10 +402,10 @@ public class ModifyTableProcedure
if (cpHost != null) {
switch (state) {
case MODIFY_TABLE_PRE_OPERATION:
- cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, getUser());
+ cpHost.preModifyTableAction(getTableName(), modifiedHTableDescriptor, getUser());
break;
case MODIFY_TABLE_POST_OPERATION:
- cpHost.postCompletedModifyTableAction(getTableName(), modifiedTableDescriptor,getUser());
+ cpHost.postCompletedModifyTableAction(getTableName(), modifiedHTableDescriptor,getUser());
break;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
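prepareModify decides whether the modification deletes a family by diffing the two key sets; getFamiliesKeys() is backed by a map ordered on byte content, so contains() on a byte[] compares by value here rather than by reference. A condensed sketch of that diff:

import java.util.Set;
import org.apache.hadoop.hbase.HTableDescriptor;

static boolean deletesAFamily(HTableDescriptor oldHtd, HTableDescriptor newHtd) {
  Set<byte[]> oldFamilies = oldHtd.getFamiliesKeys();
  Set<byte[]> newFamilies = newHtd.getFamiliesKeys();
  for (byte[] familyName : oldFamilies) {
    if (!newFamilies.contains(familyName)) {
      return true; // a pre-modify family is gone: rollback needs extra care
    }
  }
  return false;
}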
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index 4930396..cfd9df9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -33,12 +33,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -61,7 +61,7 @@ public class RestoreSnapshotProcedure
extends AbstractStateMachineTableProcedure<RestoreSnapshotState> {
private static final Log LOG = LogFactory.getLog(RestoreSnapshotProcedure.class);
- private TableDescriptor modifiedTableDescriptor;
+ private HTableDescriptor modifiedHTableDescriptor;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
@@ -82,24 +82,24 @@ public class RestoreSnapshotProcedure
}
public RestoreSnapshotProcedure(final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
- this(env, tableDescriptor, snapshot, false);
+ final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
+ this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
- * @param tableDescriptor the table to operate on
+ * @param hTableDescriptor the table to operate on
* @param snapshot snapshot to restore from
* @throws IOException
*/
public RestoreSnapshotProcedure(
final MasterProcedureEnv env,
- final TableDescriptor tableDescriptor,
+ final HTableDescriptor hTableDescriptor,
final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
// This is the new schema we are going to write out as this modification.
- this.modifiedTableDescriptor = tableDescriptor;
+ this.modifiedHTableDescriptor = hTableDescriptor;
// Snapshot information
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@@ -204,7 +204,7 @@ public class RestoreSnapshotProcedure
@Override
public TableName getTableName() {
- return modifiedTableDescriptor.getTableName();
+ return modifiedHTableDescriptor.getTableName();
}
@Override
@@ -236,7 +236,7 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
- .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
+ .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor));
if (regionsToRestore != null) {
for (HRegionInfo hri: regionsToRestore) {
@@ -278,8 +278,8 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo()));
snapshot = restoreSnapshotMsg.getSnapshot();
- modifiedTableDescriptor =
- ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getModifiedTableSchema());
+ modifiedHTableDescriptor =
+ ProtobufUtil.convertToHTableDesc(restoreSnapshotMsg.getModifiedTableSchema());
if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) {
regionsToRestore = null;
@@ -333,7 +333,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().checkTableModifiable(tableName);
// Check that we have at least 1 CF
- if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
+ if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
@@ -363,7 +363,7 @@ public class RestoreSnapshotProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
}
/**
@@ -386,7 +386,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().getConfiguration(),
fs,
manifest,
- modifiedTableDescriptor,
+ modifiedHTableDescriptor,
rootDir,
monitorException,
getMonitorStatus());
@@ -440,19 +440,19 @@ public class RestoreSnapshotProcedure
MetaTableAccessor.addRegionsToMeta(
conn,
regionsToAdd,
- modifiedTableDescriptor.getRegionReplication());
+ modifiedHTableDescriptor.getRegionReplication());
}
if (regionsToRestore != null) {
MetaTableAccessor.overwriteRegions(
conn,
regionsToRestore,
- modifiedTableDescriptor.getRegionReplication());
+ modifiedHTableDescriptor.getRegionReplication());
}
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
- modifiedTableDescriptor, parentsToChildrenPairMap);
+ modifiedHTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(conn, regionsToAdd);
// At this point the restore is complete.
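When the restore is written back to hbase:meta, both the added and the overwritten regions carry the descriptor's region replication so replica rows stay consistent. A sketch of that step, assuming a Connection and the two region lists from the procedure:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;

static void updateMeta(Connection conn, HTableDescriptor htd,
    List<HRegionInfo> regionsToAdd, List<HRegionInfo> regionsToRestore) throws IOException {
  int replication = htd.getRegionReplication();
  if (regionsToAdd != null) {
    MetaTableAccessor.addRegionsToMeta(conn, regionsToAdd, replication);
  }
  if (regionsToRestore != null) {
    MetaTableAccessor.overwriteRegions(conn, regionsToRestore, replication);
  }
}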
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 506c67d..e7f5ead 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -28,11 +28,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -48,7 +48,7 @@ public class TruncateTableProcedure
private boolean preserveSplits;
private List<HRegionInfo> regions;
- private TableDescriptor tableDescriptor;
+ private HTableDescriptor hTableDescriptor;
private TableName tableName;
public TruncateTableProcedure() {
@@ -95,7 +95,7 @@ public class TruncateTableProcedure
setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
break;
case TRUNCATE_TABLE_REMOVE_FROM_META:
- tableDescriptor = env.getMasterServices().getTableDescriptors()
+ hTableDescriptor = env.getMasterServices().getTableDescriptors()
.get(tableName);
DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
DeleteTableProcedure.deleteAssignmentState(env, getTableName());
@@ -105,26 +105,26 @@ public class TruncateTableProcedure
DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
if (!preserveSplits) {
// if we are not preserving splits, generate a new single region
- regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(tableDescriptor, null));
+ regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
} else {
regions = recreateRegionInfo(regions);
}
setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
break;
case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
- regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions);
+ regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
CreateTableProcedure.updateTableDescCache(env, getTableName());
setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
break;
case TRUNCATE_TABLE_ADD_TO_META:
- regions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, regions);
+ regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
break;
case TRUNCATE_TABLE_ASSIGN_REGIONS:
CreateTableProcedure.setEnablingState(env, getTableName());
addChildProcedure(env.getAssignmentManager().createAssignProcedures(regions));
setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
- tableDescriptor = null;
+ hTableDescriptor = null;
regions = null;
break;
case TRUNCATE_TABLE_POST_OPERATION:
@@ -216,8 +216,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setPreserveSplits(preserveSplits);
- if (tableDescriptor != null) {
- state.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
+ if (hTableDescriptor != null) {
+ state.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
} else {
state.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
@@ -237,8 +237,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
if (state.hasTableSchema()) {
- tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
- tableName = tableDescriptor.getTableName();
+ hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
+ tableName = hTableDescriptor.getTableName();
} else {
tableName = ProtobufUtil.toTableName(state.getTableName());
}
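Truncate keeps the captured HTableDescriptor across the delete-and-recreate steps, and preserveSplits alone decides whether the old region boundaries survive. A sketch of that decision, assuming the procedure's recreateRegionInfo helper is in scope:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

List<HRegionInfo> regionsAfterTruncate(HTableDescriptor htd,
    List<HRegionInfo> oldRegions, boolean preserveSplits) {
  if (!preserveSplits) {
    // drop all split points: one fresh region spanning the whole key space
    return Arrays.asList(ModifyRegionUtils.createHRegionInfos(htd, null));
  }
  return recreateRegionInfo(oldRegions); // same boundaries, new region ids
}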
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index e8131af..0448f92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -137,16 +137,16 @@ public final class MasterSnapshotVerifier {
* @param manifest snapshot manifest to inspect
*/
private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
- TableDescriptor htd = manifest.getTableDescriptor();
+ HTableDescriptor htd = manifest.getTableDescriptor();
if (htd == null) {
throw new CorruptedSnapshotException("Missing Table Descriptor",
ProtobufUtil.createSnapshotDesc(snapshot));
}
- if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) {
+ if (!htd.getNameAsString().equals(snapshot.getTable())) {
throw new CorruptedSnapshotException(
"Invalid Table Descriptor. Expected " + snapshot.getTable() + " name, got "
- + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
+ + htd.getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index b503d61..b81c7db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -38,13 +38,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -556,7 +555,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
cleanupSentinels();
// check to see if the table exists
- TableDescriptor desc = null;
+ HTableDescriptor desc = null;
try {
desc = master.getTableDescriptors().get(
TableName.valueOf(snapshot.getTable()));
@@ -680,10 +679,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
- final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
+ final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
- TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
+ HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
if (cpHost != null) {
cpHost.preCloneSnapshot(reqSnapshot, htd);
}
@@ -708,14 +707,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* The operation will fail if the destination table has a snapshot or restore in progress.
*
* @param snapshot Snapshot Descriptor
- * @param tableDescriptor Table Descriptor of the table to create
+ * @param hTableDescriptor Table Descriptor of the table to create
* @param nonceKey unique identifier to prevent duplicated RPC
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
- final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
+ final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
- TableName tableName = tableDescriptor.getTableName();
+ TableName tableName = hTableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@@ -730,7 +729,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
- tableDescriptor, snapshot, restoreAcl),
+ hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
@@ -766,7 +765,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs,
snapshotDir, snapshot);
- TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
+ HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// stop tracking "abandoned" handlers
@@ -800,7 +799,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
- final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
+ final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@@ -837,15 +836,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
* @param snapshot Snapshot Descriptor
- * @param tableDescriptor Table Descriptor
+ * @param hTableDescriptor Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
*/
private synchronized long restoreSnapshot(final SnapshotDescription snapshot,
- final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
+ final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
- final TableName tableName = tableDescriptor.getTableName();
+ final TableName tableName = hTableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@@ -860,7 +859,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
- tableDescriptor, snapshot, restoreAcl),
+ hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
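The clone path above copies the snapshot's descriptor under the new table name before submitting the procedure. A condensed sketch of that single copy step in the two API generations this thread flips between (method and class names are taken from the hunks above; the sketch itself is not part of the patch):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CloneDescriptorSketch {
  // reverted style: mutable copy via the HTableDescriptor constructor
  static HTableDescriptor oldStyle(TableName target, HTableDescriptor src) {
    return new HTableDescriptor(target, src);
  }
  // re-applied style: immutable copy via the builder overload added in HBASE-18503
  static TableDescriptor newStyle(TableName target, TableDescriptor src) {
    return TableDescriptorBuilder.copy(target, src);
  }
}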
[8/8] hbase git commit: HBASE-18503 Change ***Util and Master to use
TableDescriptor and ColumnFamilyDescriptor
Posted by ch...@apache.org.
HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25ff9d0b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25ff9d0b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25ff9d0b
Branch: refs/heads/master
Commit: 25ff9d0bbf36a68cdac99035c8d5ab1eb889ceb9
Parents: 12f2b02
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Thu Aug 24 13:03:38 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Thu Aug 24 13:03:38 2017 +0800
----------------------------------------------------------------------
.../hadoop/hbase/backup/util/BackupUtils.java | 4 +-
.../hadoop/hbase/backup/util/RestoreTool.java | 48 ++--
.../apache/hadoop/hbase/HColumnDescriptor.java | 11 +-
.../apache/hadoop/hbase/HTableDescriptor.java | 11 +-
.../client/ColumnFamilyDescriptorBuilder.java | 13 +-
.../apache/hadoop/hbase/client/HBaseAdmin.java | 41 +--
.../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 2 +-
.../hbase/client/TableDescriptorBuilder.java | 20 +-
.../hbase/shaded/protobuf/ProtobufUtil.java | 101 +-------
.../hbase/shaded/protobuf/RequestConverter.java | 18 +-
.../apache/hadoop/hbase/TableDescriptors.java | 15 +-
.../hbase/client/ClientSideRegionScanner.java | 3 +-
.../hbase/client/TableSnapshotScanner.java | 3 +-
.../mapreduce/TableSnapshotInputFormatImpl.java | 18 +-
.../hadoop/hbase/master/CatalogJanitor.java | 13 +-
.../master/ExpiredMobFileCleanerChore.java | 10 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 86 +++----
.../hadoop/hbase/master/MasterFileSystem.java | 24 +-
.../hbase/master/MasterMobCompactionThread.java | 10 +-
.../hadoop/hbase/master/MasterRpcServices.java | 25 +-
.../hadoop/hbase/master/MasterServices.java | 15 +-
.../hadoop/hbase/master/MobCompactionChore.java | 10 +-
.../hadoop/hbase/master/TableStateManager.java | 6 +-
.../assignment/MergeTableRegionsProcedure.java | 10 +-
.../master/assignment/RegionStateStore.java | 12 +-
.../master/balancer/RegionLocationFinder.java | 12 +-
.../master/cleaner/ReplicationMetaCleaner.java | 10 +-
.../procedure/AddColumnFamilyProcedure.java | 50 ++--
.../procedure/CloneSnapshotProcedure.java | 51 ++--
.../master/procedure/CreateTableProcedure.java | 66 ++---
.../procedure/DeleteColumnFamilyProcedure.java | 37 +--
.../procedure/ModifyColumnFamilyProcedure.java | 43 ++--
.../master/procedure/ModifyTableProcedure.java | 75 +++---
.../procedure/RestoreSnapshotProcedure.java | 34 +--
.../procedure/TruncateTableProcedure.java | 22 +-
.../master/snapshot/MasterSnapshotVerifier.java | 8 +-
.../hbase/master/snapshot/SnapshotManager.java | 29 ++-
.../master/snapshot/TakeSnapshotHandler.java | 10 +-
.../hadoop/hbase/mob/ExpiredMobFileCleaner.java | 10 +-
.../org/apache/hadoop/hbase/mob/MobUtils.java | 27 +-
.../hbase/mob/compactions/MobCompactor.java | 6 +-
.../compactions/PartitionedMobCompactor.java | 4 +-
.../hbase/regionserver/CompactionTool.java | 16 +-
.../hbase/regionserver/HRegionFileSystem.java | 8 +-
.../hbase/regionserver/HRegionServer.java | 8 +-
.../hbase/regionserver/RSRpcServices.java | 8 +-
.../regionserver/handler/OpenMetaHandler.java | 6 +-
.../handler/OpenPriorityRegionHandler.java | 5 +-
.../regionserver/handler/OpenRegionHandler.java | 10 +-
.../RegionReplicaReplicationEndpoint.java | 4 +-
.../hbase/snapshot/RestoreSnapshotHelper.java | 14 +-
.../hadoop/hbase/snapshot/SnapshotManifest.java | 18 +-
.../hadoop/hbase/util/FSTableDescriptors.java | 258 ++++++++++---------
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 68 ++---
.../hadoop/hbase/util/HBaseFsckRepair.java | 4 +-
.../hadoop/hbase/util/ModifyRegionUtils.java | 33 ++-
.../hadoop/hbase/HBaseTestingUtility.java | 13 +-
.../TestFSTableDescriptorForceCreation.java | 13 +-
.../TestHColumnDescriptorDefaultVersions.java | 12 +-
.../hbase/client/TestAsyncTableAdminApi.java | 5 +-
.../hbase/master/MockNoopMasterServices.java | 13 +-
.../master/assignment/MockMasterServices.java | 24 +-
.../MasterProcedureTestingUtility.java | 40 +--
.../procedure/TestCreateTableProcedure.java | 33 ++-
.../TestMasterFailoverWithProcedures.java | 4 +-
.../procedure/TestMasterProcedureWalLease.java | 4 +-
...stTableDescriptorModificationFromClient.java | 7 +-
.../TestPartitionedMobCompactor.java | 3 +-
.../regionserver/TestGetClosestAtOrBefore.java | 6 +-
.../TestRegionMergeTransactionOnCluster.java | 14 +-
.../regionserver/TestRegionServerNoMaster.java | 4 +-
.../hbase/security/access/SecureTestUtil.java | 21 +-
.../hbase/snapshot/MobSnapshotTestingUtils.java | 65 ++---
.../hbase/snapshot/SnapshotTestingUtils.java | 50 ++--
.../snapshot/TestRestoreSnapshotHelper.java | 16 +-
.../hbase/snapshot/TestSnapshotManifest.java | 8 +-
.../hbase/util/TestFSTableDescriptors.java | 81 +++---
77 files changed, 942 insertions(+), 977 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index ce77645..11a1a3d 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -139,7 +139,7 @@ public final class BackupUtils {
LOG.warn("Table " + table + " does not exists, skipping it.");
continue;
}
- HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
+ TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
// write a copy of descriptor to the target directory
Path target = new Path(backupInfo.getTableBackupDir(table));
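FSTableDescriptors.getTableDescriptorFromFs reads the descriptor persisted alongside the table data, and after this change it returns the TableDescriptor interface. A minimal usage sketch (root dir and table name are placeholders, not from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FsDescriptorSketch {
  public static TableDescriptor read(Path rootDir, String table) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = rootDir.getFileSystem(conf);
    // reads the .tableinfo file stored under the table directory
    return FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, TableName.valueOf(table));
  }
}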
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 0cfe099..2e311cf 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -33,16 +33,17 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreJob;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
@@ -122,10 +123,10 @@ public class RestoreTool {
}
- void modifyTableSync(Connection conn, HTableDescriptor desc) throws IOException {
+ void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
try (Admin admin = conn.getAdmin();) {
- admin.modifyTable(desc.getTableName(), desc);
+ admin.modifyTable(desc);
int attempt = 0;
int maxAttempts = 600;
while (!admin.isTableAvailable(desc.getTableName())) {
@@ -172,29 +173,30 @@ public class RestoreTool {
// adjust table schema
for (int i = 0; i < tableNames.length; i++) {
TableName tableName = tableNames[i];
- HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
+ TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
TableName newTableName = newTableNames[i];
- HTableDescriptor newTableDescriptor = new HTableDescriptor(admin.getTableDescriptor(newTableName));
- List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
- List<HColumnDescriptor> existingFamilies =
+ TableDescriptor newTableDescriptor = admin.listTableDescriptor(newTableName);
+ List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
+ List<ColumnFamilyDescriptor> existingFamilies =
Arrays.asList(newTableDescriptor.getColumnFamilies());
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
boolean schemaChangeNeeded = false;
- for (HColumnDescriptor family : families) {
+ for (ColumnFamilyDescriptor family : families) {
if (!existingFamilies.contains(family)) {
- newTableDescriptor.addFamily(family);
+ builder.addColumnFamily(family);
schemaChangeNeeded = true;
}
}
- for (HColumnDescriptor family : existingFamilies) {
+ for (ColumnFamilyDescriptor family : existingFamilies) {
if (!families.contains(family)) {
- newTableDescriptor.removeFamily(family.getName());
+ builder.removeColumnFamily(family.getName());
schemaChangeNeeded = true;
}
}
if (schemaChangeNeeded) {
- modifyTableSync(conn, newTableDescriptor);
+ modifyTableSync(conn, builder.build());
LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
}
}
@@ -253,24 +255,24 @@ public class RestoreTool {
/**
* Get table descriptor
* @param tableName is the table backed up
- * @return {@link HTableDescriptor} saved in backup image of the table
+ * @return {@link TableDescriptor} saved in backup image of the table
*/
- HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
+ TableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
Path tableInfoPath = this.getTableInfoPath(tableName);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
- HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
+ TableDescriptor tableDescriptor = manifest.getTableDescriptor();
if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ tableInfoPath.toString());
- LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
+ LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
+ " under tableInfoPath: " + tableInfoPath.toString());
}
return tableDescriptor;
}
- private HTableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
+ private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
String lastIncrBackupId) throws IOException {
if (lastIncrBackupId != null) {
String target =
@@ -289,7 +291,7 @@ public class RestoreTool {
FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
// get table descriptor first
- HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
+ TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
if (tableDescriptor != null) {
LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
}
@@ -325,7 +327,7 @@ public class RestoreTool {
LOG.debug("find table descriptor but no archive dir for table " + tableName
+ ", will only create table");
}
- tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
+ tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
truncateIfExists);
return;
@@ -336,9 +338,9 @@ public class RestoreTool {
}
if (tableDescriptor == null) {
- tableDescriptor = new HTableDescriptor(newTableName);
+ tableDescriptor = TableDescriptorBuilder.newBuilder(newTableName).build();
} else {
- tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
+ tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
}
// record all region dirs:
@@ -470,7 +472,7 @@ public class RestoreTool {
* @throws IOException exception
*/
private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
- TableName targetTableName, ArrayList<Path> regionDirList, HTableDescriptor htd,
+ TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
boolean truncateIfExists) throws IOException {
try (Admin admin = conn.getAdmin();) {
boolean createNew = false;
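The schema-adjustment loop above is the clearest instance of the migration pattern in this patch: instead of mutating an HTableDescriptor in place, the code copies the live descriptor into a TableDescriptorBuilder, applies addColumnFamily/removeColumnFamily, and builds an immutable result for modifyTable. A condensed sketch (family names are placeholders):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SchemaEditSketch {
  static TableDescriptor addAndDrop(TableDescriptor existing) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(existing);
    // add a family present in the backup image but missing from the live table
    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("restored_cf"));
    // drop a family present only on the live table
    builder.removeColumnFamily(Bytes.toBytes("stale_cf"));
    return builder.build(); // immutable; handed to Admin.modifyTable
  }
}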
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 507bf49..8802553 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -639,13 +639,10 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
if (this == obj) {
return true;
}
- if (obj == null) {
- return false;
+ if (obj instanceof HColumnDescriptor) {
+ return delegatee.equals(((HColumnDescriptor) obj).delegatee);
}
- if (!(obj instanceof HColumnDescriptor)) {
- return false;
- }
- return compareTo((HColumnDescriptor)obj) == 0;
+ return false;
}
/**
@@ -658,7 +655,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
@Override
public int compareTo(HColumnDescriptor other) {
- return delegatee.compareTo(other.delegatee);
+ return COMPARATOR.compare(this, other);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a0f23c1..86ba287 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -495,13 +495,10 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
if (this == obj) {
return true;
}
- if (obj == null) {
- return false;
+ if (obj instanceof HTableDescriptor) {
+ return delegatee.equals(((HTableDescriptor) obj).delegatee);
}
- if (!(obj instanceof HTableDescriptor)) {
- return false;
- }
- return compareTo((HTableDescriptor)obj) == 0;
+ return false;
}
/**
@@ -523,7 +520,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
*/
@Override
public int compareTo(final HTableDescriptor other) {
- return delegatee.compareTo(other.delegatee);
+ return TableDescriptor.COMPARATOR.compare(this, other);
}
/**
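The revised contract here: equals compares the wrapped delegatee directly, while compareTo routes through the shared TableDescriptor.COMPARATOR so HTableDescriptor orders consistently with any other TableDescriptor implementation. A small sketch of the observable behavior (table names are placeholders; the comparator's name-first ordering is assumed):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorCompareSketch {
  public static void main(String[] args) {
    HTableDescriptor a = new HTableDescriptor(TableName.valueOf("t1"));
    HTableDescriptor b = new HTableDescriptor(TableName.valueOf("t2"));
    System.out.println(a.equals(b));                                  // false: different delegatees
    System.out.println(TableDescriptor.COMPARATOR.compare(a, b) < 0); // true: t1 sorts before t2
  }
}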
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index b3abaca..67d2c56 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -1160,13 +1160,10 @@ public class ColumnFamilyDescriptorBuilder {
if (this == obj) {
return true;
}
- if (obj == null) {
- return false;
+ if (obj instanceof ModifyableColumnFamilyDescriptor) {
+ return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0;
}
- if (!(obj instanceof ModifyableColumnFamilyDescriptor)) {
- return false;
- }
- return compareTo((ModifyableColumnFamilyDescriptor) obj) == 0;
+ return false;
}
@Override
@@ -1188,7 +1185,7 @@ public class ColumnFamilyDescriptorBuilder {
* @see #parseFrom(byte[])
*/
private byte[] toByteArray() {
- return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this)
+ return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this)
.toByteArray());
}
@@ -1213,7 +1210,7 @@ public class ColumnFamilyDescriptorBuilder {
} catch (IOException e) {
throw new DeserializationException(e);
}
- return ProtobufUtil.convertToColumnDesc(cfs);
+ return ProtobufUtil.toColumnFamilyDescriptor(cfs);
}
@Override
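The renamed helpers feed the PB-magic-prefixed byte serialization that HColumnDescriptor and the builder share. A minimal round-trip sketch through the public static entry points (family name and version count are placeholders):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyPbRoundTripSketch {
  public static void main(String[] args) throws DeserializationException {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)
        .build();
    byte[] pb = ColumnFamilyDescriptorBuilder.toByteArray(cf); // PBUF-prefixed bytes
    ColumnFamilyDescriptor back = ColumnFamilyDescriptorBuilder.parseFrom(pb);
    System.out.println(back.getMaxVersions()); // 3
  }
}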
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 11f3273..a2fa7e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -378,7 +378,7 @@ public class HBaseAdmin implements Admin {
.setNamespaceName(Bytes.toString(name)).build())
.getTableSchemaList()
.stream()
- .map(ProtobufUtil::convertToTableDesc)
+ .map(ProtobufUtil::toTableDescriptor)
.collect(Collectors.toList());
}
});
@@ -459,8 +459,8 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
- return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(getRpcController(),
- req));
+ return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
+ req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
}
});
}
@@ -525,7 +525,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
- return ProtobufUtil.convertToTableDesc(htds.getTableSchemaList().get(0));
+ return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
}
return null;
}
@@ -554,7 +554,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
- return ProtobufUtil.convertToHTableDesc(htds.getTableSchemaList().get(0));
+ return new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
}
return null;
}
@@ -2300,7 +2300,7 @@ public class HBaseAdmin implements Admin {
.build()).getTableSchemaList();
HTableDescriptor[] res = new HTableDescriptor[list.size()];
for(int i=0; i < list.size(); i++) {
- res[i] = new ImmutableHTableDescriptor(ProtobufUtil.convertToHTableDesc(list.get(i)));
+ res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i)));
}
return res;
}
@@ -2419,33 +2419,14 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
- return ProtobufUtil.
- getHTableDescriptorArray(master.getTableDescriptors(getRpcController(), req));
+ return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req))
+ .stream()
+ .map(ImmutableHTableDescriptor::new)
+ .toArray(HTableDescriptor[]::new);
}
});
}
- /**
- * Get tableDescriptor
- * @param tableName one table name
- * @return HTD the HTableDescriptor or null if the table not exists
- * @throws IOException if a remote or network exception occurs
- */
- private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
- throws IOException {
- List<TableName> tableNames = new ArrayList<>(1);
- tableNames.add(tableName);
-
- HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
-
- if (htdl == null || htdl.length == 0) {
- return null;
- }
- else {
- return htdl[0];
- }
- }
-
@Override
public HTableDescriptor[] getTableDescriptors(List<String> names)
throws IOException {
@@ -3709,7 +3690,7 @@ public class HBaseAdmin implements Admin {
* @return the table descriptor
*/
protected TableDescriptor getTableDescriptor() throws IOException {
- return getAdmin().getTableDescriptorByTableName(getTableName());
+ return getAdmin().listTableDescriptor(getTableName());
}
/**
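With the private getTableDescriptorByTableName helper removed, single-table lookups go through the public listTableDescriptor call, which returns the immutable TableDescriptor interface. A minimal usage sketch (connection setup and table name are placeholders):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ListDescriptorSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = admin.listTableDescriptor(TableName.valueOf("mytable"));
      System.out.println(td.getTableName());
    }
  }
}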
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index ba68a96..19bc2f4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -453,7 +453,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
return;
}
if (!tableSchemas.isEmpty()) {
- future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0)));
+ future.complete(ProtobufUtil.toTableDescriptor(tableSchemas.get(0)));
} else {
future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index 44d5c99..a710077 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -252,10 +253,14 @@ public class TableDescriptorBuilder {
return new TableDescriptorBuilder(name);
}
- public static TableDescriptor copy(TableDescriptor desc) throws DeserializationException {
+ public static TableDescriptor copy(TableDescriptor desc) {
return new ModifyableTableDescriptor(desc);
}
+ public static TableDescriptor copy(TableName name, TableDescriptor desc) {
+ return new ModifyableTableDescriptor(name, desc);
+ }
+
/**
* Copy all configuration, values, families, and name from the input.
* @param desc The descriptor to copy
@@ -1012,13 +1017,10 @@ public class TableDescriptorBuilder {
if (this == obj) {
return true;
}
- if (obj == null) {
- return false;
+ if (obj instanceof ModifyableTableDescriptor) {
+ return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
}
- if (!(obj instanceof ModifyableTableDescriptor)) {
- return false;
- }
- return compareTo((ModifyableTableDescriptor) obj) == 0;
+ return false;
}
/**
@@ -1395,7 +1397,7 @@ public class TableDescriptorBuilder {
* @return the bytes in pb format
*/
private byte[] toByteArray() {
- return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
+ return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
}
/**
@@ -1415,7 +1417,7 @@ public class TableDescriptorBuilder {
HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
- return ProtobufUtil.convertToTableDesc(builder.build());
+ return ProtobufUtil.toTableDescriptor(builder.build());
} catch (IOException e) {
throw new DeserializationException(e);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index a527883..abcc5e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,15 +17,12 @@
*/
package org.apache.hadoop.hbase.shaded.protobuf;
-import java.awt.image.BandCombineOp;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
-import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -50,10 +47,8 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@@ -74,7 +69,6 @@ import org.apache.hadoop.hbase.client.Cursor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
@@ -102,7 +96,6 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
@@ -182,7 +175,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DynamicClassLoader;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Methods;
@@ -425,24 +417,6 @@ public final class ProtobufUtil {
}
/**
- * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
- *
- * @param proto the GetTableDescriptorsResponse
- * @return a immutable HTableDescriptor array
- * @deprecated Use {@link #toTableDescriptorList} after removing the HTableDescriptor
- */
- @Deprecated
- public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
- if (proto == null) return null;
-
- HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()];
- for (int i = 0; i < proto.getTableSchemaCount(); ++i) {
- ret[i] = new ImmutableHTableDescriptor(convertToHTableDesc(proto.getTableSchema(i)));
- }
- return ret;
- }
-
- /**
* Get a list of TableDescriptor from GetTableDescriptorsResponse protobuf
*
* @param proto the GetTableDescriptorsResponse
@@ -450,7 +424,7 @@ public final class ProtobufUtil {
*/
public static List<TableDescriptor> toTableDescriptorList(GetTableDescriptorsResponse proto) {
if (proto == null) return new ArrayList<>();
- return proto.getTableSchemaList().stream().map(ProtobufUtil::convertToTableDesc)
+ return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor)
.collect(Collectors.toList());
}
@@ -2841,11 +2815,11 @@ public final class ProtobufUtil {
}
/**
- * Converts an HColumnDescriptor to ColumnFamilySchema
- * @param hcd the HColummnDescriptor
+ * Converts a ColumnFamilyDescriptor to ColumnFamilySchema
+ * @param hcd the ColumnFamilyDescriptor
* @return Convert this instance to the pb column family type
*/
- public static ColumnFamilySchema convertToColumnFamilySchema(ColumnFamilyDescriptor hcd) {
+ public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd) {
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
builder.setName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
for (Map.Entry<Bytes, Bytes> e : hcd.getValues().entrySet()) {
@@ -2864,31 +2838,11 @@ public final class ProtobufUtil {
}
/**
- * Converts a ColumnFamilySchema to HColumnDescriptor
- * @param cfs the ColumnFamilySchema
- * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
- */
- @Deprecated
- public static HColumnDescriptor convertToHColumnDesc(final ColumnFamilySchema cfs) {
- // Use the empty constructor so we preserve the initial values set on construction for things
- // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
- // unrelated-looking test failures that are hard to trace back to here.
- HColumnDescriptor hcd = new HColumnDescriptor(cfs.getName().toByteArray());
- for (BytesBytesPair a: cfs.getAttributesList()) {
- hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
- }
- for (NameStringPair a: cfs.getConfigurationList()) {
- hcd.setConfiguration(a.getName(), a.getValue());
- }
- return hcd;
- }
-
- /**
- * Converts a ColumnFamilySchema to HColumnDescriptor
+ * Converts a ColumnFamilySchema to ColumnFamilyDescriptor
* @param cfs the ColumnFamilySchema
- * @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
+ * @return A {@link ColumnFamilyDescriptor} made from the passed in <code>cfs</code>
*/
- public static ColumnFamilyDescriptor convertToColumnDesc(final ColumnFamilySchema cfs) {
+ public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) {
// Use the empty constructor so we preserve the initial values set on construction for things
// like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
// unrelated-looking test failures that are hard to trace back to here.
@@ -2900,11 +2854,11 @@ public final class ProtobufUtil {
}
/**
- * Converts an HTableDescriptor to TableSchema
- * @param htd the HTableDescriptor
- * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
+ * Converts a TableDescriptor to TableSchema
+ * @param htd the TableDescriptor
+ * @return the current {@link TableDescriptor} converted into a pb TableSchema instance.
*/
- public static TableSchema convertToTableSchema(TableDescriptor htd) {
+ public static TableSchema toTableSchema(TableDescriptor htd) {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setTableName(toProtoTableName(htd.getTableName()));
for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
@@ -2914,7 +2868,7 @@ public final class ProtobufUtil {
builder.addAttributes(aBuilder.build());
}
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
- builder.addColumnFamilies(convertToColumnFamilySchema(hcd));
+ builder.addColumnFamilies(toColumnFamilySchema(hcd));
}
for (Map.Entry<String, String> e : htd.getConfiguration().entrySet()) {
NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
@@ -2926,43 +2880,16 @@ public final class ProtobufUtil {
}
/**
- * Converts a TableSchema to HTableDescriptor
- * @param ts A pb TableSchema instance.
- * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
- * @deprecated Use {@link #convertToTableDesc} after removing the HTableDescriptor
- */
- @Deprecated
- public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
- List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
- HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
- int index = 0;
- for (ColumnFamilySchema cfs: list) {
- hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
- }
- HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
- for (HColumnDescriptor hcd : hcds) {
- htd.addFamily(hcd);
- }
- for (BytesBytesPair a: ts.getAttributesList()) {
- htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
- }
- for (NameStringPair a: ts.getConfigurationList()) {
- htd.setConfiguration(a.getName(), a.getValue());
- }
- return htd;
- }
-
- /**
* Converts a TableSchema to TableDescriptor
* @param ts A pb TableSchema instance.
* @return A {@link TableDescriptor} made from the passed in pb <code>ts</code>.
*/
- public static TableDescriptor convertToTableDesc(final TableSchema ts) {
+ public static TableDescriptor toTableDescriptor(final TableSchema ts) {
TableDescriptorBuilder builder
= TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName()));
ts.getColumnFamiliesList()
.stream()
- .map(ProtobufUtil::convertToColumnDesc)
+ .map(ProtobufUtil::toColumnFamilyDescriptor)
.forEach(builder::addColumnFamily);
ts.getAttributesList()
.forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray()));
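The renamed pair toTableSchema/toTableDescriptor is symmetric: families, attributes and configuration all survive the round trip, per the converters above. A minimal sketch (table and family names are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

public class TableSchemaRoundTripSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    TableSchema schema = ProtobufUtil.toTableSchema(td);           // descriptor -> pb
    TableDescriptor back = ProtobufUtil.toTableDescriptor(schema); // pb -> descriptor
    System.out.println(back.getTableName()); // demo
  }
}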
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 08ed3dc..a8a56c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -1080,7 +1080,7 @@ public final class RequestConverter {
final long nonce) {
AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
- builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
+ builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@@ -1120,7 +1120,7 @@ public final class RequestConverter {
final long nonce) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
- builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
+ builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@@ -1306,28 +1306,28 @@ public final class RequestConverter {
/**
* Creates a protocol buffer CreateTableRequest
*
- * @param hTableDesc
+ * @param tableDescriptor
* @param splitKeys
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(
- final TableDescriptor hTableDesc,
+ final TableDescriptor tableDescriptor,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) {
- return buildCreateTableRequest(hTableDesc, Optional.ofNullable(splitKeys), nonceGroup, nonce);
+ return buildCreateTableRequest(tableDescriptor, Optional.ofNullable(splitKeys), nonceGroup, nonce);
}
/**
* Creates a protocol buffer CreateTableRequest
- * @param hTableDesc
+ * @param tableDescriptor
* @param splitKeys
* @return a CreateTableRequest
*/
- public static CreateTableRequest buildCreateTableRequest(TableDescriptor hTableDesc,
+ public static CreateTableRequest buildCreateTableRequest(TableDescriptor tableDescriptor,
Optional<byte[][]> splitKeys, long nonceGroup, long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
- builder.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDesc));
+ builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
splitKeys.ifPresent(keys -> Arrays.stream(keys).forEach(
key -> builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key))));
builder.setNonceGroup(nonceGroup);
@@ -1349,7 +1349,7 @@ public final class RequestConverter {
final long nonce) {
ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
- builder.setTableSchema(ProtobufUtil.convertToTableSchema(tableDesc));
+ builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
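buildCreateTableRequest wraps the descriptor into a pb CreateTableRequest, attaching the optional split keys and the nonce pair. A minimal sketch (the nonce values and names are placeholders, not from the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableRequestSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    byte[][] splits = { Bytes.toBytes("m") }; // one split key -> two regions
    CreateTableRequest req = RequestConverter.buildCreateTableRequest(td, splits, 0L, 0L);
    System.out.println(req.getSplitKeysCount()); // 1
  }
}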
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 7de2629..58b28e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
/**
* Get, remove and modify table descriptors.
@@ -33,7 +34,7 @@ public interface TableDescriptors {
* @return TableDescriptor for tablename
* @throws IOException
*/
- HTableDescriptor get(final TableName tableName)
+ TableDescriptor get(final TableName tableName)
throws IOException;
/**
@@ -41,16 +42,16 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, HTableDescriptor> getByNamespace(String name)
+ Map<String, TableDescriptor> getByNamespace(String name)
throws IOException;
/**
- * Get Map of all HTableDescriptors. Populates the descriptor cache as a
+ * Get Map of all TableDescriptors. Populates the descriptor cache as a
* side effect.
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, HTableDescriptor> getAll()
+ Map<String, TableDescriptor> getAll()
throws IOException;
/**
@@ -59,7 +60,7 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
- Map<String, HTableDescriptor> getAllDescriptors()
+ Map<String, TableDescriptor> getAllDescriptors()
throws IOException;
/**
@@ -67,7 +68,7 @@ public interface TableDescriptors {
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
- void add(final HTableDescriptor htd)
+ void add(final TableDescriptor htd)
throws IOException;
/**
@@ -75,7 +76,7 @@ public interface TableDescriptors {
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
- HTableDescriptor remove(final TableName tablename)
+ TableDescriptor remove(final TableName tablename)
throws IOException;
/**
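FSTableDescriptors is the filesystem-backed implementation of this interface used throughout the patch. A minimal lookup sketch against the TableDescriptors contract as it stands after this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class TableDescriptorsSketch {
  public static TableDescriptor lookup(TableName name) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableDescriptors tds = new FSTableDescriptors(conf); // reads from hbase.rootdir
    return tds.get(name); // null if no descriptor is persisted for the table
  }
}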
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 7ae0537..1d0d57b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -50,7 +49,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
List<Cell> values;
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
- Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
+ Path rootDir, TableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
throws IOException {
// region is immutable, set isolation level
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index b861969..bcd433c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -75,7 +74,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
private Path restoreDir;
private Scan scan;
private ArrayList<HRegionInfo> regions;
- private HTableDescriptor htd;
+ private TableDescriptor htd;
private ClientSideRegionScanner currentRegionScanner = null;
private int currentRegion = -1;
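TableSnapshotScanner stitches a ClientSideRegionScanner over each of the snapshot's regions after restoring the snapshot into a scratch directory; with this patch the descriptor it carries is a TableDescriptor. A minimal usage sketch (snapshot name and restore dir are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;

public class SnapshotScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path restoreDir = new Path("/tmp/snapshot-restore"); // scratch dir, must be writable
    try (TableSnapshotScanner scanner =
        new TableSnapshotScanner(conf, restoreDir, "my_snapshot", new Scan())) {
      for (Result r; (r = scanner.next()) != null; ) {
        System.out.println(r);
      }
    }
  }
}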
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 2f6955e..bf11473 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.mapreduce;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -28,7 +29,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
@@ -81,7 +81,7 @@ public class TableSnapshotInputFormatImpl {
*/
public static class InputSplit implements Writable {
- private HTableDescriptor htd;
+ private TableDescriptor htd;
private HRegionInfo regionInfo;
private String[] locations;
private String scan;
@@ -90,7 +90,7 @@ public class TableSnapshotInputFormatImpl {
// constructor for mapreduce framework / Writable
public InputSplit() {}
- public InputSplit(HTableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
+ public InputSplit(TableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
Scan scan, Path restoreDir) {
this.htd = htd;
this.regionInfo = regionInfo;
@@ -108,7 +108,7 @@ public class TableSnapshotInputFormatImpl {
this.restoreDir = restoreDir.toString();
}
- public HTableDescriptor getHtd() {
+ public TableDescriptor getHtd() {
return htd;
}
@@ -129,7 +129,7 @@ public class TableSnapshotInputFormatImpl {
return locations;
}
- public HTableDescriptor getTableDescriptor() {
+ public TableDescriptor getTableDescriptor() {
return htd;
}
@@ -142,7 +142,7 @@ public class TableSnapshotInputFormatImpl {
@Override
public void write(DataOutput out) throws IOException {
TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
- .setTable(ProtobufUtil.convertToTableSchema(htd))
+ .setTable(ProtobufUtil.toTableSchema(htd))
.setRegion(HRegionInfo.convert(regionInfo));
for (String location : locations) {
@@ -169,7 +169,7 @@ public class TableSnapshotInputFormatImpl {
byte[] buf = new byte[len];
in.readFully(buf);
TableSnapshotRegionSplit split = TableSnapshotRegionSplit.PARSER.parseFrom(buf);
- this.htd = ProtobufUtil.convertToHTableDesc(split.getTable());
+ this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
this.regionInfo = HRegionInfo.convert(split.getRegion());
List<String> locationsList = split.getLocationsList();
this.locations = locationsList.toArray(new String[locationsList.size()]);
@@ -196,7 +196,7 @@ public class TableSnapshotInputFormatImpl {
public void initialize(InputSplit split, Configuration conf) throws IOException {
this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
this.split = split;
- HTableDescriptor htd = split.htd;
+ TableDescriptor htd = split.htd;
HRegionInfo hri = this.split.getRegionInfo();
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
@@ -311,7 +311,7 @@ public class TableSnapshotInputFormatImpl {
public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest,
List<HRegionInfo> regionManifests, Path restoreDir, Configuration conf) throws IOException {
// load table descriptor
- HTableDescriptor htd = manifest.getTableDescriptor();
+ TableDescriptor htd = manifest.getTableDescriptor();
Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 8daa7db..bcda145 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -31,21 +31,20 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.GCMergedRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
@@ -206,7 +205,7 @@ public class CatalogJanitor extends ScheduledChore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
- HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
+ TableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
HRegionFileSystem regionFs = null;
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
@@ -414,12 +413,12 @@ public class CatalogJanitor extends ScheduledChore {
}
boolean references = false;
- HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
+ TableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
this.services.getConfiguration(), fs, tabledir, daughter, true);
- for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
+ for (ColumnFamilyDescriptor family: parentDescriptor.getColumnFamilies()) {
if ((references = regionFs.hasReferences(family.getNameAsString()))) {
break;
}
@@ -432,7 +431,7 @@ public class CatalogJanitor extends ScheduledChore {
return new Pair<>(Boolean.TRUE, Boolean.valueOf(references));
}
- private HTableDescriptor getTableDescriptor(final TableName tableName)
+ private TableDescriptor getTableDescriptor(final TableName tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(tableName);
}
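The loop above shows the general shape of family iteration after this change; a compile-only sketch, where hasReferences stands in for the real HRegionFileSystem check:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class FamilyReferenceSketch {
  // getFamilies() (mutable HColumnDescriptors) became getColumnFamilies(),
  // which returns a read-only ColumnFamilyDescriptor[].
  static boolean anyFamilyHasReferences(TableDescriptor parentDescriptor) {
    for (ColumnFamilyDescriptor family : parentDescriptor.getColumnFamilies()) {
      if (hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }
  // Placeholder for regionFs.hasReferences(familyName).
  static boolean hasReferences(String familyName) {
    return false;
  }
}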
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
index faa4f0e..c4438bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
@@ -23,11 +23,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
@@ -61,9 +61,9 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
- Map<String, HTableDescriptor> map = htds.getAll();
- for (HTableDescriptor htd : map.values()) {
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ Map<String, TableDescriptor> map = htds.getAll();
+ for (TableDescriptor htd : map.values()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
// clean only for mob-enabled column.
// obtain a read table lock before cleaning, synchronize with MobFileCompactionChore.
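A sketch of the chore's selection rule in isolation, assuming only the client-side descriptor interfaces:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MobCleanableFamiliesSketch {
  // Clean only mob-enabled families that keep no minimum versions.
  static List<ColumnFamilyDescriptor> cleanableMobFamilies(TableDescriptor htd) {
    List<ColumnFamilyDescriptor> result = new ArrayList<>();
    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
      if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
        result.add(hcd);
      }
    }
    return result;
  }
}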
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6b4d4e9..93624de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -41,6 +41,7 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
import java.util.regex.Pattern;
import javax.servlet.ServletException;
@@ -60,10 +61,8 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -77,9 +76,12 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -590,11 +592,9 @@ public class HMaster extends HRegionServer implements MasterServices {
return connector.getLocalPort();
}
- @Override
- protected TableDescriptors getFsTableDescriptors() throws IOException {
- return super.getFsTableDescriptors();
+ protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
+ return builder -> builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
}
-
/**
* For compatibility, if failed with regionserver credentials, try the master one
*/
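The observer above replaces the in-place mutation of the META descriptor removed further down in this file; a sketch of the idea, with the replica count as a plain parameter:

import java.util.function.Function;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MetaObserverSketch {
  // Since TableDescriptor is immutable, the master hands FSTableDescriptors a
  // function that adjusts the builder before build() instead of editing the
  // built descriptor afterwards.
  static Function<TableDescriptorBuilder, TableDescriptorBuilder> metaTableObserver(int metaReplicas) {
    return builder -> builder.setRegionReplication(metaReplicas);
  }
}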
@@ -761,9 +761,7 @@ public class HMaster extends HRegionServer implements MasterServices {
// enable table descriptors cache
this.tableDescriptors.setCacheOn();
- // set the META's descriptor to the correct replication
- this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
- conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
status.setStatus("Pre-loading table descriptors");
@@ -1501,7 +1499,7 @@ public class HMaster extends HRegionServer implements MasterServices {
return false;
}
- HTableDescriptor tblDesc = getTableDescriptors().get(table);
+ TableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
@@ -1712,34 +1710,34 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long createTable(
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
checkInitialized();
- String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
+ String namespace = tableDescriptor.getTableName().getNamespaceAsString();
this.clusterSchemaService.getNamespace(namespace);
- HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
- sanityCheckTableDescriptor(hTableDescriptor);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, splitKeys);
+ sanityCheckTableDescriptor(tableDescriptor);
return MasterProcedureUtil.submitProcedure(
new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
- getMaster().getMasterCoprocessorHost().preCreateTable(hTableDescriptor, newRegions);
+ getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions);
- LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
+ LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
// TODO: We can handle/merge duplicate requests, and differentiate the case of
// TableExistsException by saying if the schema is the same or not.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
submitProcedure(new CreateTableProcedure(
- procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch));
+ procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch));
latch.await();
- getMaster().getMasterCoprocessorHost().postCreateTable(hTableDescriptor, newRegions);
+ getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions);
}
@Override
@@ -1750,25 +1748,25 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
- public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
+ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
if (isStopped()) {
throw new MasterNotRunningException();
}
- TableName tableName = hTableDescriptor.getTableName();
+ TableName tableName = tableDescriptor.getTableName();
if (!(tableName.isSystemTable())) {
throw new IllegalArgumentException(
"Only system table creation can use this createSystemTable API");
}
- HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, null);
- LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
+ LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
// This special create table is called locally to master. Therefore, no RPC means no need
// to use nonce to detect duplicated RPC call.
long procId = this.procedureExecutor.submitProcedure(
- new CreateTableProcedure(procedureExecutor.getEnvironment(), hTableDescriptor, newRegions));
+ new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
return procId;
}
@@ -1778,7 +1776,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* values (compression, etc) work. Throws an exception if something is wrong.
* @throws IOException
*/
- private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
+ private void sanityCheckTableDescriptor(final TableDescriptor htd) throws IOException {
final String CONF_KEY = "hbase.table.sanity.checks";
boolean logWarn = false;
if (!conf.getBoolean(CONF_KEY, true)) {
@@ -1848,7 +1846,7 @@ public class HMaster extends HRegionServer implements MasterServices {
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.getTimeToLive() <= 0) {
String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
@@ -1869,7 +1867,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
// max versions already being checked
- // HBASE-13776 Setting illegal versions for HColumnDescriptor
+ // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
// does not throw IllegalArgumentException
// check minVersions <= maxVersions
if (hcd.getMinVersions() > hcd.getMaxVersions()) {
@@ -1893,7 +1891,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
- private void checkReplicationScope(HColumnDescriptor hcd) throws IOException{
+ private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException{
// check replication scope
WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
if (scop == null) {
@@ -1905,7 +1903,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
- private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd)
+ private void checkCompactionPolicy(Configuration conf, TableDescriptor htd)
throws IOException {
// FIFO compaction has some requirements
// Actually FCP ignores periodic major compactions
@@ -1925,7 +1923,7 @@ public class HMaster extends HRegionServer implements MasterServices {
blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
}
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
String compactionPolicy =
hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
if (compactionPolicy == null) {
@@ -1938,7 +1936,7 @@ public class HMaster extends HRegionServer implements MasterServices {
String message = null;
// 1. Check TTL
- if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
+ if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
message = "Default TTL is not supported for FIFO compaction";
throw new IOException(message);
}
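The TTL default moved with the class rename; a minimal sketch of the check as it reads after this patch:

import java.io.IOException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

public class FifoTtlCheckSketch {
  // HColumnDescriptor.DEFAULT_TTL is now ColumnFamilyDescriptorBuilder.DEFAULT_TTL.
  static void checkTtlForFifo(ColumnFamilyDescriptor hcd) throws IOException {
    if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
      throw new IOException("Default TTL is not supported for FIFO compaction");
    }
  }
}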
@@ -2040,36 +2038,36 @@ public class HMaster extends HRegionServer implements MasterServices {
}, getServerName().toShortString() + ".masterManager"));
}
- private void checkCompression(final HTableDescriptor htd)
+ private void checkCompression(final TableDescriptor htd)
throws IOException {
if (!this.masterCheckCompression) return;
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
checkCompression(hcd);
}
}
- private void checkCompression(final HColumnDescriptor hcd)
+ private void checkCompression(final ColumnFamilyDescriptor hcd)
throws IOException {
if (!this.masterCheckCompression) return;
CompressionTest.testCompression(hcd.getCompressionType());
CompressionTest.testCompression(hcd.getCompactionCompressionType());
}
- private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
+ private void checkEncryption(final Configuration conf, final TableDescriptor htd)
throws IOException {
if (!this.masterCheckEncryption) return;
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
checkEncryption(conf, hcd);
}
}
- private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
+ private void checkEncryption(final Configuration conf, final ColumnFamilyDescriptor hcd)
throws IOException {
if (!this.masterCheckEncryption) return;
EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
}
- private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
+ private void checkClassLoading(final Configuration conf, final TableDescriptor htd)
throws IOException {
RegionSplitPolicy.getSplitPolicyClass(htd, conf);
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
@@ -2143,7 +2141,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long addColumn(
final TableName tableName,
- final HColumnDescriptor columnDescriptor,
+ final ColumnFamilyDescriptor columnDescriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@@ -2179,7 +2177,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long modifyColumn(
final TableName tableName,
- final HColumnDescriptor descriptor,
+ final ColumnFamilyDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@@ -2373,7 +2371,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
- public long modifyTable(final TableName tableName, final HTableDescriptor descriptor,
+ public long modifyTable(final TableName tableName, final TableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
checkInitialized();
sanityCheckTableDescriptor(descriptor);
@@ -3127,7 +3125,7 @@ public class HMaster extends HRegionServer implements MasterServices {
throws IOException {
if (tableNameList == null || tableNameList.isEmpty()) {
// request for all TableDescriptors
- Collection<HTableDescriptor> allHtds;
+ Collection<TableDescriptor> allHtds;
if (namespace != null && namespace.length() > 0) {
// Do a check on the namespace existence. Will fail if does not exist.
this.clusterSchemaService.getNamespace(namespace);
@@ -3135,7 +3133,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
allHtds = tableDescriptors.getAll().values();
}
- for (HTableDescriptor desc: allHtds) {
+ for (TableDescriptor desc: allHtds) {
if (tableStateManager.isTablePresent(desc.getTableName())
&& (includeSysTables || !desc.getTableName().isSystemTable())) {
htds.add(desc);
@@ -3144,7 +3142,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
for (TableName s: tableNameList) {
if (tableStateManager.isTablePresent(s)) {
- HTableDescriptor desc = tableDescriptors.get(s);
+ TableDescriptor desc = tableDescriptors.get(s);
if (desc != null) {
htds.add(desc);
}
@@ -3249,7 +3247,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(TableName tableName,
- List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
+ List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
mobCompactThread.requestMobCompaction(conf, fs, tableName, columns, allFiles);
}
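A sketch of the listing loop's new shape, assuming only the read-only TableDescriptor interface:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorListingSketch {
  // The collection now carries TableDescriptor end to end; the system-table
  // filter itself is unchanged.
  static List<TableDescriptor> visibleTables(Collection<TableDescriptor> allHtds,
      boolean includeSysTables) {
    List<TableDescriptor> htds = new ArrayList<>();
    for (TableDescriptor desc : allHtds) {
      if (includeSysTables || !desc.getTableName().isSystemTable()) {
        htds.add(desc);
      }
    }
    return htds;
  }
}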
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index ee195cc..f9d47e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -28,13 +28,15 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
@@ -387,10 +389,8 @@ public class MasterFileSystem {
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
- HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
- setInfoFamilyCachingForMeta(metaDescriptor, false);
- HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
- setInfoFamilyCachingForMeta(metaDescriptor, true);
+ TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+ HRegion meta = HRegion.createHRegion(metaHRI, rd, c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
meta.close();
} catch (IOException e) {
e = e instanceof RemoteException ?
@@ -403,13 +403,17 @@ public class MasterFileSystem {
/**
* Enable in memory caching for hbase:meta
*/
- public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
- for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
+ public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor, final boolean b) {
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
+ for (ColumnFamilyDescriptor hcd: metaDescriptor.getColumnFamilies()) {
if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
- hcd.setBlockCacheEnabled(b);
- hcd.setInMemory(b);
+ builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
+ .setBlockCacheEnabled(b)
+ .setInMemory(b)
+ .build());
}
}
+ return builder.build();
}
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
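setInfoFamilyCachingForMeta is the clearest example of the copy-on-write style the immutable descriptors force; the same logic as above, restated as a standalone sketch:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaCachingSketch {
  // Instead of flipping flags on a shared HColumnDescriptor, rebuild the
  // family and return a fresh TableDescriptor; callers use the return value.
  static TableDescriptor withInfoFamilyCaching(TableDescriptor meta, boolean enabled) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(meta);
    for (ColumnFamilyDescriptor hcd : meta.getColumnFamilies()) {
      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
        builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
            .setBlockCacheEnabled(enabled)
            .setInMemory(enabled)
            .build());
      }
    }
    return builder.build();
  }
}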
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index 2b1232a..d092efe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -31,9 +31,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
@@ -79,7 +79,7 @@ public class MasterMobCompactionThread {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
+ List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
master.reportMobCompactionStart(tableName);
try {
masterMobPool.execute(new CompactionRunner(fs, tableName, columns,
@@ -102,11 +102,11 @@ public class MasterMobCompactionThread {
private class CompactionRunner implements Runnable {
private FileSystem fs;
private TableName tableName;
- private List<HColumnDescriptor> hcds;
+ private List<ColumnFamilyDescriptor> hcds;
private boolean allFiles;
private ExecutorService pool;
- public CompactionRunner(FileSystem fs, TableName tableName, List<HColumnDescriptor> hcds,
+ public CompactionRunner(FileSystem fs, TableName tableName, List<ColumnFamilyDescriptor> hcds,
boolean allFiles, ExecutorService pool) {
super();
this.fs = fs;
@@ -123,7 +123,7 @@ public class MasterMobCompactionThread {
MobUtils.getTableLockName(tableName), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
try {
- for (HColumnDescriptor hcd : hcds) {
+ for (ColumnFamilyDescriptor hcd : hcds) {
MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock);
}
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6e9b1e2..3ec2c45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -31,10 +31,8 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@@ -43,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
@@ -363,7 +362,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.addColumn(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
+ ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@@ -439,11 +438,11 @@ public class MasterRpcServices extends RSRpcServices
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException {
- HTableDescriptor hTableDescriptor = ProtobufUtil.convertToHTableDesc(req.getTableSchema());
+ TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
try {
long procId =
- master.createTable(hTableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
+ master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
return CreateTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
@@ -865,7 +864,7 @@ public class MasterRpcServices extends RSRpcServices
if (descriptors != null && descriptors.size() > 0) {
// Add the table descriptors to the response
for (TableDescriptor htd: descriptors) {
- builder.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
+ builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
}
}
return builder.build();
@@ -1118,7 +1117,7 @@ public class MasterRpcServices extends RSRpcServices
ListTableDescriptorsByNamespaceResponse.newBuilder();
for (TableDescriptor htd : master
.listTableDescriptorsByNamespace(request.getNamespaceName())) {
- b.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
+ b.addTableSchema(ProtobufUtil.toTableSchema(htd));
}
return b.build();
} catch (IOException e) {
@@ -1147,7 +1146,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyColumn(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
+ ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@@ -1181,7 +1180,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyTable(
ProtobufUtil.toTableName(req.getTableName()),
- ProtobufUtil.convertToHTableDesc(req.getTableSchema()),
+ ProtobufUtil.toTableDescriptor(req.getTableSchema()),
req.getNonceGroup(),
req.getNonce());
return ModifyTableResponse.newBuilder().setProcId(procId).build();
@@ -1532,12 +1531,12 @@ public class MasterRpcServices extends RSRpcServices
throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
}
boolean allFiles = false;
- List<HColumnDescriptor> compactedColumns = new ArrayList<>();
- HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
+ List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
+ ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
byte[] family = null;
if (request.hasFamily()) {
family = request.getFamily().toByteArray();
- for (HColumnDescriptor hcd : hcds) {
+ for (ColumnFamilyDescriptor hcd : hcds) {
if (Bytes.equals(family, hcd.getName())) {
if (!hcd.isMobEnabled()) {
LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
@@ -1548,7 +1547,7 @@ public class MasterRpcServices extends RSRpcServices
}
}
} else {
- for (HColumnDescriptor hcd : hcds) {
+ for (ColumnFamilyDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
compactedColumns.add(hcd);
}
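The RPC layer converts in both directions with the renamed helpers; a minimal sketch ("f" is a placeholder family name):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema;

public class ColumnFamilySchemaRoundTripSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of("f");
    // convertToColumnFamilySchema/convertToHColumnDesc became
    // toColumnFamilySchema/toColumnFamilyDescriptor.
    ColumnFamilySchema schema = ProtobufUtil.toColumnFamilySchema(hcd);
    ColumnFamilyDescriptor copy = ProtobufUtil.toColumnFamilyDescriptor(schema);
    System.out.println(copy.getNameAsString());
  }
}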
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 6e97bf4..cde9e34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -21,9 +21,7 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
@@ -32,6 +30,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -159,17 +158,17 @@ public interface MasterServices extends Server {
* a single region is created.
*/
long createTable(
- final HTableDescriptor desc,
+ final TableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException;
/**
* Create a system table using the given table definition.
- * @param hTableDescriptor The system table definition
+ * @param tableDescriptor The system table definition
* a single region is created.
*/
- long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException;
+ long createSystemTable(final TableDescriptor tableDescriptor) throws IOException;
/**
* Delete a table
@@ -207,7 +206,7 @@ public interface MasterServices extends Server {
*/
long modifyTable(
final TableName tableName,
- final HTableDescriptor descriptor,
+ final TableDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
@@ -247,7 +246,7 @@ public interface MasterServices extends Server {
*/
long addColumn(
final TableName tableName,
- final HColumnDescriptor column,
+ final ColumnFamilyDescriptor column,
final long nonceGroup,
final long nonce)
throws IOException;
@@ -262,7 +261,7 @@ public interface MasterServices extends Server {
*/
long modifyColumn(
final TableName tableName,
- final HColumnDescriptor descriptor,
+ final ColumnFamilyDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 42a5445..476c65c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
- Map<String, HTableDescriptor> map = htds.getAll();
- for (HTableDescriptor htd : map.values()) {
+ Map<String, TableDescriptor> map = htds.getAll();
+ for (TableDescriptor htd : map.values()) {
if (!master.getTableStateManager().isTableState(htd.getTableName(),
TableState.State.ENABLED)) {
continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
- for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
if (!hcd.isMobEnabled()) {
continue;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 18f6856..fb83971 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
throws IOException {
- final Map<String, HTableDescriptor> allDescriptors =
+ final Map<String, TableDescriptor> allDescriptors =
tableDescriptors.getAllDescriptors();
final Map<String, TableState> states = new HashMap<>();
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
return true;
}
});
- for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
+ for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
String table = entry.getKey();
if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
continue;
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9aaf297..c398c9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -31,18 +31,18 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.master.CatalogJanitor;
@@ -603,10 +603,10 @@ public class MergeTableRegionsProcedure
throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Configuration conf = env.getMasterConfiguration();
- final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
+ final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
for (String family: regionFs.getFamilies()) {
- final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
+ final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes());
final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
if (storeFiles != null && storeFiles.size() > 0) {
@@ -682,7 +682,7 @@ public class MergeTableRegionsProcedure
}
private int getRegionReplication(final MasterProcedureEnv env) throws IOException {
- final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
+ final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
return htd.getRegionReplication();
}
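Family lookup is renamed along with the types; a one-method sketch:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyLookupSketch {
  // htd.getFamily(byte[]) became htd.getColumnFamily(byte[]); the result is
  // read-only, so callers can no longer mutate it in place.
  static ColumnFamilyDescriptor lookup(TableDescriptor htd, String family) {
    return htd.getColumnFamily(Bytes.toBytes(family));
  }
}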
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 627eb57..072800b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
@@ -221,7 +221,7 @@ public class RegionStateStore {
// ============================================================================================
public void splitRegion(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
- final HTableDescriptor htd = getTableDescriptor(parent.getTable());
+ final TableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.splitRegion(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), hasSerialReplicationScope(htd));
}
@@ -231,7 +231,7 @@ public class RegionStateStore {
// ============================================================================================
public void mergeRegions(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
- final HTableDescriptor htd = getTableDescriptor(parent.getTable());
+ final TableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), EnvironmentEdgeManager.currentTime(),
hasSerialReplicationScope(htd));
@@ -255,15 +255,15 @@ public class RegionStateStore {
return hasSerialReplicationScope(getTableDescriptor(tableName));
}
- private boolean hasSerialReplicationScope(final HTableDescriptor htd) {
+ private boolean hasSerialReplicationScope(final TableDescriptor htd) {
return (htd != null)? htd.hasSerialReplicationScope(): false;
}
- private int getRegionReplication(final HTableDescriptor htd) {
+ private int getRegionReplication(final TableDescriptor htd) {
return (htd != null) ? htd.getRegionReplication() : 1;
}
- private HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
+ private TableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
return master.getTableDescriptors().get(tableName);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
index d9a1ab8..5e3d8c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -194,7 +194,7 @@ class RegionLocationFinder {
*/
protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) {
try {
- HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
+ TableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
if (tableDescriptor != null) {
HDFSBlocksDistribution blocksDistribution =
HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
@@ -209,14 +209,14 @@ class RegionLocationFinder {
}
/**
- * return HTableDescriptor for a given tableName
+ * return TableDescriptor for a given tableName
*
* @param tableName the table name
- * @return HTableDescriptor
+ * @return TableDescriptor
* @throws IOException
*/
- protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException {
- HTableDescriptor tableDescriptor = null;
+ protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException {
+ TableDescriptor tableDescriptor = null;
try {
if (this.services != null && this.services.getTableDescriptors() != null) {
tableDescriptor = this.services.getTableDescriptors().get(tableName);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
index 45b2401..f1ff936 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java
@@ -27,17 +27,17 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.util.Bytes;
@@ -63,11 +63,11 @@ public class ReplicationMetaCleaner extends ScheduledChore {
@Override
protected void chore() {
try {
- Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
+ Map<String, TableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
Map<String, Set<String>> serialTables = new HashMap<>();
- for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
+ for (Map.Entry<String, TableDescriptor> entry : tables.entrySet()) {
boolean hasSerialScope = false;
- for (HColumnDescriptor column : entry.getValue().getFamilies()) {
+ for (ColumnFamilyDescriptor column : entry.getValue().getColumnFamilies()) {
if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
hasSerialScope = true;
break;
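The serial-scope test, restated as a standalone sketch over the new interfaces:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class SerialScopeSketch {
  static boolean hasSerialScope(TableDescriptor htd) {
    for (ColumnFamilyDescriptor column : htd.getColumnFamilies()) {
      if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
        return true;
      }
    }
    return false;
  }
}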
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index 34c1853..f19195e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -25,12 +25,13 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -45,30 +46,30 @@ public class AddColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class);
private TableName tableName;
- private HTableDescriptor unmodifiedHTableDescriptor;
- private HColumnDescriptor cfDescriptor;
+ private TableDescriptor unmodifiedTableDescriptor;
+ private ColumnFamilyDescriptor cfDescriptor;
private List<HRegionInfo> regionInfoList;
private Boolean traceEnabled;
public AddColumnFamilyProcedure() {
super();
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final HColumnDescriptor cfDescriptor) throws IOException {
+ final ColumnFamilyDescriptor cfDescriptor) throws IOException {
this(env, tableName, cfDescriptor, null);
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
+ final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -172,10 +173,10 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
- .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
- if (unmodifiedHTableDescriptor != null) {
+ .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
+ if (unmodifiedTableDescriptor != null) {
addCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
}
addCFMsg.build().writeDelimitedTo(stream);
@@ -189,9 +190,9 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(addCFMsg.getTableName());
- cfDescriptor = ProtobufUtil.convertToHColumnDesc(addCFMsg.getColumnfamilySchema());
+ cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(addCFMsg.getColumnfamilySchema());
if (addCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(addCFMsg.getUnmodifiedTableSchema());
+ unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(addCFMsg.getUnmodifiedTableSchema());
}
}
@@ -229,11 +230,11 @@ public class AddColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedHTableDescriptor == null) {
- throw new IOException("HTableDescriptor missing for " + tableName);
+ unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedTableDescriptor == null) {
+ throw new IOException("TableDescriptor missing for " + tableName);
}
- if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
+ if (unmodifiedTableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName()
+ "' in table '" + tableName + "' already exists so cannot be added");
}
@@ -258,17 +259,18 @@ public class AddColumnFamilyProcedure
// Update table descriptor
LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString());
- HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (htd.hasFamily(cfDescriptor.getName())) {
+ if (htd.hasColumnFamily(cfDescriptor.getName())) {
// It is possible to reach this situation, as we could already add the column family
// to table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problem.
return;
}
- htd.addFamily(cfDescriptor);
- env.getMasterServices().getTableDescriptors().add(htd);
+ env.getMasterServices().getTableDescriptors().add(
+ TableDescriptorBuilder.newBuilder(htd)
+ .addColumnFamily(cfDescriptor).build());
}
/**
@@ -277,14 +279,14 @@ public class AddColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (htd.hasFamily(cfDescriptor.getName())) {
+ TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (htd.hasColumnFamily(cfDescriptor.getName())) {
// Remove the column family from file system and update the table descriptor to
// the before-add-column-family-state
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName,
getRegionInfoList(env), cfDescriptor.getName(), cfDescriptor.isMobEnabled());
- env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
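The add-column path shows the same copy-on-write pattern plus the idempotence guard the procedure relies on after a master failover; a sketch:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AddFamilySketch {
  // htd.addFamily(cf) on a mutable descriptor becomes a rebuild through the
  // builder; hasFamily(byte[]) became hasColumnFamily(byte[]).
  static TableDescriptor addFamily(TableDescriptor htd, ColumnFamilyDescriptor cf) {
    if (htd.hasColumnFamily(cf.getName())) {
      return htd; // already applied, e.g. replayed after a failover
    }
    return TableDescriptorBuilder.newBuilder(htd).addColumnFamily(cf).build();
  }
}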
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index afe72e2..cc39f53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -33,11 +33,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -67,7 +68,7 @@ public class CloneSnapshotProcedure
extends AbstractStateMachineTableProcedure<CloneSnapshotState> {
private static final Log LOG = LogFactory.getLog(CloneSnapshotProcedure.class);
- private HTableDescriptor hTableDescriptor;
+ private TableDescriptor tableDescriptor;
private SnapshotDescription snapshot;
private boolean restoreAcl;
private List<HRegionInfo> newRegions = null;
@@ -85,21 +86,21 @@ public class CloneSnapshotProcedure
}
public CloneSnapshotProcedure(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
- this(env, hTableDescriptor, snapshot, false);
+ final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
+ this(env, tableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
- * @param hTableDescriptor the table to operate on
+ * @param tableDescriptor the table to operate on
* @param snapshot snapshot to clone from
*/
public CloneSnapshotProcedure(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot,
+ final TableDescriptor tableDescriptor, final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
- this.hTableDescriptor = hTableDescriptor;
+ this.tableDescriptor = tableDescriptor;
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@@ -121,7 +122,7 @@ public class CloneSnapshotProcedure
Configuration conf = env.getMasterServices().getConfiguration();
if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
- RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, hTableDescriptor.getTableName(), conf);
+ RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf);
}
}
@@ -141,7 +142,7 @@ public class CloneSnapshotProcedure
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
break;
case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
- newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions);
+ newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
break;
case CLONE_SNAPSHOT_ADD_TO_META:
@@ -224,7 +225,7 @@ public class CloneSnapshotProcedure
@Override
public TableName getTableName() {
- return hTableDescriptor.getTableName();
+ return tableDescriptor.getTableName();
}
@Override
@@ -250,7 +251,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
- .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
+ .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
@@ -281,7 +282,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo()));
snapshot = cloneSnapshotMsg.getSnapshot();
- hTableDescriptor = ProtobufUtil.convertToHTableDesc(cloneSnapshotMsg.getTableSchema());
+ tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema());
if (cloneSnapshotMsg.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@@ -341,7 +342,7 @@ public class CloneSnapshotProcedure
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
- cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
+ cpHost.preCreateTableAction(tableDescriptor, null, getUser());
}
}
@@ -357,7 +358,7 @@ public class CloneSnapshotProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
+ cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
}
}
@@ -368,9 +369,9 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFilesystemLayout(
final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final List<HRegionInfo> newRegions) throws IOException {
- return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+ return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(
final MasterProcedureEnv env,
@@ -390,7 +391,7 @@ public class CloneSnapshotProcedure
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
- conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
+ conf, fs, manifest, tableDescriptor, tableRootDir, monitorException, monitorStatus);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
// Clone operation should not have stuff to restore or remove
@@ -429,7 +430,7 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFsLayout(
final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
@@ -437,17 +438,17 @@ public class CloneSnapshotProcedure
// 1. Create Table Descriptor
// Use a copy of the descriptor; the table will be created in the enabling state first
- HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
- final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
- .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
+ .createTableDescriptorForTableDirectory(tempTableDir,
+ TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(
- env, tempdir, hTableDescriptor.getTableName(), newRegions);
+ env, tempdir, tableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
- CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+ CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
return newRegions;
}
@@ -458,11 +459,11 @@ public class CloneSnapshotProcedure
* @throws IOException
*/
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
- newRegions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, newRegions);
+ newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
- hTableDescriptor, parentsToChildrenPairMap);
+ tableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index cf55463..14604fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -30,12 +30,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -55,7 +55,7 @@ public class CreateTableProcedure
extends AbstractStateMachineTableProcedure<CreateTableState> {
private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class);
- private HTableDescriptor hTableDescriptor;
+ private TableDescriptor tableDescriptor;
private List<HRegionInfo> newRegions;
public CreateTableProcedure() {
@@ -64,15 +64,15 @@ public class CreateTableProcedure
}
public CreateTableProcedure(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) {
- this(env, hTableDescriptor, newRegions, null);
+ final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions) {
+ this(env, tableDescriptor, newRegions, null);
}
public CreateTableProcedure(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+ final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
final ProcedurePrepareLatch syncLatch) {
super(env, syncLatch);
- this.hTableDescriptor = hTableDescriptor;
+ this.tableDescriptor = tableDescriptor;
this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null;
}
@@ -98,11 +98,11 @@ public class CreateTableProcedure
setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT);
break;
case CREATE_TABLE_WRITE_FS_LAYOUT:
- newRegions = createFsLayout(env, hTableDescriptor, newRegions);
+ newRegions = createFsLayout(env, tableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
break;
case CREATE_TABLE_ADD_TO_META:
- newRegions = addTableToMeta(env, hTableDescriptor, newRegions);
+ newRegions = addTableToMeta(env, tableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS);
break;
case CREATE_TABLE_ASSIGN_REGIONS:
@@ -174,7 +174,7 @@ public class CreateTableProcedure
@Override
public TableName getTableName() {
- return hTableDescriptor.getTableName();
+ return tableDescriptor.getTableName();
}
@Override
@@ -189,7 +189,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData.Builder state =
MasterProcedureProtos.CreateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
- .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
+ .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
state.addRegionInfo(HRegionInfo.convert(hri));
@@ -205,7 +205,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData state =
MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
- hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
+ tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
if (state.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@@ -235,7 +235,7 @@ public class CreateTableProcedure
}
// check that we have at least 1 CF
- if (hTableDescriptor.getColumnFamilyCount() == 0) {
+ if (tableDescriptor.getColumnFamilyCount() == 0) {
setFailure("master-create-table", new DoNotRetryIOException("Table " +
getTableName().toString() + " should have at least one column family."));
return false;
@@ -256,7 +256,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = newRegions == null ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.preCreateTableAction(hTableDescriptor, regions, getUser());
+ cpHost.preCreateTableAction(tableDescriptor, regions, getUser());
}
}
@@ -266,7 +266,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
- cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
+ cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
}
}
@@ -277,9 +277,9 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
+ final TableDescriptor tableDescriptor, final List<HRegionInfo> newRegions)
throws IOException {
- return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+ return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
final Path tableRootDir, final TableName tableName,
@@ -287,40 +287,40 @@ public class CreateTableProcedure
HRegionInfo[] regions = newRegions != null ?
newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
- tableRootDir, hTableDescriptor, regions, null);
+ tableRootDir, tableDescriptor, regions, null);
}
});
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
+ final TableDescriptor tableDescriptor, List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tempdir = mfs.getTempDir();
// 1. Create Table Descriptor
// Use a copy of the descriptor; the table will be created in the enabling state first
- final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(
- tempTableDir, hTableDescriptor, false);
+ tempTableDir, tableDescriptor, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
- hTableDescriptor.getTableName(), newRegions);
+ tableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
- moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+ moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
return newRegions;
}
protected static void moveTempDirectoryToHBaseRoot(
final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final Path tempTableDir) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
- final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
FileSystem fs = mfs.getFileSystem();
if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
throw new IOException("Couldn't delete " + tableDir);
@@ -332,20 +332,20 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final List<HRegionInfo> regions) throws IOException {
assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
ProcedureSyncWait.waitMetaRegions(env);
// Add replicas if needed
- List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions);
+ List<HRegionInfo> newRegions = addReplicas(env, tableDescriptor, regions);
// Add regions to META
- addRegionsToMeta(env, hTableDescriptor, newRegions);
+ addRegionsToMeta(env, tableDescriptor, newRegions);
// Setup replication for region replicas if needed
- if (hTableDescriptor.getRegionReplication() > 1) {
+ if (tableDescriptor.getRegionReplication() > 1) {
ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
}
return newRegions;
@@ -354,14 +354,14 @@ public class CreateTableProcedure
/**
* Create any replicas for the regions (the default replicas that were
* already created are passed to the method)
- * @param hTableDescriptor descriptor to use
+ * @param tableDescriptor descriptor to use
* @param regions default replicas
* @return the combined list of default and non-default replicas
*/
private static List<HRegionInfo> addReplicas(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final List<HRegionInfo> regions) {
- int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
+ int numRegionReplicas = tableDescriptor.getRegionReplication() - 1;
if (numRegionReplicas <= 0) {
return regions;
}
@@ -394,10 +394,10 @@ public class CreateTableProcedure
* Add the specified set of regions to the hbase:meta table.
*/
private static void addRegionsToMeta(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final List<HRegionInfo> regionInfos) throws IOException {
MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
- regionInfos, hTableDescriptor.getRegionReplication());
+ regionInfos, tableDescriptor.getRegionReplication());
}
protected static void updateTableDescCache(final MasterProcedureEnv env,
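For callers of CreateTableProcedure the visible change is how the descriptor is built in the first place. A hedged sketch using the new builders (the table name "demo" and family "f" are made up; as prepare() above shows, a descriptor with zero families is rejected):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class BuildDescriptorExample {
      static TableDescriptor example() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("f")) // >= 1 family
            .setRegionReplication(2) // extra replicas created by addReplicas()
            .build();
      }
    }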
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index 78bd715..9ec814a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -26,10 +26,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -45,7 +46,7 @@ public class DeleteColumnFamilyProcedure
extends AbstractStateMachineTableProcedure<DeleteColumnFamilyState> {
private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class);
- private HTableDescriptor unmodifiedHTableDescriptor;
+ private TableDescriptor unmodifiedTableDescriptor;
private TableName tableName;
private byte [] familyName;
private boolean hasMob;
@@ -55,7 +56,7 @@ public class DeleteColumnFamilyProcedure
public DeleteColumnFamilyProcedure() {
super();
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -70,7 +71,7 @@ public class DeleteColumnFamilyProcedure
super(env, latch);
this.tableName = tableName;
this.familyName = familyName;
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@@ -179,9 +180,9 @@ public class DeleteColumnFamilyProcedure
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setColumnfamilyName(UnsafeByteOperations.unsafeWrap(familyName));
- if (unmodifiedHTableDescriptor != null) {
+ if (unmodifiedTableDescriptor != null) {
deleteCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
}
deleteCFMsg.build().writeDelimitedTo(stream);
@@ -197,7 +198,7 @@ public class DeleteColumnFamilyProcedure
familyName = deleteCFMsg.getColumnfamilyName().toByteArray();
if (deleteCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(deleteCFMsg.getUnmodifiedTableSchema());
+ unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(deleteCFMsg.getUnmodifiedTableSchema());
}
}
@@ -235,22 +236,22 @@ public class DeleteColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedHTableDescriptor == null) {
- throw new IOException("HTableDescriptor missing for " + tableName);
+ unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedTableDescriptor == null) {
+ throw new IOException("TableDescriptor missing for " + tableName);
}
- if (!unmodifiedHTableDescriptor.hasFamily(familyName)) {
+ if (!unmodifiedTableDescriptor.hasColumnFamily(familyName)) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be deleted");
}
- if (unmodifiedHTableDescriptor.getColumnFamilyCount() == 1) {
+ if (unmodifiedTableDescriptor.getColumnFamilyCount() == 1) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' is the only column family in the table, so it cannot be deleted");
}
// whether mob family
- hasMob = unmodifiedHTableDescriptor.getFamily(familyName).isMobEnabled();
+ hasMob = unmodifiedTableDescriptor.getColumnFamily(familyName).isMobEnabled();
}
/**
@@ -272,17 +273,17 @@ public class DeleteColumnFamilyProcedure
// Update table descriptor
LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName());
- HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- if (!htd.hasFamily(familyName)) {
+ if (!htd.hasColumnFamily(familyName)) {
// It is possible to reach this situation, as we could already delete the column family
// from table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problems.
return;
}
- htd.removeFamily(familyName);
- env.getMasterServices().getTableDescriptors().add(htd);
+ env.getMasterServices().getTableDescriptors().add(
+ TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(familyName).build());
}
/**
@@ -291,7 +292,7 @@ public class DeleteColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
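The delete path uses the mirror-image builder call, removeColumnFamily, again guarded so that a replayed state is harmless. A sketch under the same assumptions as the earlier one (names illustrative):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RemoveFamilyExample {
      // Rebuild the descriptor without the named family; no-op if already gone.
      static TableDescriptor withoutFamily(TableDescriptor existing, String family) {
        byte[] name = Bytes.toBytes(family);
        if (!existing.hasColumnFamily(name)) {
          return existing;
        }
        return TableDescriptorBuilder.newBuilder(existing)
            .removeColumnFamily(name)
            .build();
      }
    }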
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index 622c19f..ac86dab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -24,11 +24,12 @@ import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -43,28 +44,28 @@ public class ModifyColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class);
private TableName tableName;
- private HTableDescriptor unmodifiedHTableDescriptor;
- private HColumnDescriptor cfDescriptor;
+ private TableDescriptor unmodifiedTableDescriptor;
+ private ColumnFamilyDescriptor cfDescriptor;
private Boolean traceEnabled;
public ModifyColumnFamilyProcedure() {
super();
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.traceEnabled = null;
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final HColumnDescriptor cfDescriptor) {
+ final ColumnFamilyDescriptor cfDescriptor) {
this(env, tableName, cfDescriptor, null);
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
- final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
+ final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.traceEnabled = null;
}
@@ -165,10 +166,10 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
- .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
- if (unmodifiedHTableDescriptor != null) {
+ .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
+ if (unmodifiedTableDescriptor != null) {
modifyCFMsg
- .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
}
modifyCFMsg.build().writeDelimitedTo(stream);
@@ -182,9 +183,9 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
- cfDescriptor = ProtobufUtil.convertToHColumnDesc(modifyCFMsg.getColumnfamilySchema());
+ cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(modifyCFMsg.getColumnfamilySchema());
if (modifyCFMsg.hasUnmodifiedTableSchema()) {
- unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyCFMsg.getUnmodifiedTableSchema());
+ unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(modifyCFMsg.getUnmodifiedTableSchema());
}
}
@@ -221,11 +222,11 @@ public class ModifyColumnFamilyProcedure
// Checks whether the table is allowed to be modified.
checkTableModifiable(env);
- unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
- if (unmodifiedHTableDescriptor == null) {
- throw new IOException("HTableDescriptor missing for " + tableName);
+ unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedTableDescriptor == null) {
+ throw new IOException("TableDescriptor missing for " + tableName);
}
- if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
+ if (!unmodifiedTableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be modified");
}
@@ -250,9 +251,9 @@ public class ModifyColumnFamilyProcedure
// Update table descriptor
LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString());
- HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
- htd.modifyFamily(cfDescriptor);
- env.getMasterServices().getTableDescriptors().add(htd);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMasterServices().getTableDescriptors().get(tableName));
+ builder.modifyColumnFamily(cfDescriptor);
+ env.getMasterServices().getTableDescriptors().add(builder.build());
}
/**
@@ -261,7 +262,7 @@ public class ModifyColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
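modifyColumnFamily completes the trio. Since ColumnFamilyDescriptor is immutable too, tweaking one family means a nested rebuild: copy the family through ColumnFamilyDescriptorBuilder, then splice it back into the table. A sketch (the max-versions tweak is only an example setting):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ModifyFamilyExample {
      static TableDescriptor withMoreVersions(TableDescriptor existing, String family) {
        ColumnFamilyDescriptor current = existing.getColumnFamily(Bytes.toBytes(family));
        ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder.newBuilder(current)
            .setMaxVersions(5) // illustrative change
            .build();
        return TableDescriptorBuilder.newBuilder(existing)
            .modifyColumnFamily(tuned)
            .build();
      }
    }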
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 20a6a03..9741faa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -52,8 +52,8 @@ public class ModifyTableProcedure
extends AbstractStateMachineTableProcedure<ModifyTableState> {
private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class);
- private HTableDescriptor unmodifiedHTableDescriptor = null;
- private HTableDescriptor modifiedHTableDescriptor;
+ private TableDescriptor unmodifiedTableDescriptor = null;
+ private TableDescriptor modifiedTableDescriptor;
private boolean deleteColumnFamilyInModify;
private List<HRegionInfo> regionInfoList;
@@ -64,19 +64,19 @@ public class ModifyTableProcedure
initialize();
}
- public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd) {
+ public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd) {
this(env, htd, null);
}
- public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd,
+ public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd,
final ProcedurePrepareLatch latch) {
super(env, latch);
initialize();
- this.modifiedHTableDescriptor = htd;
+ this.modifiedTableDescriptor = htd;
}
private void initialize() {
- this.unmodifiedHTableDescriptor = null;
+ this.unmodifiedTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
this.deleteColumnFamilyInModify = false;
@@ -104,7 +104,7 @@ public class ModifyTableProcedure
setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
break;
case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
- updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
if (deleteColumnFamilyInModify) {
setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
} else {
@@ -112,7 +112,7 @@ public class ModifyTableProcedure
}
break;
case MODIFY_TABLE_DELETE_FS_LAYOUT:
- deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
break;
case MODIFY_TABLE_POST_OPERATION:
@@ -191,12 +191,12 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
- .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor))
+ .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor))
.setDeleteColumnFamilyInModify(deleteColumnFamilyInModify);
- if (unmodifiedHTableDescriptor != null) {
+ if (unmodifiedTableDescriptor != null) {
modifyTableMsg
- .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
+ .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
}
modifyTableMsg.build().writeDelimitedTo(stream);
@@ -209,18 +209,18 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo()));
- modifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyTableMsg.getModifiedTableSchema());
+ modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(modifyTableMsg.getModifiedTableSchema());
deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();
if (modifyTableMsg.hasUnmodifiedTableSchema()) {
- unmodifiedHTableDescriptor =
- ProtobufUtil.convertToHTableDesc(modifyTableMsg.getUnmodifiedTableSchema());
+ unmodifiedTableDescriptor =
+ ProtobufUtil.toTableDescriptor(modifyTableMsg.getUnmodifiedTableSchema());
}
}
@Override
public TableName getTableName() {
- return modifiedHTableDescriptor.getTableName();
+ return modifiedTableDescriptor.getTableName();
}
@Override
@@ -240,27 +240,27 @@ public class ModifyTableProcedure
}
// check that we have at least 1 CF
- if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
+ if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
- this.unmodifiedHTableDescriptor =
+ this.unmodifiedTableDescriptor =
env.getMasterServices().getTableDescriptors().get(getTableName());
if (env.getMasterServices().getTableStateManager()
.isTableState(getTableName(), TableState.State.ENABLED)) {
- if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
+ if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor
.getRegionReplication()) {
throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
}
}
- // Find out whether all column families in unmodifiedHTableDescriptor also exists in
- // the modifiedHTableDescriptor. This is to determine whether we are safe to rollback.
- final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys();
- final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys();
+ // Find out whether all column families in unmodifiedTableDescriptor also exist in
+ // the modifiedTableDescriptor. This determines whether it is safe to roll back.
+ final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames();
+ final Set<byte[]> newFamilies = modifiedTableDescriptor.getColumnFamilyNames();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
this.deleteColumnFamilyInModify = true;
@@ -287,7 +287,7 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
}
/**
@@ -296,10 +296,10 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
- // delete any new column families from the modifiedHTableDescriptor.
- deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
+ // delete any new column families from the modifiedTableDescriptor.
+ deleteFromFs(env, modifiedTableDescriptor, unmodifiedTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
@@ -312,18 +312,17 @@ public class ModifyTableProcedure
* @throws IOException
*/
private void deleteFromFs(final MasterProcedureEnv env,
- final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor)
+ final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor)
throws IOException {
- final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys();
- final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> oldFamilies = oldTableDescriptor.getColumnFamilyNames();
+ final Set<byte[]> newFamilies = newTableDescriptor.getColumnFamilyNames();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
env,
getTableName(),
getRegionInfoList(env),
- familyName,
- oldHTableDescriptor.getFamily(familyName).isMobEnabled());
+ familyName, oldTableDescriptor.getColumnFamily(familyName).isMobEnabled());
}
}
}
@@ -335,10 +334,10 @@ public class ModifyTableProcedure
*/
private void updateReplicaColumnsIfNeeded(
final MasterProcedureEnv env,
- final HTableDescriptor oldHTableDescriptor,
- final HTableDescriptor newHTableDescriptor) throws IOException {
- final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
- final int newReplicaCount = newHTableDescriptor.getRegionReplication();
+ final TableDescriptor oldTableDescriptor,
+ final TableDescriptor newTableDescriptor) throws IOException {
+ final int oldReplicaCount = oldTableDescriptor.getRegionReplication();
+ final int newReplicaCount = newTableDescriptor.getRegionReplication();
if (newReplicaCount < oldReplicaCount) {
Set<byte[]> tableRows = new HashSet<>();
@@ -402,10 +401,10 @@ public class ModifyTableProcedure
if (cpHost != null) {
switch (state) {
case MODIFY_TABLE_PRE_OPERATION:
- cpHost.preModifyTableAction(getTableName(), modifiedHTableDescriptor, getUser());
+ cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, getUser());
break;
case MODIFY_TABLE_POST_OPERATION:
- cpHost.postCompletedModifyTableAction(getTableName(), modifiedHTableDescriptor,getUser());
+ cpHost.postCompletedModifyTableAction(getTableName(), modifiedTableDescriptor,getUser());
break;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
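The prepare step above decides rollback safety by diffing family-name sets (getFamiliesKeys becomes getColumnFamilyNames). The same check can be written against the descriptor API directly, which sidesteps any concern about byte[] set semantics; a sketch:

    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class FamilyDiffExample {
      // True if the modification drops at least one family, i.e. the
      // MODIFY_TABLE_DELETE_FS_LAYOUT state will have to remove data on disk.
      static boolean dropsFamily(TableDescriptor oldTable, TableDescriptor newTable) {
        for (byte[] family : oldTable.getColumnFamilyNames()) {
          if (!newTable.hasColumnFamily(family)) {
            return true;
          }
        }
        return false;
      }
    }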
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index cfd9df9..4930396 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -33,12 +33,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -61,7 +61,7 @@ public class RestoreSnapshotProcedure
extends AbstractStateMachineTableProcedure<RestoreSnapshotState> {
private static final Log LOG = LogFactory.getLog(RestoreSnapshotProcedure.class);
- private HTableDescriptor modifiedHTableDescriptor;
+ private TableDescriptor modifiedTableDescriptor;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
@@ -82,24 +82,24 @@ public class RestoreSnapshotProcedure
}
public RestoreSnapshotProcedure(final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
- this(env, hTableDescriptor, snapshot, false);
+ final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
+ this(env, tableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
- * @param hTableDescriptor the table to operate on
+ * @param tableDescriptor the table to operate on
* @param snapshot snapshot to restore from
* @throws IOException
*/
public RestoreSnapshotProcedure(
final MasterProcedureEnv env,
- final HTableDescriptor hTableDescriptor,
+ final TableDescriptor tableDescriptor,
final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
// This is the new schema we are going to write out as this modification.
- this.modifiedHTableDescriptor = hTableDescriptor;
+ this.modifiedTableDescriptor = tableDescriptor;
// Snapshot information
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@@ -204,7 +204,7 @@ public class RestoreSnapshotProcedure
@Override
public TableName getTableName() {
- return modifiedHTableDescriptor.getTableName();
+ return modifiedTableDescriptor.getTableName();
}
@Override
@@ -236,7 +236,7 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
- .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor));
+ .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
if (regionsToRestore != null) {
for (HRegionInfo hri: regionsToRestore) {
@@ -278,8 +278,8 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo()));
snapshot = restoreSnapshotMsg.getSnapshot();
- modifiedHTableDescriptor =
- ProtobufUtil.convertToHTableDesc(restoreSnapshotMsg.getModifiedTableSchema());
+ modifiedTableDescriptor =
+ ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getModifiedTableSchema());
if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) {
regionsToRestore = null;
@@ -333,7 +333,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().checkTableModifiable(tableName);
// Check that we have at least 1 CF
- if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
+ if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
@@ -363,7 +363,7 @@ public class RestoreSnapshotProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
- env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
+ env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
}
/**
@@ -386,7 +386,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().getConfiguration(),
fs,
manifest,
- modifiedHTableDescriptor,
+ modifiedTableDescriptor,
rootDir,
monitorException,
getMonitorStatus());
@@ -440,19 +440,19 @@ public class RestoreSnapshotProcedure
MetaTableAccessor.addRegionsToMeta(
conn,
regionsToAdd,
- modifiedHTableDescriptor.getRegionReplication());
+ modifiedTableDescriptor.getRegionReplication());
}
if (regionsToRestore != null) {
MetaTableAccessor.overwriteRegions(
conn,
regionsToRestore,
- modifiedHTableDescriptor.getRegionReplication());
+ modifiedTableDescriptor.getRegionReplication());
}
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
- modifiedHTableDescriptor, parentsToChildrenPairMap);
+ modifiedTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(conn, regionsToAdd);
// At this point the restore is complete.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index e7f5ead..506c67d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -28,11 +28,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -48,7 +48,7 @@ public class TruncateTableProcedure
private boolean preserveSplits;
private List<HRegionInfo> regions;
- private HTableDescriptor hTableDescriptor;
+ private TableDescriptor tableDescriptor;
private TableName tableName;
public TruncateTableProcedure() {
@@ -95,7 +95,7 @@ public class TruncateTableProcedure
setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
break;
case TRUNCATE_TABLE_REMOVE_FROM_META:
- hTableDescriptor = env.getMasterServices().getTableDescriptors()
+ tableDescriptor = env.getMasterServices().getTableDescriptors()
.get(tableName);
DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
DeleteTableProcedure.deleteAssignmentState(env, getTableName());
@@ -105,26 +105,26 @@ public class TruncateTableProcedure
DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
if (!preserveSplits) {
// if we are not preserving splits, generate a new single region
- regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
+ regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(tableDescriptor, null));
} else {
regions = recreateRegionInfo(regions);
}
setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
break;
case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
- regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
+ regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions);
CreateTableProcedure.updateTableDescCache(env, getTableName());
setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
break;
case TRUNCATE_TABLE_ADD_TO_META:
- regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
+ regions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, regions);
setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
break;
case TRUNCATE_TABLE_ASSIGN_REGIONS:
CreateTableProcedure.setEnablingState(env, getTableName());
addChildProcedure(env.getAssignmentManager().createAssignProcedures(regions));
setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
- hTableDescriptor = null;
+ tableDescriptor = null;
regions = null;
break;
case TRUNCATE_TABLE_POST_OPERATION:
@@ -216,8 +216,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setPreserveSplits(preserveSplits);
- if (hTableDescriptor != null) {
- state.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
+ if (tableDescriptor != null) {
+ state.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
} else {
state.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
@@ -237,8 +237,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
if (state.hasTableSchema()) {
- hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
- tableName = hTableDescriptor.getTableName();
+ tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
+ tableName = tableDescriptor.getTableName();
} else {
tableName = ProtobufUtil.toTableName(state.getTableName());
}
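Every procedure in this patch serializes its descriptor the same way, and the rename is mechanical: convertToTableSchema/convertToHTableDesc become toTableSchema/toTableDescriptor. A sketch of the round trip, assuming the shaded ProtobufUtil and generated TableSchema class from this source tree:

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

    public class StateRoundTripExample {
      // Descriptor -> protobuf message (written to the procedure store)
      // and back again on recovery.
      static TableDescriptor roundTrip(TableDescriptor descriptor) {
        TableSchema schema = ProtobufUtil.toTableSchema(descriptor);
        return ProtobufUtil.toTableDescriptor(schema);
      }
    }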
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index 0448f92..e8131af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -137,16 +137,16 @@ public final class MasterSnapshotVerifier {
* @param manifest snapshot manifest to inspect
*/
private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
- HTableDescriptor htd = manifest.getTableDescriptor();
+ TableDescriptor htd = manifest.getTableDescriptor();
if (htd == null) {
throw new CorruptedSnapshotException("Missing Table Descriptor",
ProtobufUtil.createSnapshotDesc(snapshot));
}
- if (!htd.getNameAsString().equals(snapshot.getTable())) {
+ if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) {
throw new CorruptedSnapshotException(
"Invalid Table Descriptor. Expected " + snapshot.getTable() + " name, got "
- + htd.getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
+ + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
}
}
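A small but easy-to-miss API difference surfaces here: TableDescriptor has no getNameAsString(), so the name is reached through getTableName(). A trivial sketch:

    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class NameExample {
      // HTableDescriptor.getNameAsString() becomes a two-step call.
      static String nameOf(TableDescriptor htd) {
        return htd.getTableName().getNameAsString();
      }
    }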
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index b81c7db..b503d61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -38,12 +38,13 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -555,7 +556,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
cleanupSentinels();
// check to see if the table exists
- HTableDescriptor desc = null;
+ TableDescriptor desc = null;
try {
desc = master.getTableDescriptors().get(
TableName.valueOf(snapshot.getTable()));
@@ -679,10 +680,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
- final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
+ final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
- HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
+ TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
if (cpHost != null) {
cpHost.preCloneSnapshot(reqSnapshot, htd);
}
@@ -707,14 +708,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* The operation will fail if the destination table has a snapshot or restore in progress.
*
* @param snapshot Snapshot Descriptor
- * @param hTableDescriptor Table Descriptor of the table to create
+ * @param tableDescriptor Table Descriptor of the table to create
* @param nonceKey unique identifier to prevent duplicated RPC
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
- final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
+ final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
- TableName tableName = hTableDescriptor.getTableName();
+ TableName tableName = tableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@@ -729,7 +730,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
- hTableDescriptor, snapshot, restoreAcl),
+ tableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
@@ -765,7 +766,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs,
snapshotDir, snapshot);
- HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
+ TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// stop tracking "abandoned" handlers
@@ -799,7 +800,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
- final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
+ final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@@ -836,15 +837,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
* @param snapshot Snapshot Descriptor
- * @param hTableDescriptor Table Descriptor
+ * @param tableDescriptor Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
*/
private synchronized long restoreSnapshot(final SnapshotDescription snapshot,
- final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
+ final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
- final TableName tableName = hTableDescriptor.getTableName();
+ final TableName tableName = tableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@@ -859,7 +860,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
- hTableDescriptor, snapshot, restoreAcl),
+ tableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
[5/8] hbase git commit: HBASE-18503 Change ***Util and Master to use
TableDescriptor and ColumnFamilyDescriptor
Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 18b1114..28d2a24 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -466,10 +467,20 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* @return META table descriptor
+ * @deprecated since 2.0 version and will be removed in 3.0 version.
+ * use {@link #getMetaTableDescriptorBuilder()}
*/
+ @Deprecated
public HTableDescriptor getMetaTableDescriptor() {
+ return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
+ }
+
+ /**
+ * @return META table descriptor
+ */
+ public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
try {
- return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+ return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
} catch (IOException e) {
throw new RuntimeException("Unable to create META table descriptor", e);
}
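
With the builder exposed directly, callers that previously mutated the now-deprecated HTableDescriptor adjust settings before building. A sketch of the intended call path (UTIL is an HBaseTestingUtility as elsewhere in these tests; the 64 MB flush size is just an example value):

TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder();
TableDescriptor meta = metaBuilder
    .setMemStoreFlushSize(64 * 1024 * 1024)  // any table-level setting can be applied here
    .build();                                // build() yields an immutable descriptor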
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 7457f43..95997f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,10 +25,13 @@ import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.*;
+import org.junit.Rule;
+import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
@@ -46,9 +49,9 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
- assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
+ assertTrue("Should create new table descriptor",
+ fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(), false));
}
@Test
@@ -59,7 +62,7 @@ public class TestFSTableDescriptorForceCreation {
// Cleanup old tests if any detritus laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
fstd.add(htd);
assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@@ -71,7 +74,7 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
fstd.createTableDescriptor(htd, false);
assertTrue("Should create new table descriptor",
fstd.createTableDescriptor(htd, true));
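
The before/after shape of descriptor construction in these tests, side by side (variable names illustrative):

// Before: mutable descriptor, constructed directly (now deprecated).
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));

// After: immutable descriptor, obtained from the builder.
TableDescriptor htd2 = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();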
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 20cf8bb..d85326f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -24,6 +24,8 @@ import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -152,22 +154,22 @@ public class TestHColumnDescriptorDefaultVersions {
Admin admin = TEST_UTIL.getAdmin();
// Verify descriptor from master
- HTableDescriptor htd = admin.getTableDescriptor(tableName);
- HColumnDescriptor[] hcds = htd.getColumnFamilies();
+ TableDescriptor htd = admin.listTableDescriptor(tableName);
+ ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+ TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
hcds = td.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
}
- private void verifyHColumnDescriptor(int expected, final HColumnDescriptor[] hcds,
+ private void verifyHColumnDescriptor(int expected, final ColumnFamilyDescriptor[] hcds,
final TableName tableName,
final byte[]... families) {
- for (HColumnDescriptor hcd : hcds) {
+ for (ColumnFamilyDescriptor hcd : hcds) {
assertEquals(expected, hcd.getMaxVersions());
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index d17c782..121647e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -38,14 +38,11 @@ import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -751,7 +748,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- HTableDescriptor td =
+ TableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 27c9a5f..9f4ce35 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -23,15 +23,14 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -75,7 +74,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long createTable(
- final HTableDescriptor desc,
+ final TableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
@@ -84,7 +83,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
- public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
+ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
return -1;
}
@@ -267,7 +266,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long modifyTable(
final TableName tableName,
- final HTableDescriptor descriptor,
+ final TableDescriptor descriptor,
final long nonceGroup,
final long nonce) throws IOException {
return -1;
@@ -290,13 +289,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
- public long addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor,
+ public long addColumn(final TableName tableName, final ColumnFamilyDescriptor columnDescriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}
@Override
- public long modifyColumn(final TableName tableName, final HColumnDescriptor descriptor,
+ public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 48386a6..9101d5e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -24,19 +24,19 @@ import java.util.NavigableMap;
import java.util.SortedSet;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
@@ -300,36 +300,36 @@ public class MockMasterServices extends MockNoopMasterServices {
public TableDescriptors getTableDescriptors() {
return new TableDescriptors() {
@Override
- public HTableDescriptor remove(TableName tablename) throws IOException {
+ public TableDescriptor remove(TableName tablename) throws IOException {
// noop
return null;
}
@Override
- public Map<String, HTableDescriptor> getAll() throws IOException {
+ public Map<String, TableDescriptor> getAll() throws IOException {
// noop
return null;
}
- @Override public Map<String, HTableDescriptor> getAllDescriptors() throws IOException {
+ @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
// noop
return null;
}
@Override
- public HTableDescriptor get(TableName tablename) throws IOException {
- HTableDescriptor htd = new HTableDescriptor(tablename);
- htd.addFamily(new HColumnDescriptor(DEFAULT_COLUMN_FAMILY_NAME));
- return htd;
+ public TableDescriptor get(TableName tablename) throws IOException {
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tablename);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(DEFAULT_COLUMN_FAMILY_NAME));
+ return builder.build();
}
@Override
- public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
+ public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
return null;
}
@Override
- public void add(HTableDescriptor htd) throws IOException {
+ public void add(TableDescriptor htd) throws IOException {
// noop
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 6dfcad1..226f9f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -33,20 +33,23 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterMetaBootstrap;
@@ -61,6 +64,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+@InterfaceAudience.Private
public class MasterProcedureTestingUtility {
private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
@@ -136,17 +140,17 @@ public class MasterProcedureTestingUtility {
// ==========================================================================
// Table Helpers
// ==========================================================================
- public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
- HTableDescriptor htd = new HTableDescriptor(tableName);
+ public static TableDescriptor createHTD(final TableName tableName, final String... family) {
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
for (int i = 0; i < family.length; ++i) {
- htd.addFamily(new HColumnDescriptor(family[i]));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family[i]));
}
- return htd;
+ return builder.build();
}
public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
- HTableDescriptor htd = createHTD(tableName, family);
+ TableDescriptor htd = createHTD(tableName, family);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = ProcedureTestingUtility.submitAndWait(procExec,
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -194,12 +198,12 @@ public class MasterProcedureTestingUtility {
assertEquals(regions.length, countMetaRegions(master, tableName));
// check htd
- HTableDescriptor htd = master.getTableDescriptors().get(tableName);
+ TableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue("table descriptor not found", htd != null);
for (int i = 0; i < family.length; ++i) {
- assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
+ assertTrue("family not found " + family[i], htd.getColumnFamily(Bytes.toBytes(family[i])) != null);
}
- assertEquals(family.length, htd.getFamilies().size());
+ assertEquals(family.length, htd.getColumnFamilyCount());
}
public static void validateTableDeletion(
@@ -267,18 +271,18 @@ public class MasterProcedureTestingUtility {
public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
final String family) throws IOException {
- HTableDescriptor htd = master.getTableDescriptors().get(tableName);
+ TableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- assertTrue(htd.hasFamily(family.getBytes()));
+ assertTrue(htd.hasColumnFamily(family.getBytes()));
}
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
final String family) throws IOException {
// verify htd
- HTableDescriptor htd = master.getTableDescriptors().get(tableName);
+ TableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- assertFalse(htd.hasFamily(family.getBytes()));
+ assertFalse(htd.hasColumnFamily(family.getBytes()));
// verify fs
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -290,13 +294,13 @@ public class MasterProcedureTestingUtility {
}
public static void validateColumnFamilyModification(final HMaster master,
- final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
+ final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor)
throws IOException {
- HTableDescriptor htd = master.getTableDescriptors().get(tableName);
+ TableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
- HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
- assertTrue(hcfd.equals(columnDescriptor));
+ ColumnFamilyDescriptor hcfd = htd.getColumnFamily(family.getBytes());
+ assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
}
public static void loadData(final Connection connection, final TableName tableName,
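
The hunks above also capture the accessor renames that recur through the rest of this patch. A compact map, with the comparator standing in for equals() (the actual/expected names are illustrative):

// Old HTableDescriptor API        New TableDescriptor API
//   getFamily(bytes)         ->     getColumnFamily(bytes)
//   getFamilies().size()     ->     getColumnFamilyCount()
//   hasFamily(bytes)         ->     hasColumnFamily(bytes)
//   getFamiliesKeys()        ->     getColumnFamilyNames()
// Family equality is asserted through the comparator rather than equals():
assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(actual, expected));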
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 177d862..eda7fcd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -22,9 +22,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -76,10 +78,11 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
final TableName tableName = TableName.valueOf(name.getMethodName());
// create table with 0 families will fail
- final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName);
+ final TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName));
// disable sanity check
- htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
+ builder.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
+ TableDescriptor htd = builder.build();
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
long procId =
@@ -96,7 +99,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
public void testCreateExisting() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
- final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
+ final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
// create the table
@@ -125,7 +128,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
- HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -138,18 +141,21 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
@Test(timeout=90000)
public void testRollbackAndDoubleExecution() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
- testRollbackAndDoubleExecution(MasterProcedureTestingUtility.createHTD(tableName, F1, F2));
+ testRollbackAndDoubleExecution(TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName, F1, F2)));
}
@Test(timeout=90000)
public void testRollbackAndDoubleExecutionOnMobTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
- HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
- htd.getFamily(Bytes.toBytes(F1)).setMobEnabled(true);
- testRollbackAndDoubleExecution(htd);
+ TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd)
+ .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(Bytes.toBytes(F1)))
+ .setMobEnabled(true)
+ .build());
+ testRollbackAndDoubleExecution(builder);
}
- private void testRollbackAndDoubleExecution(HTableDescriptor htd) throws Exception {
+ private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) throws Exception {
// create the table
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
@@ -158,7 +164,8 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
- htd.setRegionReplication(3);
+ builder.setRegionReplication(3);
+ TableDescriptor htd = builder.build();
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@@ -181,9 +188,9 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
splitKeys[i] = Bytes.toBytes(String.format("%08d", i));
}
- final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(
+ final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(
TableName.valueOf("TestMRegions"), F1, F2);
- UTIL.getHBaseAdmin().createTableAsync(htd, splitKeys)
+ UTIL.getAdmin().createTableAsync(htd, splitKeys)
.get(10, java.util.concurrent.TimeUnit.HOURS);
LOG.info("TABLE CREATED");
}
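
Because TableDescriptor is immutable, the mob test above rebuilds rather than mutates: it seeds a builder from the existing descriptor, swaps in a modified family, and builds again. The same pattern in isolation (tableName and F1 as in the test):

TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
TableDescriptor mobHtd = TableDescriptorBuilder.newBuilder(htd)    // seed builder from the existing descriptor
    .modifyColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(htd.getColumnFamily(Bytes.toBytes(F1)))        // copy the F1 family's settings
        .setMobEnabled(true)                                       // change only the setting under test
        .build())
    .build();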
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index d2df2bf..db5eafa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -112,7 +112,7 @@ public class TestMasterFailoverWithProcedures {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
- HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
index a75cbc1..68013fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -135,7 +135,7 @@ public class TestMasterProcedureWalLease {
backupStore3.recoverLease();
// Try to trigger a command on the master (WAL lease expired on the active one)
- HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
+ TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
LOG.debug("submit proc");
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
index 77e1fc9..9d60bd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -274,14 +275,14 @@ public class TestTableDescriptorModificationFromClient {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
- HTableDescriptor td =
+ TableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}
- private void verifyTableDescriptor(final HTableDescriptor htd,
+ private void verifyTableDescriptor(final TableDescriptor htd,
final TableName tableName, final byte[]... families) {
- Set<byte[]> htdFamilies = htd.getFamiliesKeys();
+ Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
assertEquals(tableName, htd.getTableName());
assertEquals(families.length, htdFamilies.size());
for (byte[] familyName: families) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
index f93ce98..2fe8085 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -514,7 +515,7 @@ public class TestPartitionedMobCompactor {
CacheConfig cacheConfig = null;
MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool, final int delPartitionSize,
+ ColumnFamilyDescriptor column, ExecutorService pool, final int delPartitionSize,
final CacheConfig cacheConf, final int PartitionsIncludeDelFiles)
throws IOException {
super(conf, fs, tableName, column, pool);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 570d2d8..6b01256 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -79,10 +80,11 @@ public class TestGetClosestAtOrBefore {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = UTIL.getDataTestDirOnTestFS();
// Up flush size else we bind up when we use default catalog flush of 16k.
- UTIL.getMetaTableDescriptor().setMemStoreFlushSize(64 * 1024 * 1024);
+ TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder()
+ .setMemStoreFlushSize(64 * 1024 * 1024);
Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
- rootdir, this.conf, UTIL.getMetaTableDescriptor());
+ rootdir, this.conf, metaBuilder.build());
try {
// Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 11c985d..e40bb43 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -39,22 +39,22 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterRpcServices;
@@ -207,7 +207,7 @@ public class TestRegionMergeTransactionOnCluster {
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(MASTER.getConnection(), tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
- HTableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
+ TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
tableName);
Result mergedRegionResult = MetaTableAccessor.getRegionResult(
MASTER.getConnection(), mergedRegionInfo.getRegionName());
@@ -231,11 +231,11 @@ public class TestRegionMergeTransactionOnCluster {
assertTrue(fs.exists(regionAdir));
assertTrue(fs.exists(regionBdir));
- HColumnDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
HRegionFileSystem hrfs = new HRegionFileSystem(
TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
int count = 0;
- for(HColumnDescriptor colFamily : columnFamilies) {
+ for(ColumnFamilyDescriptor colFamily : columnFamilies) {
count += hrfs.getStoreFiles(colFamily.getName()).size();
}
ADMIN.compactRegion(mergedRegionInfo.getRegionName());
@@ -244,7 +244,7 @@ public class TestRegionMergeTransactionOnCluster {
long timeout = System.currentTimeMillis() + waitTime;
int newcount = 0;
while (System.currentTimeMillis() < timeout) {
- for(HColumnDescriptor colFamily : columnFamilies) {
+ for(ColumnFamilyDescriptor colFamily : columnFamilies) {
newcount += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount > count) {
@@ -263,7 +263,7 @@ public class TestRegionMergeTransactionOnCluster {
}
while (System.currentTimeMillis() < timeout) {
int newcount1 = 0;
- for(HColumnDescriptor colFamily : columnFamilies) {
+ for(ColumnFamilyDescriptor colFamily : columnFamilies) {
newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount1 <= 1) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 89598ad..3b66a1d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -26,13 +26,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -261,7 +261,7 @@ public class TestRegionServerNoMaster {
hri.getEncodedNameAsBytes()));
// Let's start the open handler
- HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
+ TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index 18290f5..126c4e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -36,20 +36,20 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -652,31 +652,30 @@ public class SecureTestUtil {
public static Table createTable(HBaseTestingUtility testUtil, TableName tableName,
byte[][] families) throws Exception {
- HTableDescriptor htd = new HTableDescriptor(tableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
for (byte[] family : families) {
- HColumnDescriptor hcd = new HColumnDescriptor(family);
- htd.addFamily(hcd);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
}
- createTable(testUtil, testUtil.getAdmin(), htd);
- return testUtil.getConnection().getTable(htd.getTableName());
+ createTable(testUtil, testUtil.getAdmin(), builder.build());
+ return testUtil.getConnection().getTable(tableName);
}
- public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd)
+ public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd)
throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd);
}
- public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd,
+ public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd,
byte[][] splitKeys) throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd, splitKeys);
}
- public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd)
+ public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd)
throws Exception {
createTable(testUtil, admin, htd, null);
}
- public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd,
+ public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd,
byte[][] splitKeys) throws Exception {
// NOTE: We need a latch because admin is not sync,
// so the postOp coprocessor method may be called after the admin operation returned.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index 3e1abb9..1a33f13 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@ -24,18 +24,20 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
public class MobSnapshotTestingUtils {
@@ -60,15 +62,17 @@ public class MobSnapshotTestingUtils {
private static void createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[][] splitKeys, int regionReplication,
final byte[]... families) throws IOException, InterruptedException {
- HTableDescriptor htd = new HTableDescriptor(tableName);
- htd.setRegionReplication(regionReplication);
+ TableDescriptorBuilder builder
+ = TableDescriptorBuilder.newBuilder(tableName)
+ .setRegionReplication(regionReplication);
for (byte[] family : families) {
- HColumnDescriptor hcd = new HColumnDescriptor(family);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(0L);
- htd.addFamily(hcd);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(family)
+ .setMobEnabled(true)
+ .setMobThreshold(0L)
+ .build());
}
- util.getAdmin().createTable(htd, splitKeys);
+ util.getAdmin().createTable(builder.build(), splitKeys);
SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
assertEquals((splitKeys.length + 1) * regionReplication, util
.getAdmin().getTableRegions(tableName).size());
@@ -80,29 +84,29 @@ public class MobSnapshotTestingUtils {
* @param util
* @param tableName
* @param families
- * @return An HTable instance for the created table.
+ * @return A Table instance for the created table.
* @throws IOException
*/
public static Table createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[]... families) throws IOException {
- HTableDescriptor htd = new HTableDescriptor(tableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
for (byte[] family : families) {
- HColumnDescriptor hcd = new HColumnDescriptor(family);
// Disable blooms (they are on by default as of 0.95) but we disable them
// here because
// tests have hard coded counts of what to expect in block cache, etc.,
// and blooms being
// on is interfering.
- hcd.setBloomFilterType(BloomType.NONE);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(0L);
- htd.addFamily(hcd);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
+ .setBloomFilterType(BloomType.NONE)
+ .setMobEnabled(true)
+ .setMobThreshold(0L)
+ .build());
}
- util.getAdmin().createTable(htd);
+ util.getAdmin().createTable(builder.build());
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait
// until they are assigned
- util.waitUntilAllRegionsAssigned(htd.getTableName());
- return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
+ util.waitUntilAllRegionsAssigned(tableName);
+ return ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
}
/**
@@ -146,13 +150,14 @@ public class MobSnapshotTestingUtils {
}
@Override
- public HTableDescriptor createHtd(final String tableName) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
- HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(0L);
- htd.addFamily(hcd);
- return htd;
+ public TableDescriptor createHtd(final String tableName) {
+ return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(Bytes.toBytes(TEST_FAMILY))
+ .setMobEnabled(true)
+ .setMobThreshold(0L)
+ .build())
+ .build();
}
}
}
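
Two builder entry points alternate through these mob changes: of() for a family with all defaults, newBuilder() when settings differ. In brief (family is a byte[] as in the code above):

ColumnFamilyDescriptor plain = ColumnFamilyDescriptorBuilder.of(family);
ColumnFamilyDescriptor mob = ColumnFamilyDescriptorBuilder.newBuilder(family)
    .setMobEnabled(true)    // route values through mob files
    .setMobThreshold(0L)    // with threshold 0, every cell takes the mob path
    .build();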
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index dab55f6..71dac9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -40,36 +40,35 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSVisitor;
@@ -492,7 +491,7 @@ public final class SnapshotTestingUtils {
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final SnapshotProtos.SnapshotDescription desc;
- private final HTableDescriptor htd;
+ private final TableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
private final Path rootDir;
@@ -500,7 +499,7 @@ public final class SnapshotTestingUtils {
private int snapshotted = 0;
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
- final Path rootDir, final HTableDescriptor htd,
+ final Path rootDir, final TableDescriptor htd,
final SnapshotProtos.SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
@@ -514,7 +513,7 @@ public final class SnapshotTestingUtils {
.createTableDescriptorForTableDirectory(snapshotDir, htd, false);
}
- public HTableDescriptor getTableDescriptor() {
+ public TableDescriptor getTableDescriptor() {
return this.htd;
}
@@ -680,11 +679,11 @@ public final class SnapshotTestingUtils {
private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
final int numRegions, final int version) throws IOException {
- HTableDescriptor htd = createHtd(tableName);
+ TableDescriptor htd = createHtd(tableName);
RegionData[] regions = createTable(htd, numRegions);
SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription.newBuilder()
- .setTable(htd.getNameAsString())
+ .setTable(htd.getTableName().getNameAsString())
.setName(snapshotName)
.setVersion(version)
.build();
@@ -694,13 +693,13 @@ public final class SnapshotTestingUtils {
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
}
- public HTableDescriptor createHtd(final String tableName) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
- htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
- return htd;
+ public TableDescriptor createHtd(final String tableName) {
+ return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY))
+ .build();
}
- private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
+ private RegionData[] createTable(final TableDescriptor htd, final int nregions)
throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
@@ -766,14 +765,15 @@ public final class SnapshotTestingUtils {
public static void createTable(final HBaseTestingUtility util, final TableName tableName,
int regionReplication, int nRegions, final byte[]... families)
throws IOException, InterruptedException {
- HTableDescriptor htd = new HTableDescriptor(tableName);
- htd.setRegionReplication(regionReplication);
+ TableDescriptorBuilder builder
+ = TableDescriptorBuilder
+ .newBuilder(tableName)
+ .setRegionReplication(regionReplication);
for (byte[] family : families) {
- HColumnDescriptor hcd = new HColumnDescriptor(family);
- htd.addFamily(hcd);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
}
byte[][] splitKeys = getSplitKeys(nRegions);
- util.createTable(htd, splitKeys);
+ util.createTable(builder.build(), splitKeys);
assertEquals((splitKeys.length + 1) * regionReplication,
util.getAdmin().getTableRegions(tableName).size());
}
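For anyone auditing this migration hunk by hunk: the old mutable pattern (new HTableDescriptor plus addFamily/setRegionReplication) is replaced by the immutable builder shown above. A minimal, self-contained sketch using only methods that appear in the hunk; the class name and the descriptorFor helper are illustrative, not part of this commit:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class DescriptorBuilderSketch {
  // Builds what new HTableDescriptor(tableName) + addFamily(...) used to build,
  // but as an immutable TableDescriptor.
  static TableDescriptor descriptorFor(TableName tableName, int regionReplication,
      byte[]... families) {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(tableName)
        .setRegionReplication(regionReplication);
    for (byte[] family : families) {
      // of(...) creates a column family descriptor with default settings.
      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    }
    return builder.build(); // frozen; further changes require a new builder
  }
}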
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 4b684e3..b7110b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -104,11 +104,11 @@ public class TestRestoreSnapshotHelper {
builder.addRegionV2();
builder.addRegionV1();
Path snapshotDir = builder.commit();
- HTableDescriptor htd = builder.getTableDescriptor();
+ TableDescriptor htd = builder.getTableDescriptor();
SnapshotDescription desc = builder.getSnapshotDescription();
// Test clone a snapshot
- HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
+ TableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
testRestore(snapshotDir, desc, htdClone);
verifyRestore(rootDir, htd, htdClone);
@@ -118,13 +118,13 @@ public class TestRestoreSnapshotHelper {
.setTable("testtb-clone")
.build();
Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
- HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
+ TableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
testRestore(cloneDir, cloneDesc, htdClone2);
verifyRestore(rootDir, htd, htdClone2);
}
- private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
- final HTableDescriptor htdClone) throws IOException {
+ private void verifyRestore(final Path rootDir, final TableDescriptor sourceHtd,
+ final TableDescriptor htdClone) throws IOException {
List<String> files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.size());
@@ -148,7 +148,7 @@ public class TestRestoreSnapshotHelper {
* @param htdClone The TableDescriptor of the table to restore/clone.
*/
private void testRestore(final Path snapshotDir, final SnapshotDescription sd,
- final HTableDescriptor htdClone) throws IOException {
+ final TableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
@@ -164,7 +164,7 @@ public class TestRestoreSnapshotHelper {
* Initialize the restore helper, based on the snapshot and table information provided.
*/
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
- final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
+ final SnapshotDescription sd, final TableDescriptor htdClone) throws IOException {
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
MonitoredTask status = Mockito.mock(MonitoredTask.class);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
index 0ee28d1..8ba4262 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
@@ -129,7 +129,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder =
SnapshotRegionManifest.newBuilder();
- for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
+ for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
@@ -150,7 +150,7 @@ public class TestSnapshotManifest {
}
dataManifestBuilder
- .setTableSchema(ProtobufUtil.convertToTableSchema(builder.getTableDescriptor()));
+ .setTableSchema(ProtobufUtil.toTableSchema(builder.getTableDescriptor()));
SnapshotDataManifest dataManifest = dataManifestBuilder.build();
return writeDataManifest(dataManifest);
@@ -163,7 +163,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder();
dataRegionManifestBuilder.setRegionInfo(HRegionInfo.convert(regionInfo));
- for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
+ for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
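The two hunks above make the same substitution: HTableDescriptor#getFamilies() returning HColumnDescriptor becomes TableDescriptor#getColumnFamilies() returning ColumnFamilyDescriptor (and ProtobufUtil#convertToTableSchema is renamed to ProtobufUtil#toTableSchema). A minimal iteration sketch, assuming only the accessors visible in the diff; listFamilies is an illustrative name:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilyIterationSketch {
  static void listFamilies(TableDescriptor htd) {
    // getColumnFamilies() replaces getFamilies(); elements expose the same
    // getName() byte[] accessor the manifest code above wraps for protobuf.
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
      System.out.println(htd.getTableName() + " family=" + Bytes.toString(cfd.getName()));
    }
  }
}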
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 8337eb0..30a7cd6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -40,11 +40,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -78,7 +79,7 @@ public class TestFSTableDescriptors {
@Test
public void testCreateAndUpdate() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
@@ -98,7 +99,7 @@ public class TestFSTableDescriptors {
@Test
public void testSequenceIdAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
Path p0 = fstd.updateTableDescriptor(htd);
@@ -118,7 +119,7 @@ public class TestFSTableDescriptors {
assertTrue(!fs.exists(p2));
int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
assertTrue(i3 == i2 + 1);
- HTableDescriptor descriptor = fstd.get(htd.getTableName());
+ TableDescriptor descriptor = fstd.get(htd.getTableName());
assertEquals(descriptor, htd);
}
@@ -161,7 +162,7 @@ public class TestFSTableDescriptors {
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
htds.add(htd);
assertNotNull(htds.remove(htd.getTableName()));
assertNull(htds.remove(htd.getTableName()));
@@ -170,11 +171,11 @@ public class TestFSTableDescriptors {
@Test public void testReadingHTDFromFS() throws IOException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
fstd.createTableDescriptor(htd);
- HTableDescriptor td2 =
+ TableDescriptor td2 =
FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
assertTrue(htd.equals(td2));
}
@@ -184,25 +185,25 @@ public class TestFSTableDescriptors {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
Path descriptorFile = fstd.updateTableDescriptor(htd);
try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
- out.write(htd.toByteArray());
+ out.write(TableDescriptorBuilder.toByteArray(htd));
}
FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor td2 = fstd2.get(htd.getTableName());
+ TableDescriptor td2 = fstd2.get(htd.getTableName());
assertEquals(htd, td2);
FileStatus descriptorFile2 =
FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
- byte[] buffer = htd.toByteArray();
+ byte[] buffer = TableDescriptorBuilder.toByteArray(htd);
try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
in.readFully(buffer);
}
- HTableDescriptor td3 = HTableDescriptor.parseFrom(buffer);
+ TableDescriptor td3 = TableDescriptorBuilder.parseFrom(buffer);
assertEquals(htd, td3);
}
- @Test public void testHTableDescriptors()
+ @Test public void testTableDescriptors()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
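Worth calling out in the hunk above: serialization moved off the descriptor instance onto statics. htd.toByteArray() becomes TableDescriptorBuilder.toByteArray(htd), and HTableDescriptor.parseFrom(bytes) becomes TableDescriptorBuilder.parseFrom(bytes). A round-trip sketch under those assumptions (the throws clause is deliberately conservative; roundTrip is an illustrative name):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public final class SerializationSketch {
  static void roundTrip() throws DeserializationException, IOException {
    TableDescriptor htd =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).build();
    byte[] bytes = TableDescriptorBuilder.toByteArray(htd); // pb-serialized form
    TableDescriptor copy = TableDescriptorBuilder.parseFrom(bytes);
    System.out.println("round trip equal? " + htd.equals(copy)); // expect true
  }
}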
@@ -210,7 +211,7 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
@Override
- public HTableDescriptor get(TableName tablename)
+ public TableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info(tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
@@ -219,9 +220,7 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(
- new HTableDescriptor(TableName.valueOf(name + i)));
- htds.createTableDescriptor(htd);
+ htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
}
for (int i = 0; i < count; i++) {
@@ -232,9 +231,9 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
- htd.addFamily(new HColumnDescriptor("" + i));
- htds.updateTableDescriptor(htd);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
+ htds.updateTableDescriptor(builder.build());
}
// Wait a while so the mod time we write is certain to differ.
Thread.sleep(100);
@@ -250,7 +249,7 @@ public class TestFSTableDescriptors {
}
@Test
- public void testHTableDescriptorsNoCache()
+ public void testTableDescriptorsNoCache()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -261,8 +260,7 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
- htds.createTableDescriptor(htd);
+ htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
}
for (int i = 0; i < 2 * count; i++) {
@@ -270,14 +268,14 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
- htd.addFamily(new HColumnDescriptor("" + i));
- htds.updateTableDescriptor(htd);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
+ htds.updateTableDescriptor(builder.build());
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
assertTrue("Column Family " + i + " missing",
- htds.get(TableName.valueOf(name + i)).hasFamily(Bytes.toBytes("" + i)));
+ htds.get(TableName.valueOf(name + i)).hasColumnFamily(Bytes.toBytes("" + i)));
}
assertEquals(count * 4, htds.invocations);
assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits);
@@ -294,12 +292,10 @@ public class TestFSTableDescriptors {
final int count = 4;
// Write out table infos.
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
- htds.createTableDescriptor(htd);
+ htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
}
// add hbase:meta
- HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
- htds.createTableDescriptor(htd);
+ htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build());
assertEquals("getAll() didn't return all TableDescriptors, expected: " +
(count + 1) + " got: " + htds.getAll().size(),
@@ -321,8 +317,7 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos via non-cached FSTableDescriptors
for (int i = 0; i < count; i++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
- nonchtds.createTableDescriptor(htd);
+ nonchtds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
}
// Calls to getAll() won't increase the cache counter; query per table instead.
@@ -333,15 +328,15 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta
- HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
- for (Map.Entry entry: nonchtds.getAll().entrySet()) {
+ for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey();
- HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
+ TableDescriptor nchtd = entry.getValue();
assertTrue("expected " + htd.toString() +
" got: " + chtds.get(TableName.valueOf(t)).toString(),
(nchtd.equals(chtds.get(TableName.valueOf(t)))));
@@ -366,7 +361,7 @@ public class TestFSTableDescriptors {
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
htds.add(htd);
htds.add(htd);
htds.add(htd);
@@ -415,12 +410,14 @@ public class TestFSTableDescriptors {
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
- htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
+ htd = TableDescriptorBuilder.newBuilder(htd)
+ .setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"))
+ .build();
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDir(htd.getTableName());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
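Because TableDescriptor is immutable, the in-place htd.setValue(...) the old test made has to become a copy-and-rebuild, exactly as the hunk above does. A sketch of that pattern; withValue is an illustrative helper, not part of this commit:

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class ImmutableUpdateSketch {
  static TableDescriptor withValue(TableDescriptor htd, String key, String value) {
    return TableDescriptorBuilder.newBuilder(htd)           // seed from existing
        .setValue(Bytes.toBytes(key), Bytes.toBytes(value)) // add/override metadata
        .build();                                           // new immutable copy
  }
}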
@@ -443,10 +440,10 @@ public class TestFSTableDescriptors {
}
@Override
- public HTableDescriptor get(TableName tablename)
+ public TableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
- " HTableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
+ " TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
}
}