Posted to commits@hbase.apache.org by st...@apache.org on 2012/07/13 09:20:25 UTC

svn commit: r1361058 - /hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

Author: stack
Date: Fri Jul 13 07:20:25 2012
New Revision: 1361058

URL: http://svn.apache.org/viewvc?rev=1361058&view=rev
Log:
HBASE-6370 Add compression codec test at HMaster when createTable/modifyColumn/modifyTable
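
For illustration, a minimal standalone sketch (not part of the commit) of what the new
master-side check amounts to. It assumes only the APIs the diff itself uses
(HColumnDescriptor.getCompression(), getCompactionCompression(), and
CompressionTest.testCompression()) plus client classes of that era; the class, table,
and family names are hypothetical, and the package of Compression (io.hfile below)
differs between HBase versions.

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.CompressionTest;

// Hypothetical driver class, for illustration only.
public class MasterCompressionCheckSketch {
  public static void main(String[] args) throws IOException {
    HTableDescriptor htd = new HTableDescriptor("t1");
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    // A codec whose native libraries may not be installed on the master.
    hcd.setCompressionType(Compression.Algorithm.LZO);
    htd.addFamily(hcd);

    // This is essentially what HMaster.createTable() now does up front:
    // probe every family's codecs and throw an IOException immediately if
    // one cannot be loaded, instead of letting region opens fail later.
    for (HColumnDescriptor family : htd.getColumnFamilies()) {
      CompressionTest.testCompression(family.getCompression());
      CompressionTest.testCompression(family.getCompactionCompression());
    }
  }
}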

Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1361058&r1=1361057&r2=1361058&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Fri Jul 13 07:20:25 2012
@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.monitorin
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.InfoServer;
@@ -285,6 +286,9 @@ Server {
    */
   private ObjectName mxBean = null;
 
+  // Should we check the compression codec type on the master side? Defaults to true (HBASE-6370).
+  private final boolean masterCheckCompression;
+
   /**
    * Initializes the HMaster. The steps are as follows:
    * <p>
@@ -352,6 +356,9 @@ Server {
     this.metrics = new MasterMetrics(getServerName().toString());
     // metrics interval: using the same property as region server.
     this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);
+
+    // Should we check the compression codec type on the master side? Defaults to true (HBASE-6370).
+    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
   }
 
   /**
@@ -1376,6 +1383,7 @@ Server {
 
     HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
     checkInitialized();
+    checkCompression(hTableDescriptor);
     if (cpHost != null) {
       cpHost.preCreateTable(hTableDescriptor, newRegions);
     }
@@ -1389,6 +1397,21 @@ Server {
 
   }
 
+  private void checkCompression(final HTableDescriptor htd)
+  throws IOException {
+    if (!this.masterCheckCompression) return;
+    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+      checkCompression(hcd);
+    }
+  }
+
+  private void checkCompression(final HColumnDescriptor hcd)
+  throws IOException {
+    if (!this.masterCheckCompression) return;
+    CompressionTest.testCompression(hcd.getCompression());
+    CompressionTest.testCompression(hcd.getCompactionCompression());
+  }
+
   @Override
   public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
   throws ServiceException {
@@ -1505,6 +1528,7 @@ Server {
 
     try {
       checkInitialized();
+      checkCompression(descriptor);
       if (cpHost != null) {
         if (cpHost.preModifyColumn(tableName, descriptor)) {
           return ModifyColumnResponse.newBuilder().build();
@@ -1626,6 +1650,7 @@ Server {
     HTableDescriptor htd = HTableDescriptor.convert(req.getTableSchema());
     try {
       checkInitialized();
+      checkCompression(htd);
       if (cpHost != null) {
         cpHost.preModifyTable(tableName, htd);
       }