Posted to commits@hbase.apache.org by an...@apache.org on 2015/05/29 08:42:58 UTC

hbase git commit: HBASE-13790 Remove the DeleteTableHandler. (Jingcheng)

Repository: hbase
Updated Branches:
  refs/heads/hbase-11339 b31a6acf4 -> a84e829e1


HBASE-13790 Remove the DeleteTableHandler. (Jingcheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a84e829e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a84e829e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a84e829e

Branch: refs/heads/hbase-11339
Commit: a84e829e127865a5fc3d54db9d02f091fc89f97b
Parents: b31a6ac
Author: anoopsjohn <an...@gmail.com>
Authored: Fri May 29 12:12:40 2015 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Fri May 29 12:12:40 2015 +0530

----------------------------------------------------------------------
 .../master/handler/DeleteTableHandler.java      | 243 -------------------
 .../master/procedure/DeleteTableProcedure.java  |   3 +-
 2 files changed, 1 insertion(+), 245 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a84e829e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
deleted file mode 100644
index 6069eba..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.handler;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-@InterfaceAudience.Private
-public class DeleteTableHandler extends TableEventHandler {
-  private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);
-
-  protected HTableDescriptor hTableDescriptor = null;
-
-  public DeleteTableHandler(TableName tableName, Server server,
-      final MasterServices masterServices) {
-    super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
-  }
-
-  @Override
-  protected void prepareWithTableLock() throws IOException {
-    // The next call fails if there is no such table.
-    hTableDescriptor = getTableDescriptor().getHTableDescriptor();
-  }
-
-  protected void waitRegionInTransition(final List<HRegionInfo> regions)
-      throws IOException, CoordinatedStateException {
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-    RegionStates states = am.getRegionStates();
-    long waitTime = server.getConfiguration().
-      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
-    for (HRegionInfo region : regions) {
-      long done = EnvironmentEdgeManager.currentTime() + waitTime;
-      while (EnvironmentEdgeManager.currentTime() < done) {
-        if (states.isRegionInState(region, State.FAILED_OPEN)) {
-          am.regionOffline(region);
-        }
-        if (!states.isRegionInTransition(region)) break;
-        try {
-          Thread.sleep(waitingTimeForEvents);
-        } catch (InterruptedException e) {
-          LOG.warn("Interrupted while sleeping");
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        LOG.debug("Waiting on region to clear regions in transition; "
-          + am.getRegionStates().getRegionTransitionState(region));
-      }
-      if (states.isRegionInTransition(region)) {
-        throw new IOException("Waited hbase.master.wait.on.region (" +
-          waitTime + "ms) for region to leave region " +
-          region.getRegionNameAsString() + " in transitions");
-      }
-    }
-  }
-
-  @Override
-  protected void handleTableOperation(List<HRegionInfo> regions)
-      throws IOException, CoordinatedStateException {
-    MasterCoprocessorHost cpHost = ((HMaster) this.server).getMasterCoprocessorHost();
-    if (cpHost != null) {
-      cpHost.preDeleteTableHandler(this.tableName);
-    }
-
-    // 1. Wait for regions in transition
-    waitRegionInTransition(regions);
-
-    // 2. Remove table from hbase:meta and HDFS
-    removeTableData(regions);
-
-    if (cpHost != null) {
-      cpHost.postDeleteTableHandler(this.tableName);
-    }
-    ((HMaster) this.server).getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
-  }
-
-  private void cleanupTableState() throws IOException {
-    // 3. Update table descriptor cache
-    LOG.debug("Removing '" + tableName + "' descriptor.");
-    this.masterServices.getTableDescriptors().remove(tableName);
-
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-
-    // 4. Clean up regions of the table in RegionStates.
-    LOG.debug("Removing '" + tableName + "' from region states.");
-    am.getRegionStates().tableDeleted(tableName);
-
-
-    // 5. Clean any remaining rows for this table.
-    cleanAnyRemainingRows();
-
-    // 6. If there is an entry for this table in the table states, remove it.
-    LOG.debug("Marking '" + tableName + "' as deleted.");
-    am.getTableStateManager().setDeletedTable(tableName);
-  }
-
-  /**
-   * There may be items for this table still up in hbase:meta in the case where the
-   * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
-   * that have to do with this table. See HBASE-12980.
-   * @throws IOException
-   */
-  private void cleanAnyRemainingRows() throws IOException {
-    ClusterConnection connection = this.masterServices.getConnection();
-    Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
-    try (Table metaTable =
-        connection.getTable(TableName.META_TABLE_NAME)) {
-      List<Delete> deletes = new ArrayList<Delete>();
-      try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
-        for (Result result : resScanner) {
-          deletes.add(new Delete(result.getRow()));
-        }
-      }
-      if (!deletes.isEmpty()) {
-        LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + this.tableName +
-          " from " + TableName.META_TABLE_NAME);
-        if (LOG.isDebugEnabled()) {
-          for (Delete d: deletes) LOG.debug("Purging " + d);
-        }
-        metaTable.delete(deletes);
-      }
-    }
-  }
-
-  /**
-   * Removes the table from hbase:meta and archives the HDFS files.
-   */
-  protected void removeTableData(final List<HRegionInfo> regions)
-      throws IOException, CoordinatedStateException {
-    try {
-      // 1. Remove regions from META
-      LOG.debug("Deleting regions from META");
-      MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);
-
-      // -----------------------------------------------------------------------
-      // NOTE: At this point we still have data on disk, but nothing in hbase:meta
-      //       if the rename below fails, hbck will report an inconsistency.
-      // -----------------------------------------------------------------------
-
-      // 2. Move the table to /hbase/.tmp
-      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
-      Path tempTableDir = mfs.moveTableToTemp(tableName);
-
-      // 3. Archive regions from FS (temp directory)
-      FileSystem fs = mfs.getFileSystem();
-      for (HRegionInfo hri : regions) {
-        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
-        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
-            tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
-      }
-
-      // Archive the mob data if there is a mob-enabled column
-      boolean hasMob = MobUtils.hasMobColumns(hTableDescriptor);
-      Path mobTableDir = null;
-      if (hasMob) {
-        // Archive mob data
-        mobTableDir = FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME),
-                tableName);
-        Path regionDir =
-                new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
-        if (fs.exists(regionDir)) {
-          HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
-        }
-      }
-      // 4. Delete table directory from FS (temp directory)
-      if (!fs.delete(tempTableDir, true)) {
-        LOG.error("Couldn't delete " + tempTableDir);
-      }
-      // Delete the table directory where the mob files are saved
-      if (hasMob && mobTableDir != null && fs.exists(mobTableDir)) {
-        if (!fs.delete(mobTableDir, true)) {
-          LOG.error("Couldn't delete " + mobTableDir);
-        }
-      }
-
-      LOG.debug("Table '" + tableName + "' archived!");
-    } finally {
-      cleanupTableState();
-    }
-  }
-
-  @Override
-  protected void releaseTableLock() {
-    super.releaseTableLock();
-    try {
-      masterServices.getTableLockManager().tableDeleted(tableName);
-    } catch (IOException ex) {
-      LOG.warn("Received exception from TableLockManager.tableDeleted:", ex); //not critical
-    }
-  }
-
-  @Override
-  public String toString() {
-    String name = "UnknownServerName";
-    if (server != null && server.getServerName() != null) {
-      name = server.getServerName().toString();
-    }
-    return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a84e829e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 0e561d7..45b7b51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -357,7 +357,6 @@ public class DeleteTableProcedure
       }
     }
 
-
     // Delete table directory from FS (temp directory)
     if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
       throw new IOException("Couldn't delete " + tempTableDir);
@@ -366,7 +365,7 @@ public class DeleteTableProcedure
     // Delete the table directory where the mob files are saved
     if (hasMob && mobTableDir != null && fs.exists(mobTableDir)) {
       if (!fs.delete(mobTableDir, true)) {
-        LOG.error("Couldn't delete " + mobTableDir);
+        throw new IOException("Couldn't delete mob dir " + mobTableDir);
       }
     }
   }