Posted to commits@hbase.apache.org by zh...@apache.org on 2020/08/29 14:27:11 UTC

[hbase] branch master updated: HBASE-24949 Optimize FSTableDescriptors.get to not always go to fs when cache miss (#2317)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new ef5da4a  HBASE-24949 Optimize FSTableDescriptors.get to not always go to fs when cache miss (#2317)
ef5da4a is described below

commit ef5da4a61df19d6b4e571fdf47c7d65ad57ad89a
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sat Aug 29 22:26:36 2020 +0800

    HBASE-24949 Optimize FSTableDescriptors.get to not always go to fs when cache miss (#2317)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../org/apache/hadoop/hbase/TableDescriptors.java  |  31 +--
 .../org/apache/hadoop/hbase/master/HMaster.java    |  10 +-
 .../master/procedure/CloneSnapshotProcedure.java   |   4 +-
 .../master/procedure/CreateTableProcedure.java     |   9 +-
 .../hbase/master/procedure/InitMetaProcedure.java  |  11 +-
 .../master/procedure/TruncateTableProcedure.java   |   2 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |   8 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      | 225 +++++++++------------
 .../master/assignment/MockMasterServices.java      |  10 +-
 .../TestReadAndWriteRegionInfoFile.java            |   2 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |  27 +--
 11 files changed, 159 insertions(+), 180 deletions(-)
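
The heart of the change: FSTableDescriptors now takes its cache setting at
construction time, and a full getAll() scan sets an fsvisited flag. Once that
flag is up, a cache miss in get() is authoritative and never falls through to
the filesystem. For reference, the new lookup flow condenses to roughly the
following (a simplified sketch with HBase types replaced by stand-ins, not the
committed code; see the FSTableDescriptors diff further down):

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  class CachedDescriptors {
    private final Map<String, Object> cache = new ConcurrentHashMap<>();
    private final boolean usecache;     // fixed at construction; setCacheOn/Off are gone
    private volatile boolean fsvisited; // true once getAll() has scanned every table dir

    CachedDescriptors(boolean usecache) {
      this.usecache = usecache;
    }

    Object get(String tableName) {
      if (usecache) {
        Object cached = cache.get(tableName);
        if (cached != null) {
          return cached;  // cache hit, no fs access
        }
        if (fsvisited) {
          // The full scan already populated the cache and every write goes
          // through update(), so a miss here is final: skip the fs entirely.
          return null;
        }
      }
      return readFromFs(tableName);  // cache not warm yet: fall back to the fs
    }

    void onFullScanComplete(boolean allLoaded) {
      fsvisited = usecache && allLoaded;  // getAll() flips this after a complete scan
    }

    private Object readFromFs(String tableName) {
      return null;  // stands in for reading the .tableinfo file
    }
  }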

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 8598581..e82082c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -31,7 +31,7 @@ public interface TableDescriptors {
   /**
    * @return TableDescriptor for tablename
    */
-  TableDescriptor get(final TableName tableName) throws IOException;
+  TableDescriptor get(TableName tableName) throws IOException;
 
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
@@ -40,8 +40,8 @@ public interface TableDescriptors {
   Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
 
   /**
-   * Get Map of all TableDescriptors. Populates the descriptor cache as a
-   * side effect.
+   * Get Map of all TableDescriptors. Populates the descriptor cache as a side effect.
+   * <p/>
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
@@ -49,23 +49,24 @@ public interface TableDescriptors {
   Map<String, TableDescriptor> getAll() throws IOException;
 
   /**
-   * Add or update descriptor
-   * @param htd Descriptor to set into TableDescriptors
+   * Add or update descriptor. Just call {@link #update(TableDescriptor, boolean)} with
+   * {@code cacheOnly} as {@code false}.
    */
-  void update(final TableDescriptor htd) throws IOException;
+  default void update(TableDescriptor htd) throws IOException {
+    update(htd, false);
+  }
 
   /**
-   * @return Instance of table descriptor or null if none found.
-   */
-  TableDescriptor remove(final TableName tablename) throws IOException;
-
-  /**
-   * Enables the tabledescriptor cache
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @param cacheOnly only add the given {@code htd} to cache, without updating the storage. For
+   *          example, when creating a table, we will write the descriptor to fs when creating the
+   *          fs layout, so we do not need to update the fs again.
    */
-  void setCacheOn() throws IOException;
+  void update(TableDescriptor htd, boolean cacheOnly) throws IOException;
 
   /**
-   * Disables the tabledescriptor cache
+   * @return Instance of table descriptor or null if none found.
    */
-  void setCacheOff() throws IOException;
+  TableDescriptor remove(TableName tablename) throws IOException;
 }
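
The reshaped interface leaves callers two entry points; a hypothetical
master-side snippet (helper names are illustrative, not from the commit):

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableDescriptors;
  import org.apache.hadoop.hbase.client.TableDescriptor;

  class DescriptorUpdates {
    // Cache-only refresh: the descriptor file was already written while
    // creating the fs layout, so skip the second fs write.
    static void afterFsLayoutWritten(TableDescriptors tds, TableDescriptor td)
        throws IOException {
      tds.update(td, true);
    }

    // Ordinary alter path: the default overload persists to fs, then caches
    // (equivalent to update(td, false)).
    static void alterTable(TableDescriptors tds, TableDescriptor td) throws IOException {
      tds.update(td);
    }
  }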
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a70dc48..f1453f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -222,6 +222,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -233,6 +234,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -760,6 +762,11 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
+  protected boolean cacheTableDescriptor() {
+    return true;
+  }
+
+  @Override
   protected RSRpcServices createRpcServices() throws IOException {
     return new MasterRpcServices(this);
   }
@@ -924,9 +931,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.fileSystemManager = new MasterFileSystem(conf);
     this.walManager = new MasterWalManager(this);
 
-    // enable table descriptors cache
-    this.tableDescriptors.setCacheOn();
-
     // warm-up HTDs cache on master initialization
     if (preLoadTableDescriptors) {
       status.setStatus("Pre-loading table descriptors");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 1c2bddc..3b483e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -141,6 +141,7 @@ public class CloneSnapshotProcedure
           break;
         case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
           newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
+          env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
           setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
           break;
         case CLONE_SNAPSHOT_ADD_TO_META:
@@ -172,8 +173,9 @@ public class CloneSnapshotProcedure
           setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE);
           break;
         case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
+          // XXX: this stage should be renamed to "set table enabled", as we now cache the
+          // descriptor right after writing the fs layout.
           CreateTableProcedure.setEnabledState(env, getTableName());
-          CreateTableProcedure.updateTableDescCache(env, getTableName());
           setNextState(CloneSnapshotState.CLONE_SNAPHOST_RESTORE_ACL);
           break;
         case CLONE_SNAPHOST_RESTORE_ACL:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index b48cef8..113f96c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -101,6 +101,7 @@ public class CreateTableProcedure
         case CREATE_TABLE_WRITE_FS_LAYOUT:
           DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, true);
           newRegions = createFsLayout(env, tableDescriptor, newRegions);
+          env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
           setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
           break;
         case CREATE_TABLE_ADD_TO_META:
@@ -114,8 +115,9 @@ public class CreateTableProcedure
           setNextState(CreateTableState.CREATE_TABLE_UPDATE_DESC_CACHE);
           break;
         case CREATE_TABLE_UPDATE_DESC_CACHE:
+          // XXX: this stage should be renamed to "set table enabled", as we now cache the
+          // descriptor right after writing the fs layout.
           setEnabledState(env, getTableName());
-          updateTableDescCache(env, getTableName());
           setNextState(CreateTableState.CREATE_TABLE_POST_OPERATION);
           break;
         case CREATE_TABLE_POST_OPERATION:
@@ -406,11 +408,6 @@ public class CreateTableProcedure
       regionInfos, tableDescriptor.getRegionReplication());
   }
 
-  protected static void updateTableDescCache(final MasterProcedureEnv env,
-      final TableName tableName) throws IOException {
-    env.getMasterServices().getTableDescriptors().get(tableName);
-  }
-
   @Override
   protected boolean shouldWaitClientAck(MasterProcedureEnv env) {
     // system tables are created on bootstrap internally by the system
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index cdf78cc..f158452 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -72,7 +72,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
     return TableOperationType.CREATE;
   }
 
-  private static void writeFsLayout(Path rootDir, Configuration conf) throws IOException {
+  private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) throws IOException {
     LOG.info("BOOTSTRAP: creating hbase:meta region");
     FileSystem fs = rootDir.getFileSystem(conf);
     Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
@@ -83,13 +83,13 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
     // created here in bootstrap and it'll need to be cleaned up. Better to
     // not make it in first place. Turn off block caching for bootstrap.
     // Enable after.
-    FSTableDescriptors.tryUpdateMetaTableDescriptor(conf, fs, rootDir,
-      builder -> builder.setRegionReplication(
+    TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs,
+      rootDir, builder -> builder.setRegionReplication(
         conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM)));
-    TableDescriptor metaDescriptor = new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
     HRegion
       .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
       .close();
+    return metaDescriptor;
   }
 
   @Override
@@ -101,7 +101,8 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
         case INIT_META_WRITE_FS_LAYOUT:
           Configuration conf = env.getMasterConfiguration();
           Path rootDir = CommonFSUtils.getRootDir(conf);
-          writeFsLayout(rootDir, conf);
+          TableDescriptor td = writeFsLayout(rootDir, conf);
+          env.getMasterServices().getTableDescriptors().update(td, true);
           setNextState(InitMetaState.INIT_META_ASSIGN_META);
           return Flow.HAS_MORE_STATE;
         case INIT_META_ASSIGN_META:
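
The bootstrap now threads the meta descriptor straight from the write path
into the cache, instead of constructing a second FSTableDescriptors just to
read back what was written a moment earlier. The shape of the step, reduced
to a sketch (String stands in for TableDescriptor):

  import java.io.IOException;

  class InitMetaSketch {
    interface DescriptorStore {
      void update(String td, boolean cacheOnly) throws IOException;
    }

    String executeWriteFsLayoutStep(DescriptorStore store) throws IOException {
      String metaDescriptor = writeFsLayout(); // creates the hbase:meta region + descriptor file
      store.update(metaDescriptor, true);      // cacheOnly: the file was just written above
      return metaDescriptor;
    }

    private String writeFsLayout() {
      return "meta-descriptor";                // stand-in for the real bootstrap work
    }
  }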
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 7381e12..290e0c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -130,7 +130,7 @@ public class TruncateTableProcedure
         case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
           DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
           regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions);
-          CreateTableProcedure.updateTableDescCache(env, getTableName());
+          env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
           setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
           break;
         case TRUNCATE_TABLE_ADD_TO_META:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index d1bcda3..6dd2e52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -731,8 +731,8 @@ public class HRegionServer extends Thread implements
     CommonFSUtils.setFsDefault(this.conf, CommonFSUtils.getRootDir(this.conf));
     this.dataFs = new HFileSystem(this.conf, useHBaseChecksum);
     this.dataRootDir = CommonFSUtils.getRootDir(this.conf);
-    this.tableDescriptors =
-      new FSTableDescriptors(this.dataFs, this.dataRootDir, !canUpdateTableDescriptor(), false);
+    this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir,
+      !canUpdateTableDescriptor(), cacheTableDescriptor());
   }
 
   protected void login(UserProvider user, String host) throws IOException {
@@ -758,6 +758,10 @@ public class HRegionServer extends Thread implements
     return false;
   }
 
+  protected boolean cacheTableDescriptor() {
+    return false;
+  }
+
   protected RSRpcServices createRpcServices() throws IOException {
     return new RSRpcServices(this);
   }
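
The cache flag is wired through a small template method: the base
HRegionServer answers false, HMaster overrides it to true, and the result
feeds the new FSTableDescriptors constructor argument (replacing the old
mutable setCacheOn()/setCacheOff() switches). Reduced to a sketch:

  class RegionServerSketch {
    protected boolean cacheTableDescriptor() {
      return false;  // region servers read descriptors but never cache them
    }

    void initializeFileSystem() {
      boolean usecache = cacheTableDescriptor();
      // corresponds to: new FSTableDescriptors(dataFs, dataRootDir, fsReadOnly, usecache)
      System.out.println("descriptor cache enabled: " + usecache);
    }
  }

  class MasterSketch extends RegionServerSketch {
    @Override
    protected boolean cacheTableDescriptor() {
      return true;   // only the master caches; it is the sole descriptor writer
    }
  }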
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index bee3114..afff1c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.util;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Comparator;
 import java.util.List;
@@ -79,7 +78,7 @@ public class FSTableDescriptors implements TableDescriptors {
   private final FileSystem fs;
   private final Path rootdir;
   private final boolean fsreadonly;
-  private volatile boolean usecache;
+  private final boolean usecache;
   private volatile boolean fsvisited;
 
   @VisibleForTesting
@@ -121,15 +120,16 @@ public class FSTableDescriptors implements TableDescriptors {
 
   @VisibleForTesting
   public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOException {
-    tryUpdateMetaTableDescriptor(conf, CommonFSUtils.getCurrentFileSystem(conf),
+    tryUpdateAndGetMetaTableDescriptor(conf, CommonFSUtils.getCurrentFileSystem(conf),
       CommonFSUtils.getRootDir(conf), null);
   }
 
-  public static void tryUpdateMetaTableDescriptor(Configuration conf, FileSystem fs, Path rootdir,
-      Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+  public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf,
+    FileSystem fs, Path rootdir,
+    Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
     // see if we already have meta descriptor on fs. Write one if not.
     try {
-      getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+      return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
     } catch (TableInfoMissingException e) {
       TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
       if (metaObserver != null) {
@@ -144,6 +144,7 @@ public class FSTableDescriptors implements TableDescriptors {
         throw new IOException("Failed update hbase:meta table descriptor");
       }
       LOG.info("Updated hbase:meta table descriptor to {}", p);
+      return td;
     }
   }
 
@@ -196,57 +197,45 @@ public class FSTableDescriptors implements TableDescriptors {
         .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
   }
 
-  @Override
-  public void setCacheOn() throws IOException {
-    this.cache.clear();
-    this.usecache = true;
-  }
-
-  @Override
-  public void setCacheOff() throws IOException {
-    this.usecache = false;
-    this.cache.clear();
-  }
-
   @VisibleForTesting
-  public boolean isUsecache() {
+  protected boolean isUsecache() {
     return this.usecache;
   }
 
   /**
    * Get the current table descriptor for the given table, or null if none exists.
-   *
-   * Uses a local cache of the descriptor but still checks the filesystem on each call
-   * to see if a newer file has been created since the cached one was read.
+   * <p/>
+   * Uses a local cache of the descriptor but still checks the filesystem on each call if
+   * {@link #fsvisited} is not {@code true}, i.e., we haven't done a full scan yet, to see if a newer
+   * file has been created since the cached one was read.
    */
   @Override
   @Nullable
-  public TableDescriptor get(final TableName tablename)
-  throws IOException {
+  public TableDescriptor get(TableName tableName) {
     invocations++;
     if (usecache) {
       // Look in cache of descriptors.
-      TableDescriptor cachedtdm = this.cache.get(tablename);
+      TableDescriptor cachedtdm = this.cache.get(tableName);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
+      // we do not need to go to fs any more
+      if (fsvisited) {
+        return null;
+      }
     }
     TableDescriptor tdmt = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, e);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tableName);
     } catch (TableInfoMissingException e) {
       // ignore. This is regular operation
-    } catch (IOException ioe) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, ioe);
+    } catch (NullPointerException | IOException ioe) {
+      LOG.debug("Exception during readTableDecriptor. Current table name = " + tableName, ioe);
     }
     // last HTD written wins
     if (usecache && tdmt != null) {
-      this.cache.put(tablename, tdmt);
+      this.cache.put(tableName, tdmt);
     }
 
     return tdmt;
@@ -258,29 +247,22 @@ public class FSTableDescriptors implements TableDescriptors {
   @Override
   public Map<String, TableDescriptor> getAll() throws IOException {
     Map<String, TableDescriptor> tds = new TreeMap<>();
-    if (fsvisited && usecache) {
+    if (fsvisited) {
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
       }
     } else {
       LOG.trace("Fetching table descriptors from the filesystem.");
-      boolean allvisited = true;
+      boolean allvisited = usecache;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        TableDescriptor htd = null;
-        try {
-          htd = get(CommonFSUtils.getTableName(d));
-        } catch (FileNotFoundException fnfe) {
-          // inability of retrieving one HTD shouldn't stop getting the remaining
-          LOG.warn("Trouble retrieving htd", fnfe);
-        }
+        TableDescriptor htd = get(CommonFSUtils.getTableName(d));
         if (htd == null) {
           allvisited = false;
-          continue;
         } else {
           tds.put(htd.getTableName().getNameWithNamespaceInclAsString(), htd);
         }
-        fsvisited = allvisited;
       }
+      fsvisited = allvisited;
     }
     return tds;
   }
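
Worth noting in the hunk above: allvisited is now seeded with usecache rather
than true, and fsvisited is assigned once after the loop instead of on every
iteration. With caching disabled, a successful full scan must not flip
fsvisited, or later get() calls would treat misses against the empty cache as
authoritative nulls. A compilable sketch of that invariant (String stands in
for the HBase types):

  import java.util.List;
  import java.util.Map;
  import java.util.TreeMap;

  class GetAllSketch {
    private final boolean usecache;
    private volatile boolean fsvisited;

    GetAllSketch(boolean usecache) {
      this.usecache = usecache;
    }

    Map<String, String> getAll(List<String> tableDirs) {
      Map<String, String> tds = new TreeMap<>();
      boolean allvisited = usecache;      // cache off: can never become "fully visited"
      for (String dir : tableDirs) {
        String htd = readDescriptor(dir); // the real code calls get(tableName)
        if (htd == null) {
          allvisited = false;             // one unreadable table blocks the fast path
        } else {
          tds.put(dir, htd);
        }
      }
      fsvisited = allvisited;             // assigned once, after the whole scan
      return tds;
    }

    private String readDescriptor(String dir) {
      return "descriptor:" + dir;         // stand-in for the per-table fs read
    }
  }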
@@ -290,35 +272,48 @@ public class FSTableDescriptors implements TableDescriptors {
     * @see #get(org.apache.hadoop.hbase.TableName)
     */
   @Override
-  public Map<String, TableDescriptor> getByNamespace(String name)
-  throws IOException {
+  public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
     Map<String, TableDescriptor> htds = new TreeMap<>();
     List<Path> tableDirs =
-        FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name));
-    for (Path d: tableDirs) {
-      TableDescriptor htd = null;
-      try {
-        htd = get(CommonFSUtils.getTableName(d));
-      } catch (FileNotFoundException fnfe) {
-        // inability of retrieving one HTD shouldn't stop getting the remaining
-        LOG.warn("Trouble retrieving htd", fnfe);
+      FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name));
+    for (Path d : tableDirs) {
+      TableDescriptor htd = get(CommonFSUtils.getTableName(d));
+      if (htd == null) {
+        continue;
       }
-      if (htd == null) continue;
       htds.put(CommonFSUtils.getTableName(d).getNameAsString(), htd);
     }
     return htds;
   }
 
-  /**
-   * Adds (or updates) the table descriptor to the FileSystem
-   * and updates the local cache with it.
-   */
   @Override
-  public void update(TableDescriptor htd) throws IOException {
+  public void update(TableDescriptor td, boolean cacheOnly) throws IOException {
+    // TODO: in fact this method will only be called at master side, so fsreadonly will always be
+    // false and usecache will always be true. In general, we'd better have a
+    // ReadOnlyFSTableDescriptors for HRegionServer, but for now HMaster extends HRegionServer, so
+    // unless we make use of generics, we cannot have different implementations for HMaster and
+    // HRegionServer. Revisit this when HMaster no longer extends HRegionServer.
     if (fsreadonly) {
-      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+      throw new UnsupportedOperationException("Cannot add a table descriptor - in read only mode");
+    }
+    if (!cacheOnly) {
+      updateTableDescriptor(td);
+    }
+    if (usecache) {
+      this.cache.put(td.getTableName(), td);
+    }
+  }
+
+  @VisibleForTesting
+  Path updateTableDescriptor(TableDescriptor td) throws IOException {
+    TableName tableName = td.getTableName();
+    Path tableDir = getTableDir(tableName);
+    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
+    if (p == null) {
+      throw new IOException("Failed update");
     }
-    updateTableDescriptor(htd);
+    LOG.info("Updated tableinfo=" + p);
+    return p;
   }
 
   /**
@@ -380,22 +375,21 @@ public class FSTableDescriptors implements TableDescriptors {
 
   /**
    * Find the most current table info file in the given directory
-   *
-   * Looks within the given directory for any table info files
-   * and takes the 'current' one - meaning the one with the highest sequence number if present
-   * or no sequence number at all if none exist (for backward compatibility from before there
-   * were sequence numbers).
-   * If there are multiple possible files found
-   * and the we're not in read only mode it also deletes the older files.
-   *
+   * <p/>
+   * Looks within the given directory for any table info files and takes the 'current' one - meaning
+   * the one with the highest sequence number if present or no sequence number at all if none exist
+   * (for backward compatibility from before there were sequence numbers).
+   * <p/>
+   * If there are multiple possible files found and we're not in read-only mode, it also deletes
+   * the older files.
    * @return The file status of the current table info file or null if it does not exist
-   * @throws IOException
    */
-  // only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
-  static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles)
-    throws IOException {
+  private static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir,
+    boolean removeOldFiles) throws IOException {
     FileStatus[] status = CommonFSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
-    if (status == null || status.length < 1) return null;
+    if (status == null || status.length < 1) {
+      return null;
+    }
     FileStatus mostCurrent = null;
     for (FileStatus file : status) {
       if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
@@ -419,16 +413,16 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
-   * reverse order.
+   * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in reverse order.
    */
   @VisibleForTesting
   static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
-  new Comparator<FileStatus>() {
-    @Override
-    public int compare(FileStatus left, FileStatus right) {
-      return right.compareTo(left);
-    }};
+    new Comparator<FileStatus>() {
+      @Override
+      public int compare(FileStatus left, FileStatus right) {
+        return right.compareTo(left);
+      }
+    };
 
   /**
    * Return the table directory in HDFS
@@ -448,12 +442,13 @@ public class FSTableDescriptors implements TableDescriptors {
   /**
    * Width of the sequenceid that is a suffix on a tableinfo file.
    */
-  @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10;
+  @VisibleForTesting
+  static final int WIDTH_OF_SEQUENCE_ID = 10;
 
-  /*
+  /**
    * @param number Number to use as suffix.
-   * @return Returns zero-prefixed decimal version of passed
-   * number (Does absolute in case number is negative).
+   * @return Returns zero-prefixed decimal version of passed number (takes the absolute value in
+   *         case the number is negative).
    */
   private static String formatTableInfoSequenceId(final int number) {
     byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
@@ -477,12 +472,19 @@ public class FSTableDescriptors implements TableDescriptors {
    * @param p Path to a <code>.tableinfo</code> file.
    * @return The current editid or 0 if none found.
    */
-  @VisibleForTesting static int getTableInfoSequenceId(final Path p) {
-    if (p == null) return 0;
+  @VisibleForTesting
+  static int getTableInfoSequenceId(final Path p) {
+    if (p == null) {
+      return 0;
+    }
     Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
-    if (!m.matches()) throw new IllegalArgumentException(p.toString());
+    if (!m.matches()) {
+      throw new IllegalArgumentException(p.toString());
+    }
     String suffix = m.group(2);
-    if (suffix == null || suffix.length() <= 0) return 0;
+    if (suffix == null || suffix.length() <= 0) {
+      return 0;
+    }
     return Integer.parseInt(m.group(2));
   }
 
@@ -490,7 +492,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * @param sequenceid
    * @return Name of tableinfo file.
    */
-  @VisibleForTesting static String getTableInfoFileName(final int sequenceid) {
+  @VisibleForTesting
+  static String getTableInfoFileName(final int sequenceid) {
     return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
   }
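
A worked example of the naming scheme these helpers implement: a fixed-width,
zero-padded sequence id appended to the tableinfo prefix, so lexicographic
file-name order matches numeric order (the ".tableinfo" prefix value is
assumed from the surrounding code; only the width appears verbatim in this
hunk):

  class TableInfoNames {
    static final int WIDTH_OF_SEQUENCE_ID = 10;

    static String fileName(int sequenceId) {
      // e.g. sequenceId 5 -> ".tableinfo.0000000005"
      return ".tableinfo."
          + String.format("%0" + WIDTH_OF_SEQUENCE_ID + "d", Math.abs(sequenceId));
    }

    static int sequenceId(String fileName) {
      int dot = fileName.lastIndexOf('.');
      String suffix = fileName.substring(dot + 1);
      // legacy files may carry no numeric suffix at all; treat those as id 0
      return !suffix.isEmpty() && suffix.chars().allMatch(Character::isDigit)
          ? Integer.parseInt(suffix) : 0;
    }

    public static void main(String[] args) {
      String name = fileName(5);
      System.out.println(name + " -> " + sequenceId(name)); // .tableinfo.0000000005 -> 5
    }
  }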
 
@@ -511,7 +514,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws TableInfoMissingException if there is no descriptor
    */
   public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
-  throws IOException {
+    throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
       throw new TableInfoMissingException("No table descriptor file under " + tableDir);
@@ -539,29 +542,6 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Update table descriptor on the file system
-   * @throws IOException Thrown if failed update.
-   * @throws NotImplementedException if in read only mode
-   */
-  @VisibleForTesting
-  Path updateTableDescriptor(TableDescriptor td) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
-    }
-    TableName tableName = td.getTableName();
-    Path tableDir = getTableDir(tableName);
-    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
-    if (p == null) {
-      throw new IOException("Failed update");
-    }
-    LOG.info("Updated tableinfo=" + p);
-    if (usecache) {
-      this.cache.put(td.getTableName(), td);
-    }
-    return p;
-  }
-
-  /**
    * Deletes files matching the table info file pattern within the given directory
    * whose sequenceId is at most the given max sequenceId.
    */
@@ -583,18 +563,15 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Attempts to write a new table descriptor to the given table's directory.
-   * It first writes it to the .tmp dir then uses an atomic rename to move it into place.
-   * It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
-   * not already in use.
+   * Attempts to write a new table descriptor to the given table's directory. It first writes it to
+   * the .tmp dir then uses an atomic rename to move it into place. It begins at the
+   * currentSequenceId + 1 and tries 10 times to find a new sequence number not already in use.
+   * <p/>
    * Removes the current descriptor file if passed in.
-   *
    * @return Descriptor file or null if we failed write.
    */
-  private static Path writeTableDescriptor(final FileSystem fs,
-    final TableDescriptor htd, final Path tableDir,
-    final FileStatus currentDescriptorFile)
-  throws IOException {
+  private static Path writeTableDescriptor(final FileSystem fs, final TableDescriptor htd,
+    final Path tableDir, final FileStatus currentDescriptorFile) throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
     // This directory is never removed to avoid removing it out from under a concurrent writer.
     Path tmpTableDir = new Path(tableDir, TMP_DIR);
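
The write pattern that the writeTableDescriptor javadoc describes, transposed
to java.nio for a self-contained sketch (the real code uses Hadoop's
FileSystem API and its own rename semantics):

  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.StandardCopyOption;

  class AtomicDescriptorWrite {
    static Path write(Path tableDir, byte[] descriptor, int currentSequenceId)
        throws IOException {
      Path tmpDir = tableDir.resolve(".tmp");
      Files.createDirectories(tmpDir);
      for (int seq = currentSequenceId + 1; seq <= currentSequenceId + 10; seq++) {
        String name = ".tableinfo." + String.format("%010d", seq);
        Path target = tableDir.resolve(name);
        if (Files.exists(target)) {
          continue;                    // slot taken by a concurrent writer; try the next id
        }
        Path tmp = tmpDir.resolve(name);
        Files.write(tmp, descriptor);  // never exposes a half-written file
        Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
        return target;
      }
      return null;                     // callers turn this into an IOException
    }
  }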
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 8c94909..5e78c3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -331,17 +331,9 @@ public class MockMasterServices extends MockNoopMasterServices {
       }
 
       @Override
-      public void update(TableDescriptor htd) throws IOException {
+      public void update(TableDescriptor htd, boolean cacheOnly) throws IOException {
         // noop
       }
-
-      @Override
-      public void setCacheOn() throws IOException {
-      }
-
-      @Override
-      public void setCacheOff() throws IOException {
-      }
     };
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
index 103fb73..7d6c55b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
@@ -71,7 +71,7 @@ public class TestReadAndWriteRegionInfoFile {
     RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
     // Create a region. That'll write the .regioninfo file.
     FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR);
-    FSTableDescriptors.tryUpdateMetaTableDescriptor(CONF, FS, ROOT_DIR, null);
+    FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR, null);
     HRegion r = HBaseTestingUtility.createRegionAndWAL(ri, ROOT_DIR, CONF,
       fsTableDescriptors.get(TableName.META_TABLE_NAME));
     // Get modtime on the file.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 554cb9d..996be3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -93,7 +91,7 @@ public class TestFSTableDescriptors {
     FileStatus[] statuses = fs.listStatus(testdir);
     assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
-      fstd.updateTableDescriptor(htd);
+      fstd.update(htd);
     }
     statuses = fs.listStatus(testdir);
     assertTrue(statuses.length == 1);
@@ -218,8 +216,7 @@ public class TestFSTableDescriptors {
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
       @Override
-      public TableDescriptor get(TableName tablename)
-          throws TableExistsException, FileNotFoundException, IOException {
+      public TableDescriptor get(TableName tablename) {
         LOG.info(tablename + ", cachehits=" + this.cachehits);
         return super.get(tablename);
       }
@@ -240,7 +237,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
-      htds.updateTableDescriptor(builder.build());
+      htds.update(builder.build());
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -276,7 +273,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
-      htds.updateTableDescriptor(builder.build());
+      htds.update(builder.build());
     }
     for (int i = 0; i < count; i++) {
       assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
@@ -371,13 +368,18 @@ public class TestFSTableDescriptors {
     // random will only increase the cachehit by 1
     assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1);
 
-    for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
+    for (Map.Entry<String, TableDescriptor> entry : chtds.getAll().entrySet()) {
       String t = (String) entry.getKey();
       TableDescriptor nchtd = entry.getValue();
-      assertTrue("expected " + htd.toString() +
-                   " got: " + chtds.get(TableName.valueOf(t)).toString(),
-                 (nchtd.equals(chtds.get(TableName.valueOf(t)))));
+      assertTrue(
+        "expected " + htd.toString() + " got: " + chtds.get(TableName.valueOf(t)).toString(),
+        (nchtd.equals(chtds.get(TableName.valueOf(t)))));
     }
+    // this is by design: for an FSTableDescriptors with cache enabled, once we have done a full
+    // scan and loaded all the table descriptors into the cache, we will not go to the file system
+    // again, as the only way to update a table descriptor is through us, so we can cache it when
+    // updating.
+    assertNotNull(nonchtds.get(random));
+    assertNull(chtds.get(random));
   }
 
   @Test
@@ -474,8 +476,7 @@ public class TestFSTableDescriptors {
     }
 
     @Override
-    public TableDescriptor get(TableName tablename)
-      throws TableExistsException, FileNotFoundException, IOException {
+    public TableDescriptor get(TableName tablename) {
       LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
                  " TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
       return super.get(tablename);