Posted to commits@hbase.apache.org by wc...@apache.org on 2020/12/21 12:23:45 UTC

[hbase-operator-tools] branch master updated: HBASE-25297 [HBCK2] Regenerate missing table descriptors by hbck2 (#79)

This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
     new b461d58  HBASE-25297 [HBCK2] Regenerate missing table descriptors by hbck2 (#79)
b461d58 is described below

commit b461d58833cee465b18e46780ebb85c7b159df44
Author: Mate Szalay-Beko <sz...@gmail.com>
AuthorDate: Mon Dec 21 13:23:35 2020 +0100

    HBASE-25297 [HBCK2] Regenerate missing table descriptors by hbck2 (#79)
    
    Signed-off-by: Wellington Chevreuil <wc...@apache.org>
---
 hbase-hbck2/README.md                              |  20 +
 .../src/main/java/org/apache/hbase/HBCK2.java      |  42 +-
 .../apache/hbase/HBCKAbstractFileStatusFilter.java |  72 ++++
 .../org/apache/hbase/HBCKFileStatusFilter.java     |  40 ++
 .../org/apache/hbase/HBCKFsTableDescriptors.java   | 445 +++++++++++++++++++++
 .../main/java/org/apache/hbase/HBCKFsUtils.java    | 260 ++++++++++++
 .../hbase/MissingTableDescriptorGenerator.java     | 193 +++++++++
 .../java/org/apache/hbase/hbck1/HBaseFsck.java     |  14 +-
 .../org/apache/hbase/hbck1/HBaseFsckRepair.java    |   1 -
 .../org/apache/hbase/hbck1/OfflineMetaRepair.java  |   1 -
 .../src/test/java/org/apache/hbase/TestHBCK2.java  |   5 +-
 .../TestHBCKFsTableDescriptorForceCreation.java    |  98 +++++
 .../apache/hbase/TestHBCKFsTableDescriptors.java   | 181 +++++++++
 .../hbase/TestMissingTableDescriptorGenerator.java | 155 +++++++
 .../org/apache/hbase/TestSchedulingRecoveries.java |   6 +-
 15 files changed, 1524 insertions(+), 9 deletions(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index e7c01ff..c8ebb79 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -200,6 +200,26 @@ Command:
    for how to generate new report.
    SEE ALSO: reportMissingRegionsInMeta
 
+ generateMissingTableDescriptorFile <TABLENAME>
+   Tries to fix an orphan table by generating a missing table descriptor
+   file. This command has no effect if the table folder is missing or if
+   the .tableinfo file is present (we don't overwrite existing table
+   descriptors). The command first checks if the TableDescriptor is
+   cached in the HBase Master, in which case it recovers the .tableinfo
+   from that cache. If the TableDescriptor is not cached in the master,
+   it creates a default .tableinfo file with the following items:
+     - the table name
+     - the column family list determined based on the file system
+     - the default properties for both TableDescriptor and
+       ColumnFamilyDescriptors
+   If the .tableinfo file was generated using default parameters then
+   make sure you check the table / column family properties later (and
+   change them if needed).
+   This method does not change anything in HBase, only writes the new
+   .tableinfo file to the file system. Orphan tables can cause e.g.
+   ServerCrashProcedures to get stuck; you might still need to fix those
+   after you have generated the missing table info files.
+
  replication [OPTIONS] [<TABLENAME>...]
    Options:
     -f, --fix    fix any replication issues found.
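
For reference, the new command is invoked the same way as the other HBCK2
commands documented in this README; a minimal example (jar version and table
name below are illustrative):

    HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar \
      hbase org.apache.hbase.HBCK2 generateMissingTableDescriptorFile my_table
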
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
index 4ab1b4b..87b20b4 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
@@ -17,7 +17,11 @@
  */
 package org.apache.hbase;
 
-import java.io.*;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -95,6 +99,7 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
   private static final String VERSION = "version";
   private static final String SET_REGION_STATE = "setRegionState";
   private static final String SCHEDULE_RECOVERIES = "scheduleRecoveries";
+  private static final String GENERATE_TABLE_INFO = "generateMissingTableDescriptorFile";
   private static final String FIX_META = "fixMeta";
   // TODO update this map in case of the name of a method changes in Hbck interface
   //  in org.apache.hadoop.hbase.client package. Or a new command is added and the hbck command
@@ -413,6 +418,8 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
     writer.println();
     usageFixMeta(writer);
     writer.println();
+    usageGenerateMissingTableInfo(writer);
+    writer.println();
     usageReplication(writer);
     writer.println();
     usageReportMissingRegionsInMeta(writer);
@@ -520,6 +527,28 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
     writer.println("   SEE ALSO: " + REPORT_MISSING_REGIONS_IN_META);
   }
 
+  private static void usageGenerateMissingTableInfo(PrintWriter writer) {
+    writer.println(" " + GENERATE_TABLE_INFO + " <TABLENAME>");
+    writer.println("   Trying to fix an orphan table by generating a missing table descriptor");
+    writer.println("   file. This command will have no effect if the table folder is missing");
+    writer.println("   or if the .tableinfo is present (we don't override existing table");
+    writer.println("   descriptors). This command will first check it the TableDescriptor is");
+    writer.println("   cached in HBase Master in which case it will recover the .tableinfo");
+    writer.println("   accordingly. If TableDescriptor is not cached in master then it will");
+    writer.println("   create a default .tableinfo file with the following items:");
+    writer.println("     - the table name");
+    writer.println("     - the column family list determined based on the file system");
+    writer.println("     - the default properties for both TableDescriptor and");
+    writer.println("       ColumnFamilyDescriptors");
+    writer.println("   If the .tableinfo file was generated using default parameters then");
+    writer.println("   make sure you check the table / column family properties later (and");
+    writer.println("   change them if needed).");
+    writer.println("   This method does not change anything in HBase, only writes the new");
+    writer.println("   .tableinfo file to the file system. Orphan tables can cause e.g.");
+    writer.println("   ServerCrashProcedures to stuck, you might need to fix these still");
+    writer.println("   after you generated the missing table info files.");
+  }
+
   private static void usageReplication(PrintWriter writer) {
     writer.println(" " + REPLICATION + " [OPTIONS] [<TABLENAME>...]");
     writer.println("   Options:");
@@ -760,6 +789,7 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
   /**
    * Process parsed command-line. General options have already been processed by caller.
    */
+  @SuppressWarnings("checkstyle:methodlength")
   private int doCommandLine(CommandLine commandLine, Options options) throws IOException {
     // Now process command.
     String[] commands = commandLine.getArgs();
@@ -916,6 +946,16 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
         }
         break;
 
+      case GENERATE_TABLE_INFO:
+        if (commands.length != 2) {
+          showErrorMessage(command + " takes one table name as argument.");
+          return EXIT_FAILURE;
+        }
+        MissingTableDescriptorGenerator tableInfoGenerator =
+          new MissingTableDescriptorGenerator(getConf());
+        tableInfoGenerator.generateTableDescriptorFileIfMissing(commands[1].trim());
+        break;
+
       default:
         showErrorMessage("Unsupported command: " + command);
         return EXIT_FAILURE;
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKAbstractFileStatusFilter.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKAbstractFileStatusFilter.java
new file mode 100644
index 0000000..d9ea58e
--- /dev/null
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKAbstractFileStatusFilter.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+
+/**
+ *
+ * COPIED from org.apache.hadoop.hbase.util.AbstractFileStatusFilter
+ * because the original class was tagged with @InterfaceAudience.Private.
+ *
+ * Typical base class for file status filter.  Works more efficiently when
+ * filtering file statuses, otherwise implementation will need to lookup filestatus
+ * for the path which will be expensive.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class HBCKAbstractFileStatusFilter implements PathFilter, HBCKFileStatusFilter {
+
+  /**
+   * Filters out a path.  Can be given an optional directory hint to avoid
+   * filestatus lookup.
+   *
+   * @param p       A filesystem path
+   * @param isDir   An optional boolean indicating whether the path is a directory or not
+   * @return        true if the path is accepted, false if the path is filtered out
+   */
+  protected abstract boolean accept(Path p, @CheckForNull Boolean isDir);
+
+  @Override
+  public boolean accept(FileStatus f) {
+    return accept(f.getPath(), f.isDirectory());
+  }
+
+  @Override
+  public boolean accept(Path p) {
+    return accept(p, null);
+  }
+
+  protected boolean isFile(FileSystem fs, @CheckForNull Boolean isDir, Path p) throws IOException {
+    return !isDirectory(fs, isDir, p);
+  }
+
+  protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p)
+    throws IOException {
+    return isDir != null ? isDir : fs.isDirectory(p);
+  }
+}
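
To illustrate the contract above, a minimal sketch (not part of the commit) of
a concrete subclass that keeps only directories; the isDir hint lets accept()
skip the extra FileStatus lookup when the caller already knows the answer. The
RegionDirFilter and FamilyDirFilter added to HBCKFsUtils below follow the same
shape:

    class DirOnlyFilter extends HBCKAbstractFileStatusFilter {
      private final FileSystem fs;

      DirOnlyFilter(FileSystem fs) {
        this.fs = fs;
      }

      @Override
      protected boolean accept(Path p, Boolean isDir) {
        try {
          return isDirectory(fs, isDir, p); // uses the hint when non-null
        } catch (IOException ioe) {
          return false; // unreadable paths are filtered out
        }
      }
    }
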
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFileStatusFilter.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFileStatusFilter.java
new file mode 100644
index 0000000..29aab1f
--- /dev/null
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFileStatusFilter.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * COPIED from org.apache.hadoop.hbase.util.FileStatusFilter
+ * because the original class was tagged with @InterfaceAudience.Private.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface HBCKFileStatusFilter {
+  /**
+   * Tests whether or not the specified filestatus should be
+   * included in a filestatus list.
+   *
+   * @param  f  The filestatus to be tested
+   * @return  <code>true</code> if and only if the filestatus
+   *          should be included
+   */
+  boolean accept(FileStatus f);
+}
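
Since the interface declares a single abstract method, it can also be
implemented inline with a lambda; a trivial sketch (not part of the commit):

    // e.g. keep only non-empty files when used with listStatusWithStatusFilter
    HBCKFileStatusFilter nonEmptyFiles = f -> f.getLen() > 0;
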
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsTableDescriptors.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsTableDescriptors.java
new file mode 100644
index 0000000..1fe2cad
--- /dev/null
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsTableDescriptors.java
@@ -0,0 +1,445 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
+
+/**
+ *
+ * COPIED (partially) from org.apache.hadoop.hbase.util.FSTableDescriptors
+ * because the original class was tagged with @InterfaceAudience.Private.
+ *
+ * We only kept the public methods we are using in HBCK. Also removed the cache
+ * and readonly features.
+ *
+ * Implementation of {@link TableDescriptors} that reads descriptors from the
+ * passed filesystem.  It expects descriptors to be in a file in the
+ * {@link #TABLEINFO_DIR} subdir of the table's directory in FS.
+ *
+ * <p>Also has utility for keeping up the table descriptors tableinfo file.
+ * The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
+ * of the table directory in the filesystem.
+ * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the
+ * edit sequenceid: e.g. <code>.tableinfo.0000000003</code>.  This sequenceid
+ * is always increasing.  It starts at zero.  The table schema file with the
+ * highest sequenceid has the most recent schema edit. Usually there is one file
+ * only, the most recent but there may be short periods where there are more
+ * than one file. Old files are eventually cleaned.  Presumption is that there
+ * will not be lots of concurrent clients making table schema edits.  If so,
+ * the below needs a bit of a reworking and perhaps some supporting api in hdfs.
+ */
+@InterfaceAudience.Private
+public class HBCKFsTableDescriptors {
+  private static final Logger LOG = LoggerFactory.getLogger(HBCKFsTableDescriptors.class);
+  private final FileSystem fs;
+  private final Path rootdir;
+
+  /**
+   * The file name prefix used to store HTD in HDFS
+   */
+  static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
+  static final String TABLEINFO_DIR = ".tabledesc";
+  static final String TMP_DIR = ".tmp";
+
+
+  public HBCKFsTableDescriptors(final FileSystem fs, final Path rootdir) {
+    this.fs = fs;
+    this.rootdir = rootdir;
+  }
+
+  /**
+   * Find the most current table info file for the table located in the given table directory.
+   *
+   * Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
+   * files and takes the 'current' one - meaning the one with the highest sequence number if present
+   * or no sequence number at all if none exist (for backward compatibility from before there
+   * were sequence numbers).
+   *
+   * @return The file status of the current table info file or null if it does not exist
+   * @throws IOException for IO errors
+   */
+  public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
+    throws IOException {
+    return getTableInfoPath(fs, tableDir, false);
+  }
+
+  /**
+   * Find the most current table info file for the table in the given table directory.
+   *
+   * Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
+   * files and takes the 'current' one - meaning the one with the highest sequence number if
+   * present or no sequence number at all if none exist (for backward compatibility from before
+   * there were sequence numbers).
+   * If there are multiple table info files found and removeOldFiles is true it also deletes the
+   * older files.
+   *
+   * @return The file status of the current table info file or null if none exist
+   * @throws IOException for IO errors
+   */
+  private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
+    throws IOException {
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+    return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
+  }
+
+  /**
+   * Find the most current table info file in the given directory
+   *
+   * Looks within the given directory for any table info files
+   * and takes the 'current' one - meaning the one with the highest sequence number if present
+   * or no sequence number at all if none exist (for backward compatibility from before there
+   * were sequence numbers).
+   * If there are multiple possible files found and removeOldFiles is true,
+   * it also deletes the older files.
+   *
+   * @return The file status of the current table info file or null if it does not exist
+   * @throws IOException for IO errors
+   */
+  // only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
+  static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles)
+    throws IOException {
+    FileStatus [] status = HBCKFsUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
+    if (status == null || status.length < 1) {
+      return null;
+    }
+    FileStatus mostCurrent = null;
+    for (FileStatus file : status) {
+      if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
+        mostCurrent = file;
+      }
+    }
+    if (removeOldFiles && status.length > 1) {
+      // Clean away old versions
+      for (FileStatus file : status) {
+        Path path = file.getPath();
+        if (!file.equals(mostCurrent)) {
+          if (!fs.delete(file.getPath(), false)) {
+            LOG.warn("Failed cleanup of " + path);
+          } else {
+            LOG.debug("Cleaned up old tableinfo file " + path);
+          }
+        }
+      }
+    }
+    return mostCurrent;
+  }
+
+  /**
+   * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
+   * reverse order.
+   */
+  @VisibleForTesting
+  static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
+    new Comparator<FileStatus>() {
+      @Override
+      public int compare(FileStatus left, FileStatus right) {
+        return right.compareTo(left);
+      }};
+
+  /**
+   * Return the table directory in HDFS
+   */
+  @VisibleForTesting Path getTableDir(final TableName tableName) {
+    return HBCKFsUtils.getTableDir(rootdir, tableName);
+  }
+
+  private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      // Accept any file that starts with TABLEINFO_NAME
+      return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
+    }};
+
+  /**
+   * Width of the sequenceid that is a suffix on a tableinfo file.
+   */
+  @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10;
+
+  /*
+   * @param number Number to use as suffix.
+   * @return Returns zero-prefixed decimal version of passed
+   * number (Does absolute in case number is negative).
+   */
+  private static String formatTableInfoSequenceId(final int number) {
+    byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
+    int d = Math.abs(number);
+    for (int i = b.length - 1; i >= 0; i--) {
+      b[i] = (byte)((d % 10) + '0');
+      d /= 10;
+    }
+    return Bytes.toString(b);
+  }
+
+  /**
+   * Regex to eat up sequenceid suffix on a .tableinfo file.
+   * Use regex because may encounter oldstyle .tableinfos where there is no
+   * sequenceid on the end.
+   */
+  private static final Pattern TABLEINFO_FILE_REGEX =
+    Pattern.compile(TABLEINFO_FILE_PREFIX + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
+
+  /**
+   * @param p Path to a <code>.tableinfo</code> file.
+   * @return The current editid or 0 if none found.
+   */
+  @VisibleForTesting static int getTableInfoSequenceId(final Path p) {
+    if (p == null) {
+      return 0;
+    }
+    Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
+    if (!m.matches()) {
+      throw new IllegalArgumentException(p.toString());
+    }
+    String suffix = m.group(2);
+    if (suffix == null || suffix.length() <= 0) {
+      return 0;
+    }
+    return Integer.parseInt(m.group(2));
+  }
+
+  /**
+   * @param sequenceid sequence id
+   * @return Name of tableinfo file.
+   */
+  @VisibleForTesting static String getTableInfoFileName(final int sequenceid) {
+    return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
+  }
+
+  /**
+   * Returns the latest table descriptor for the given table directly from the file system
+   * if it exists, bypassing the local cache.
+   * Returns null if it's not found.
+   */
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+    Path hbaseRootDir, TableName tableName) throws IOException {
+    Path tableDir = HBCKFsUtils.getTableDir(hbaseRootDir, tableName);
+    return getTableDescriptorFromFs(fs, tableDir);
+  }
+
+  /**
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
+   */
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+    throws IOException {
+    FileStatus status = getTableInfoPath(fs, tableDir, false);
+    if (status == null) {
+      throw new TableInfoMissingException("No table descriptor file under " + tableDir);
+    }
+    return readTableDescriptor(fs, status);
+  }
+
+  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
+    throws IOException {
+    int len = Ints.checkedCast(status.getLen());
+    byte [] content = new byte[len];
+    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
+    try {
+      fsDataInputStream.readFully(content);
+    } finally {
+      fsDataInputStream.close();
+    }
+    TableDescriptor htd = null;
+    try {
+      htd = TableDescriptorBuilder.parseFrom(content);
+    } catch (DeserializationException e) {
+      throw new IOException("content=" + Bytes.toShort(content), e);
+    }
+    return htd;
+  }
+
+  /**
+   * Deletes all the table descriptor files from the file system.
+   * Used in unit tests only.
+   * @throws NotImplementedException if in read only mode
+   */
+  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
+    Path tableDir = getTableDir(tableName);
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Deletes files matching the table info file pattern within the given directory
+   * whose sequenceId is at most the given max sequenceId.
+   */
+  private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
+    throws IOException {
+    FileStatus [] status = HBCKFsUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
+    for (FileStatus file : status) {
+      Path path = file.getPath();
+      int sequenceId = getTableInfoSequenceId(path);
+      if (sequenceId <= maxSequenceId) {
+        boolean success = HBCKFsUtils.delete(fs, path, false);
+        if (success) {
+          LOG.debug("Deleted " + path);
+        } else {
+          LOG.error("Failed to delete table descriptor at " + path);
+        }
+      }
+    }
+  }
+
+  /**
+   * Attempts to write a new table descriptor to the given table's directory.
+   * It first writes it to the .tmp dir then uses an atomic rename to move it into place.
+   * It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
+   * not already in use.
+   * Removes the current descriptor file if passed in.
+   *
+   * @return Descriptor file or null if we failed write.
+   */
+  private static Path writeTableDescriptor(final FileSystem fs,
+                                           final TableDescriptor htd, final Path tableDir,
+                                           final FileStatus currentDescriptorFile)
+    throws IOException {
+    // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
+    // This directory is never removed to avoid removing it out from under a concurrent writer.
+    Path tmpTableDir = new Path(tableDir, TMP_DIR);
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+
+    // What is current sequenceid?  We read the current sequenceid from
+    // the current file.  After we read it, another thread could come in and
+    // compete with us writing out next version of file.  The below retries
+    // should help in this case some but its hard to do guarantees in face of
+    // concurrent schema edits.
+    int currentSequenceId = currentDescriptorFile == null ? 0 :
+      getTableInfoSequenceId(currentDescriptorFile.getPath());
+    int newSequenceId = currentSequenceId;
+
+    // Put arbitrary upperbound on how often we retry
+    int retries = 10;
+    int retrymax = currentSequenceId + retries;
+    Path tableInfoDirPath = null;
+    do {
+      newSequenceId += 1;
+      String filename = getTableInfoFileName(newSequenceId);
+      Path tempPath = new Path(tmpTableDir, filename);
+      if (fs.exists(tempPath)) {
+        LOG.debug(tempPath + " exists; retrying up to " + retries + " times");
+        continue;
+      }
+      tableInfoDirPath = new Path(tableInfoDir, filename);
+      try {
+        writeTD(fs, tempPath, htd);
+        fs.mkdirs(tableInfoDirPath.getParent());
+        if (!fs.rename(tempPath, tableInfoDirPath)) {
+          throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
+        }
+        LOG.debug("Wrote into " + tableInfoDirPath);
+      } catch (IOException ioe) {
+        // Presume clash of names or something; go around again.
+        LOG.debug("Failed write and/or rename; retrying", ioe);
+        if (!HBCKFsUtils.deleteDirectory(fs, tempPath)) {
+          LOG.warn("Failed cleanup of " + tempPath);
+        }
+        tableInfoDirPath = null;
+        continue;
+      }
+      break;
+    } while (newSequenceId < retrymax);
+    if (tableInfoDirPath != null) {
+      // if we succeeded, remove old table info files.
+      deleteTableDescriptorFiles(fs, tableInfoDir, newSequenceId - 1);
+    }
+    return tableInfoDirPath;
+  }
+
+  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
+    throws IOException {
+    FSDataOutputStream out = fs.create(p, false);
+    try {
+      // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
+      // the toString version of HTD.  Now we just write out the pb serialization.
+      out.write(TableDescriptorBuilder.toByteArray(htd));
+    } finally {
+      out.close();
+    }
+  }
+
+  /**
+   * Create new TableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true then even if previous table descriptor is present it
+   * will be overwritten
+   *
+   * @return True if we successfully created file.
+   */
+  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
+    throws IOException {
+    Path tableDir = getTableDir(htd.getTableName());
+    return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
+  }
+
+  /**
+   * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
+   * a new table or snapshot a table.
+   * @param tableDir table directory under which we should write the file
+   * @param htd description of the table to write
+   * @param forceCreation if <tt>true</tt>,then even if previous table descriptor is present it will
+   *          be overwritten
+   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
+   *         already exists and we weren't forcing the descriptor creation.
+   * @throws IOException if a filesystem error occurs
+   */
+  public boolean createTableDescriptorForTableDirectory(Path tableDir,
+    TableDescriptor htd, boolean forceCreation) throws IOException {
+    FileStatus status = getTableInfoPath(fs, tableDir);
+    if (status != null) {
+      LOG.debug("Current path=" + status.getPath());
+      if (!forceCreation) {
+        if (fs.exists(status.getPath()) && status.getLen() > 0) {
+          if (readTableDescriptor(fs, status).equals(htd)) {
+            LOG.trace("TableInfo already exists.. Skipping creation");
+            return false;
+          }
+        }
+      }
+    }
+    Path p = writeTableDescriptor(fs, htd, tableDir, status);
+    return p != null;
+  }
+
+}
+
+
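A quick worked example (not part of the commit) of the sequence-id scheme the
helpers above implement; both helpers are package-private, so this only runs
from the org.apache.hbase package:

    // WIDTH_OF_SEQUENCE_ID is 10, so ids are zero-padded decimal strings:
    String name = HBCKFsTableDescriptors.getTableInfoFileName(3);
    // name is ".tableinfo.0000000003"
    int id = HBCKFsTableDescriptors.getTableInfoSequenceId(new Path(name));
    // id is 3; an old-style ".tableinfo" with no suffix parses to 0
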
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
index c8785f8..6a7395a 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCKFsUtils.java
@@ -17,15 +17,30 @@
  */
 package org.apache.hbase;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
 
 /**
  * hbck's local version of the CommonFSUtils from the hbase repo
@@ -34,6 +49,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class HBCKFsUtils {
 
+  private static final Logger LOG = LoggerFactory.getLogger(HBCKFsUtils.class);
+
   /**
    * Private constructor to keep this class from being instantiated.
    */
@@ -85,4 +102,247 @@ public final class HBCKFsUtils {
     return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
+  /**
+   *
+   * COPIED from CommonFSUtils.listStatus
+   *
+   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal
+   * This accommodates differences between hadoop versions, where hadoop 1
+   * does not throw a FileNotFoundException but returns an empty FileStatus[],
+   * while Hadoop 2 will throw FileNotFoundException.
+   *
+   * Where possible, prefer FSUtils#listStatusWithStatusFilter(FileSystem,
+   * Path, FileStatusFilter) instead.
+   *
+   * @param fs file system
+   * @param dir directory
+   * @param filter path filter
+   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
+   */
+  public static FileStatus [] listStatus(final FileSystem fs,
+    final Path dir, final PathFilter filter) throws IOException {
+    FileStatus [] status = null;
+    try {
+      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
+    } catch (FileNotFoundException fnfe) {
+      // if directory doesn't exist, return null
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(dir + " doesn't exist");
+      }
+    }
+    if (status == null || status.length < 1) {
+      return null;
+    }
+    return status;
+  }
+
+  /**
+   *
+   * COPIED from CommonFSUtils.delete
+   *
+   * Calls fs.delete() and returns the value returned by the fs.delete()
+   *
+   * @param fs must not be null
+   * @param path must not be null
+   * @param recursive delete tree rooted at path
+   * @return the value returned by the fs.delete()
+   * @throws IOException from underlying FileSystem
+   */
+  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
+    throws IOException {
+    return fs.delete(path, recursive);
+  }
+
+  /**
+   *
+   * COPIED from CommonFSUtils.deleteDirectory
+   *
+   * Delete if exists.
+   * @param fs filesystem object
+   * @param dir directory to delete
+   * @return True if deleted <code>dir</code>
+   * @throws IOException on IO errors
+   */
+  public static boolean deleteDirectory(final FileSystem fs, final Path dir)
+    throws IOException {
+    return fs.exists(dir) && fs.delete(dir, true);
+  }
+
+  /**
+   *
+   * COPIED from FsUtils.getRegionDirs
+   *
+   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
+   * .tableinfo
+   * @param fs A file system for the Path
+   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
+   * @return List of paths to valid region directories in table dir.
+   * @throws IOException on IO errors
+   */
+  public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir)
+    throws IOException {
+    // assumes we are in a table dir.
+    List<FileStatus> rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
+    if (rds == null) {
+      return new ArrayList<>();
+    }
+    List<Path> regionDirs = new ArrayList<>(rds.size());
+    for (FileStatus rdfs: rds) {
+      Path rdPath = rdfs.getPath();
+      regionDirs.add(rdPath);
+    }
+    return regionDirs;
+  }
+
+  /**
+   *
+   * COPIED from FsUtils.listStatusWithStatusFilter
+   *
+   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal
+   * This accommodates differences between hadoop versions, where hadoop 1
+   * does not throw a FileNotFoundException but returns an empty FileStatus[],
+   * while Hadoop 2 will throw FileNotFoundException.
+   *
+   * @param fs file system
+   * @param dir directory
+   * @param filter file status filter
+   * @return null if dir is empty or doesn't exist, otherwise FileStatus list
+   */
+  public static List<FileStatus> listStatusWithStatusFilter(final FileSystem fs, final Path dir,
+    final HBCKFileStatusFilter filter) throws IOException {
+    FileStatus [] status = null;
+    try {
+      status = fs.listStatus(dir);
+    } catch (FileNotFoundException fnfe) {
+      // if directory doesn't exist, return null
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(dir + " doesn't exist");
+      }
+    }
+
+    if (status == null || status.length < 1)  {
+      return null;
+    }
+
+    if (filter == null) {
+      return Arrays.asList(status);
+    } else {
+      List<FileStatus> status2 = filterFileStatuses(status, filter);
+      if (status2 == null || status2.isEmpty()) {
+        return null;
+      } else {
+        return status2;
+      }
+    }
+  }
+
+  /**
+   *
+   * COPIED from FsUtils.filterFileStatuses
+   *
+   * Filters FileStatuses in an array and returns a list
+   *
+   * @param input   An array of FileStatuses
+   * @param filter  A required filter to filter the array
+   * @return        A list of FileStatuses
+   */
+  public static List<FileStatus> filterFileStatuses(FileStatus[] input,
+                                                    HBCKFileStatusFilter filter) {
+    if (input == null) {
+      return null;
+    }
+    return filterFileStatuses(Iterators.forArray(input), filter);
+  }
+
+  /**
+   *
+   * COPIED from FsUtils.filterFileStatuses
+   *
+   * Filters FileStatuses in an iterator and returns a list
+   *
+   * @param input   An iterator of FileStatuses
+   * @param filter  A required filter to filter the array
+   * @return        A list of FileStatuses
+   */
+  public static List<FileStatus> filterFileStatuses(Iterator<FileStatus> input,
+                                                    HBCKFileStatusFilter filter) {
+    if (input == null) {
+      return null;
+    }
+    ArrayList<FileStatus> results = new ArrayList<>();
+    while (input.hasNext()) {
+      FileStatus f = input.next();
+      if (filter.accept(f)) {
+        results.add(f);
+      }
+    }
+    return results;
+  }
+
+
+  /**
+   *
+   * COPIED from FsUtils.FamilyDirFilter
+   *
+   * Filter for all dirs that are legal column family names.  This is generally used for colfam
+   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
+   */
+  public static class FamilyDirFilter extends HBCKAbstractFileStatusFilter {
+    final FileSystem fs;
+
+    public FamilyDirFilter(FileSystem fs) {
+      this.fs = fs;
+    }
+
+    @Override
+    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
+      try {
+        // throws IAE if invalid
+        ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes(p.getName()));
+      } catch (IllegalArgumentException iae) {
+        // path name is an invalid family name and thus is excluded.
+        return false;
+      }
+
+      try {
+        return isDirectory(fs, isDir, p);
+      } catch (IOException ioe) {
+        // Maybe the file was moved or the fs was disconnected.
+        LOG.warn("Skipping file " + p +" due to IOException", ioe);
+        return false;
+      }
+    }
+  }
+
+  /**
+   *
+   * COPIED from FsUtils.RegionDirFilter
+   *
+   * Filter for all dirs that don't start with '.'
+   */
+  public static class RegionDirFilter extends HBCKAbstractFileStatusFilter {
+    // This pattern will accept 0.90+ style hex region dirs and older numeric region dir names.
+    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
+    final FileSystem fs;
+
+    public RegionDirFilter(FileSystem fs) {
+      this.fs = fs;
+    }
+
+    @Override
+    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
+      if (!regionDirPattern.matcher(p.getName()).matches()) {
+        return false;
+      }
+
+      try {
+        return isDirectory(fs, isDir, p);
+      } catch (IOException ioe) {
+        // Maybe the file was moved or the fs was disconnected.
+        LOG.warn("Skipping file " + p +" due to IOException", ioe);
+        return false;
+      }
+    }
+  }
+
 }
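
A usage sketch (not part of the commit) of the copied filters with
listStatusWithStatusFilter, mirroring what getRegionDirs does internally; conf
and rootDir are assumed to already exist in scope:

    FileSystem fs = rootDir.getFileSystem(conf);
    Path tableDir = HBCKFsUtils.getTableDir(rootDir, TableName.valueOf("my_table"));
    // RegionDirFilter keeps only directories whose names look like region encodings
    List<FileStatus> regionDirs =
        HBCKFsUtils.listStatusWithStatusFilter(fs, tableDir, new HBCKFsUtils.RegionDirFilter(fs));
    if (regionDirs == null) {
      // the helper returns null (rather than throwing) for empty or missing dirs
    }
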
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/MissingTableDescriptorGenerator.java b/hbase-hbck2/src/main/java/org/apache/hbase/MissingTableDescriptorGenerator.java
new file mode 100644
index 0000000..9427976
--- /dev/null
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/MissingTableDescriptorGenerator.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class can be used to generate missing table descriptor file based on the in-memory cache
+ * of the active master or based on the file system.
+ */
+public class MissingTableDescriptorGenerator {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MissingTableDescriptorGenerator.class);
+
+  private final Configuration configuration;
+  private FileSystem fs;
+  private Path rootDir;
+
+  public MissingTableDescriptorGenerator(Configuration configuration) throws IOException {
+    this.configuration = configuration;
+    this.rootDir = HBCKFsUtils.getRootDir(this.configuration);
+    this.fs = rootDir.getFileSystem(this.configuration);
+  }
+
+  /**
+   * Tries to generate a missing table descriptor. If anything goes wrong, the method throws an
+   * IllegalStateException without changing anything. The method follows these steps:
+   *
+   * - if the table folder is missing, then we fail with an IllegalStateException
+   * - if the .tableinfo file already exists, then we return (we don't overwrite it)
+   * - if TableDescriptor is cached in master then recover the .tableinfo accordingly
+   * - if TableDescriptor is not cached in master, then we create a default .tableinfo file
+   *   with the following items:
+   *      - the table name
+   *      - the column family list (determined based on the file system)
+   *      - the default properties for both {@link TableDescriptor} and
+   *        {@link ColumnFamilyDescriptor}
+   *
+   * This method does not change anything in HBase, only writes the new .tableinfo file
+   * to the file system.
+   *
+   * @param tableNameAsString the table name in standard 'table' or 'ns:table' format
+   */
+  public void generateTableDescriptorFileIfMissing(String tableNameAsString) {
+    TableName tableName = TableName.valueOf(tableNameAsString);
+    assertTableFolderIsPresent(tableName);
+    if (checkIfTableInfoPresent(tableName)) {
+      LOG.info("Table descriptor already exists, exiting without changing anything.");
+      return;
+    }
+
+    Optional<TableDescriptor> tableDescriptorFromMaster = getTableDescriptorFromMaster(tableName);
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootDir);
+    try {
+      if (tableDescriptorFromMaster.isPresent()) {
+        LOG.info("Table descriptor found in the cache of HBase Master, " +
+                 "writing it to the file system.");
+        fstd.createTableDescriptor(tableDescriptorFromMaster.get(), false);
+        LOG.info("Table descriptor written successfully. Orphan table {} fixed.", tableName);
+      } else {
+        generateDefaultTableInfo(fstd, tableName);
+        LOG.info("Table descriptor written successfully.");
+        LOG.warn("Orphan table {} fixed with a default .tableinfo file. It is strongly " +
+                 "recommended to review the TableDescriptor and modify if necessary.", tableName);
+      }
+    } catch (IOException e) {
+      LOG.error("Exception while writing the table descriptor to the file system for table {}",
+                tableName, e);
+    }
+
+  }
+
+  private void assertTableFolderIsPresent(TableName tableName) {
+    final Path tableDir = HBCKFsUtils.getTableDir(rootDir, tableName);
+    try {
+      if (!fs.exists(tableDir)) {
+        throw new IllegalStateException("Exiting without changing anything. " +
+                                        "Table folder does not exist: " + tableDir);
+      }
+      if (!fs.getFileStatus(tableDir).isDirectory()) {
+        throw new IllegalStateException("Exiting without changing anything. " +
+                                        "Table folder is not a directory: " + tableDir);
+      }
+    } catch (IOException e) {
+      LOG.error("Exception while trying to find table folder for table {}", tableName, e);
+      throw new IllegalStateException("Exiting without changing anything. " +
+                                      "Can not validate if table folder exists.");
+    }
+  }
+
+  private boolean checkIfTableInfoPresent(TableName tableName) {
+    final Path tableDir = HBCKFsUtils.getTableDir(rootDir, tableName);
+    try {
+      FileStatus tableInfoFile = HBCKFsTableDescriptors.getTableInfoPath(fs, tableDir);
+      if (tableInfoFile != null) {
+        LOG.info("Table descriptor found for table {} in: {}", tableName, tableInfoFile.getPath());
+        return true;
+      }
+    } catch (IOException e) {
+      LOG.error("Exception while trying to find the table descriptor for table {}", tableName, e);
+      throw new IllegalStateException("Can not validate if table descriptor exists. " +
+                                      "Exiting without changing anything.");
+    }
+    return false;
+  }
+
+  private Optional<TableDescriptor> getTableDescriptorFromMaster(TableName tableName) {
+    LOG.info("Trying to fetch table descriptor for orphan table: {}", tableName);
+    try (Connection conn = ConnectionFactory.createConnection(configuration);
+         Admin admin = conn.getAdmin()) {
+      TableDescriptor tds = admin.getDescriptor(tableName);
+      return Optional.of(tds);
+    } catch (TableNotFoundException e) {
+      LOG.info("Table Descriptor not found in HBase Master: {}", tableName);
+    } catch (IOException e) {
+      LOG.warn("Exception while fetching table descriptor. Is master offline?", e);
+    }
+    return Optional.empty();
+  }
+
+  private void generateDefaultTableInfo(HBCKFsTableDescriptors fstd, TableName tableName)
+    throws IOException {
+    Set<String> columnFamilies = getColumnFamilies(tableName);
+    if (columnFamilies.isEmpty()) {
+      LOG.warn("No column family found in HDFS for table {}.", tableName);
+    } else {
+      LOG.info("Column families to be listed in the new table info: {}", columnFamilies);
+    }
+
+    TableDescriptorBuilder tableBuilder = TableDescriptorBuilder.newBuilder(tableName);
+    for (String columnFamily : columnFamilies) {
+      final ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.of(columnFamily);
+      tableBuilder.setColumnFamily(family);
+    }
+    fstd.createTableDescriptor(tableBuilder.build(), false);
+  }
+
+  private Set<String> getColumnFamilies(TableName tableName) {
+    try {
+      final Path tableDir = HBCKFsUtils.getTableDir(rootDir, tableName);
+      final List<Path> regionDirs = HBCKFsUtils.getRegionDirs(fs, tableDir);
+      Set<String> columnFamilies = new HashSet<>();
+      for (Path regionDir : regionDirs) {
+        FileStatus[] familyDirs = fs.listStatus(regionDir, new HBCKFsUtils.FamilyDirFilter(fs));
+        for (FileStatus familyDir : familyDirs) {
+          String columnFamily = familyDir.getPath().getName();
+          columnFamilies.add(columnFamily);
+        }
+      }
+      return columnFamilies;
+    } catch (IOException e) {
+      LOG.error("Exception while trying to find in HDFS the column families for table {}",
+                tableName, e);
+      throw new IllegalStateException("Unable to determine the list of column families. " +
+                                      "Exiting without changing anything.");
+    }
+  }
+}
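
For completeness, a minimal programmatic usage sketch (not part of the
commit); this mirrors what the new HBCK2 case branch does after argument
parsing, with an illustrative table name:

    Configuration conf = HBaseConfiguration.create();
    // the constructor resolves the HBase root dir and may throw IOException
    MissingTableDescriptorGenerator generator = new MissingTableDescriptorGenerator(conf);
    generator.generateTableDescriptorFileIfMissing("ns:orphan_table");
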
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
index 6313b85..c04bd02 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsck.java
@@ -126,8 +126,20 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.KeyRange;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.RegionSplitCalculator;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
index b2cc879..0b0f7d7 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/HBaseFsckRepair.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hbase.HBCKMetaTableAccessor;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
index e2bce8c..d945194 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/hbck1/OfflineMetaRepair.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.MultipleIOException;
 
 import org.slf4j.Logger;
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
index 1e8ba04..14ab7dd 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCK2.java
@@ -27,8 +27,8 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Scanner;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 
 import org.junit.After;
@@ -161,7 +160,7 @@ public class TestHBCK2 {
         Scanner scanner = new Scanner(result).useDelimiter("[\\D]+");
         pids = new ArrayList<>();
         while (scanner.hasNext()) {
-            pids.add(scanner.nextLong());
+          pids.add(scanner.nextLong());
         }
         scanner.close();
         waitOnPids(pids);
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java
new file mode 100644
index 0000000..2db717a
--- /dev/null
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptorForceCreation.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * COPIED from org.apache.hadoop.hbase.TestFSTableDescriptorForceCreation
+ */
+@Category({MiscTests.class, SmallTests.class})
+public class TestHBCKFsTableDescriptorForceCreation {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestHBCKFsTableDescriptorForceCreation.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Test
+  public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
+    throws IOException {
+    final String name = this.name.getMethodName();
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
+
+    final TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+    assertTrue("Should create new table descriptor",
+               fstd.createTableDescriptor(td, false));
+  }
+
+  @Test
+  public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
+    throws IOException {
+    final String name = this.name.getMethodName();
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Clean up any detritus lying around from old tests.
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+    // create it once
+    fstd.createTableDescriptor(htd, false);
+    // the second creation should fail
+    assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
+  }
+
+  @Test
+  public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
+    throws Exception {
+    final String name = this.name.getMethodName();
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+    fstd.createTableDescriptor(htd, false);
+    assertTrue("Should create new table descriptor",
+               fstd.createTableDescriptor(htd, true));
+  }
+
+}
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java
new file mode 100644
index 0000000..20ccdfc
--- /dev/null
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKFsTableDescriptors.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * COPIED (partially) from org.apache.hadoop.hbase.util.TestFSTableDescriptors
+ *
+ * Tests for {@link HBCKFsTableDescriptors}.
+ */
+@Category({MiscTests.class, MediumTests.class})
+public class TestHBCKFsTableDescriptors {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestHBCKFsTableDescriptors.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static final Logger LOG = LoggerFactory.getLogger(TestHBCKFsTableDescriptors.class);
+
+  @Rule
+  public TestName name = new TestName();
+
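+  // An "old style" table info file is the bare TABLEINFO_FILE_PREFIX with no
+  // sequence id suffix; the parser maps it to sequence id 0, while any name
+  // that doesn't match the .tableinfo pattern at all is rejected.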
+  @Test (expected=IllegalArgumentException.class)
+  public void testRegexAgainstOldStyleTableInfo() {
+    Path p = new Path("/tmp", HBCKFsTableDescriptors.TABLEINFO_FILE_PREFIX);
+    int i = HBCKFsTableDescriptors.getTableInfoSequenceId(p);
+    assertEquals(0, i);
+    // Assert it won't eat garbage -- that it fails
+    p = new Path("/tmp", "abc");
+    HBCKFsTableDescriptors.getTableInfoSequenceId(p);
+  }
+
+  @Test
+  public void testFormatTableInfoSequenceId() {
+    Path p0 = assertWriteAndReadSequenceId(0);
+    // Assert p0 has format we expect.
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < HBCKFsTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
+      sb.append("0");
+    }
+    assertEquals(HBCKFsTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(),
+                 p0.getName());
+    // Check a few more.
+    Path p2 = assertWriteAndReadSequenceId(2);
+    Path p10000 = assertWriteAndReadSequenceId(10000);
+    // Get a .tableinfo that has no sequence id suffix.
+    Path p = new Path(p0.getParent(), HBCKFsTableDescriptors.TABLEINFO_FILE_PREFIX);
+    FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
+    FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
+    FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
+    FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
+    Comparator<FileStatus> comparator = HBCKFsTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
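+    // Under this comparator an ascending sort puts the highest sequence id
+    // (the most recently written descriptor) first and the bare, suffix-less
+    // prefix last, which is what the assertions below check pairwise.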
+    assertTrue(comparator.compare(fs, fs0) > 0);
+    assertTrue(comparator.compare(fs0, fs2) > 0);
+    assertTrue(comparator.compare(fs2, fs10000) > 0);
+  }
+
+  private Path assertWriteAndReadSequenceId(final int i) {
+    Path p = new Path("/tmp", HBCKFsTableDescriptors.getTableInfoFileName(i));
+    int ii = HBCKFsTableDescriptors.getTableInfoSequenceId(p);
+    assertEquals(i, ii);
+    return p;
+  }
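+
+  // For illustration (assuming WIDTH_OF_SEQUENCE_ID is 10, as upstream):
+  // getTableInfoFileName(2) yields ".tableinfo.0000000002", and
+  // getTableInfoSequenceId() parses that suffix back to 2.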
+
+
+  @Test
+  public void testReadingHTDFromFS() throws IOException {
+    final String name = this.name.getMethodName();
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+    Path rootdir = UTIL.getDataTestDir(name);
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, rootdir);
+    fstd.createTableDescriptor(htd, false);
+    TableDescriptor td2 =
+      HBCKFsTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
+    assertEquals(htd, td2);
+  }
+
+  @Test(expected = TableInfoMissingException.class)
+  public void testNoSuchTable() throws IOException {
+    final String name = "testNoSuchTable";
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Clean up any detritus lying around from old tests.
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    final TableName noSuchTable = TableName.valueOf("NoSuchTable");
+
+    // should throw exception
+    HBCKFsTableDescriptors.getTableDescriptorFromFs(fs, rootdir, noSuchTable);
+  }
+
+  @Test
+  public void testTableInfoFileStatusComparator() {
+    FileStatus bare =
+      new FileStatus(0, false, 0, 0, -1,
+                     new Path("/tmp", HBCKFsTableDescriptors.TABLEINFO_FILE_PREFIX));
+    FileStatus future =
+      new FileStatus(0, false, 0, 0, -1,
+                     new Path("/tmp/tablinfo." + System.currentTimeMillis()));
+    FileStatus farFuture =
+      new FileStatus(0, false, 0, 0, -1,
+                     new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
+    FileStatus [] alist = {bare, future, farFuture};
+    FileStatus [] blist = {bare, farFuture, future};
+    FileStatus [] clist = {farFuture, bare, future};
+    Comparator<FileStatus> c = HBCKFsTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
+    Arrays.sort(alist, c);
+    Arrays.sort(blist, c);
+    Arrays.sort(clist, c);
+    // Now assert all three lists sorted into the order we expect.
+    for (int i = 0; i < alist.length; i++) {
+      assertTrue(alist[i].equals(blist[i]));
+      assertTrue(blist[i].equals(clist[i]));
+      assertTrue(clist[i].equals(i == 0 ? farFuture : i == 1 ? future : bare));
+    }
+  }
+
+  @Test
+  public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
+    Path testdir = UTIL.getDataTestDir(name.getMethodName());
+    final TableName name = TableName.valueOf(this.name.getMethodName());
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(name).build();
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    HBCKFsTableDescriptors fstd = new HBCKFsTableDescriptors(fs, testdir);
+    assertTrue(fstd.createTableDescriptor(htd, false));
+    assertFalse(fstd.createTableDescriptor(htd, false));
+    htd = TableDescriptorBuilder.newBuilder(htd)
+      .setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"))
+      .build();
+    assertTrue(fstd.createTableDescriptor(htd, false)); // a changed descriptor is re-created
+    Path tableDir = fstd.getTableDir(htd.getTableName());
+    Path tmpTableDir = new Path(tableDir, HBCKFsTableDescriptors.TMP_DIR);
+    FileStatus[] statuses = fs.listStatus(tmpTableDir);
+    assertEquals(0, statuses.length);
+
+    assertEquals(htd, HBCKFsTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
+  }
+
+}
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestMissingTableDescriptorGenerator.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestMissingTableDescriptorGenerator.java
new file mode 100644
index 0000000..f2d7d04
--- /dev/null
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestMissingTableDescriptorGenerator.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hbase;
+
+import static java.util.Arrays.asList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
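+/**
+ * Tests for {@link MissingTableDescriptorGenerator}: regenerating a deleted
+ * .tableinfo file either from the table descriptor still cached in the HBase
+ * Master, or (once a restart has wiped that cache) from the column family
+ * layout found on the filesystem, using default table properties.
+ */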
+public class TestMissingTableDescriptorGenerator {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final String TABLE_NAME_AS_STRING = "test-1";
+  private static final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_AS_STRING);
+  private static final byte[] FAMILY_A = Bytes.toBytes("familyA");
+  private static final byte[] FAMILY_B = Bytes.toBytes("familyB");
+  private static final List<ColumnFamilyDescriptor> COLUMN_FAMILIES = asList(
+    ColumnFamilyDescriptorBuilder.of(FAMILY_A), ColumnFamilyDescriptorBuilder.of(FAMILY_B));
+  private static final int CUSTOM_MAX_FILE_SIZE = 99 * 1024 * 1024;
+
+  private static final TableDescriptor TABLE_INFO_WITH_DEFAULT_PARAMS =
+    TableDescriptorBuilder.newBuilder(TABLE_NAME)
+      .setColumnFamilies(COLUMN_FAMILIES)
+      .build();
+
+  private static final TableDescriptor TABLE_INFO_WITH_CUSTOM_MAX_FILE_SIZE =
+    TableDescriptorBuilder.newBuilder(TABLE_NAME)
+      .setColumnFamilies(COLUMN_FAMILIES)
+      .setMaxFileSize(CUSTOM_MAX_FILE_SIZE)
+      .build();
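+
+  // The non-default maxFileSize above acts as a marker: recovery from the
+  // master's cache should preserve it, while recovery based only on the
+  // filesystem cannot, and falls back to the default (see the tests below).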
+
+  private MissingTableDescriptorGenerator missingTableDescriptorGenerator;
+  private HBCKFsTableDescriptors tableDescriptorUtil;
+  private Path rootDir;
+  private FileSystem fs;
+
+  @Before
+  public void before() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+    final Configuration conf = TEST_UTIL.getConfiguration();
+    missingTableDescriptorGenerator = new MissingTableDescriptorGenerator(conf);
+
+    // creating the HBCKFsTableDescriptors helper class; it does not cache, so
+    // it will always fetch the table descriptors from the filesystem
+    rootDir = TEST_UTIL.getDefaultRootDirPath();
+    fs = TEST_UTIL.getTestFileSystem();
+    tableDescriptorUtil = new HBCKFsTableDescriptors(fs, rootDir);
+  }
+
+  @After
+  public void after() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void shouldGenerateTableInfoBasedOnCachedTableDescriptor() throws Exception {
+    TEST_UTIL.createTable(TABLE_INFO_WITH_CUSTOM_MAX_FILE_SIZE, null);
+
+    // remove the .tableinfo file
+    tableDescriptorUtil.deleteTableDescriptorIfExists(TABLE_NAME);
+
+    // regenerate the .tableinfo file
+    missingTableDescriptorGenerator.generateTableDescriptorFileIfMissing(TABLE_NAME_AS_STRING);
+
+    // verify table info file content (as the table descriptor should be restored based on the
+    // cache in HBase Master, we expect the maxFileSize to be set to the non-default value)
+    TableDescriptor descriptor =
+      HBCKFsTableDescriptors.getTableDescriptorFromFs(fs, rootDir, TABLE_NAME);
+    assertEquals(TABLE_NAME.getNameAsString(), descriptor.getTableName().getNameAsString());
+    assertTrue(descriptor.hasColumnFamily(FAMILY_A));
+    assertTrue(descriptor.hasColumnFamily(FAMILY_B));
+    assertEquals(CUSTOM_MAX_FILE_SIZE, descriptor.getMaxFileSize());
+
+    // restart the cluster (the table descriptor cache should be reinitialized in the HBase Master)
+    TEST_UTIL.shutdownMiniHBaseCluster();
+    Thread.sleep(2000);
+    TEST_UTIL.restartHBaseCluster(1);
+
+    // verify the table is working
+    try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
+      TEST_UTIL.loadRandomRows(table, FAMILY_A, 10, 10);
+    }
+  }
+
+  @Test
+  public void shouldGenerateTableInfoBasedOnFileSystem() throws Exception {
+    TEST_UTIL.createTable(TABLE_INFO_WITH_CUSTOM_MAX_FILE_SIZE, null);
+
+    // remove the .tableinfo file
+    tableDescriptorUtil.deleteTableDescriptorIfExists(TABLE_NAME);
+
+    // restart HBase (so the table descriptor cache should be cleaned in HBase Master)
+    // In this case the region belonging to the test table shouldn't actually be
+    // online after the restart. You should find a warning similar to this in the logs:
+    // "Failed opening region test-1,,1608040700497.5d72e524ae11c5c72c6f3d365f190349.
+    //  java.io.IOException: Missing table descriptor for 5d72e524ae11c5c72c6f3d365f190349"
+    TEST_UTIL.shutdownMiniHBaseCluster();
+    Thread.sleep(2000);
+    TEST_UTIL.restartHBaseCluster(1);
+
+    // regenerate the .tableinfo file
+    missingTableDescriptorGenerator.generateTableDescriptorFileIfMissing(TABLE_NAME_AS_STRING);
+
+    // verify table info file content (as the table descriptor should be restored based on the
+    // file system, we expect the maxFileSize to be set to the default value)
+    TableDescriptor descriptor =
+      HBCKFsTableDescriptors.getTableDescriptorFromFs(fs, rootDir, TABLE_NAME);
+    assertEquals(TABLE_NAME.getNameAsString(), descriptor.getTableName().getNameAsString());
+    assertTrue(descriptor.hasColumnFamily(FAMILY_A));
+    assertTrue(descriptor.hasColumnFamily(FAMILY_B));
+    assertEquals(TABLE_INFO_WITH_DEFAULT_PARAMS.getMaxFileSize(), descriptor.getMaxFileSize());
+
+    // restart the cluster again
+    TEST_UTIL.shutdownMiniHBaseCluster();
+    Thread.sleep(2000);
+    TEST_UTIL.restartHBaseCluster(1);
+
+    // verify the table is working
+    try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
+      TEST_UTIL.loadRandomRows(table, FAMILY_A, 10, 10);
+    }
+  }
+
+}
diff --git a/hbase-hbck2/src/test/java/org/apache/hbase/TestSchedulingRecoveries.java b/hbase-hbck2/src/test/java/org/apache/hbase/TestSchedulingRecoveries.java
index b9ea9bd..93916e8 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestSchedulingRecoveries.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestSchedulingRecoveries.java
@@ -26,9 +26,11 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Hbck;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 public class TestSchedulingRecoveries {
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();