Posted to commits@hbase.apache.org by st...@apache.org on 2013/08/08 08:08:31 UTC

svn commit: r1511591 [14/23] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/jav...

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java Thu Aug  8 06:08:23 2013
@@ -38,11 +38,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -84,8 +85,8 @@ public class FSTableDescriptors implemen
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<String, TableDescriptorAndModtime> cache =
-    new ConcurrentHashMap<String, TableDescriptorAndModtime>();
+  private final Map<TableName, TableDescriptorAndModtime> cache =
+    new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
 
   /**
    * Data structure to hold modification time and table descriptor.
@@ -140,32 +141,20 @@ public class FSTableDescriptors implemen
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public HTableDescriptor get(final byte [] tablename)
-  throws IOException {
-    return get(Bytes.toString(tablename));
-  }
-
-  /**
-   * Get the current table descriptor for the given table, or null if none exists.
-   * 
-   * Uses a local cache of the descriptor but still checks the filesystem on each call
-   * to see if a newer file has been created since the cached one was read.
-   */
-  @Override
-  public HTableDescriptor get(final String tablename)
+  public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
-    if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) {
+    if (HTableDescriptor.ROOT_TABLEDESC.getTableName().equals(tablename)) {
       cachehits++;
       return HTableDescriptor.ROOT_TABLEDESC;
     }
-    if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) {
+    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) {
       cachehits++;
       return HTableDescriptor.META_TABLEDESC;
     }
     // .META. and -ROOT- are already handled. If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt, throw an exception.
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
        throw new IOException("No descriptor found for non table = " + tablename);
     }
 
@@ -212,14 +201,36 @@ public class FSTableDescriptors implemen
     for (Path d: tableDirs) {
       HTableDescriptor htd = null;
       try {
+        htd = get(FSUtils.getTableName(d));
+      } catch (FileNotFoundException fnfe) {
+        // failure to retrieve one HTD shouldn't stop us from getting the remaining ones
+        LOG.warn("Trouble retrieving htd", fnfe);
+      }
+      if (htd == null) continue;
+      htds.put(htd.getTableName().getNameAsString(), htd);
+    }
+    return htds;
+  }
 
-        htd = get(d.getName());
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.TableDescriptors#getByNamespace(java.lang.String)
+   */
+  @Override
+  public Map<String, HTableDescriptor> getByNamespace(String name)
+  throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    List<Path> tableDirs =
+        FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
+    for (Path d: tableDirs) {
+      HTableDescriptor htd = null;
+      try {
+        htd = get(FSUtils.getTableName(d));
       } catch (FileNotFoundException fnfe) {
         // failure to retrieve one HTD shouldn't stop us from getting the remaining ones
         LOG.warn("Trouble retrieving htd", fnfe);
       }
       if (htd == null) continue;
-      htds.put(d.getName(), htd);
+      htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
     }
     return htds;
   }
@@ -233,19 +244,16 @@ public class FSTableDescriptors implemen
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
+    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
       throw new NotImplementedException();
     }
-    if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
-      throw new NotImplementedException();
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
       throw new NotImplementedException(
         "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
     }
     updateTableDescriptor(htd);
-    long modtime = getTableInfoModtime(htd.getNameAsString());
-    this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd));
+    long modtime = getTableInfoModtime(htd.getTableName());
+    this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd));
   }
 
   /**
@@ -254,12 +262,12 @@ public class FSTableDescriptors implemen
    * from the FileSystem.
    */
   @Override
-  public HTableDescriptor remove(final String tablename)
+  public HTableDescriptor remove(final TableName tablename)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
     }
-    Path tabledir = getTableDirectory(tablename);
+    Path tabledir = getTableDir(tablename);
     if (this.fs.exists(tabledir)) {
       if (!this.fs.delete(tabledir, true)) {
         throw new IOException("Failed delete of " + tabledir.toString());
@@ -276,7 +284,7 @@ public class FSTableDescriptors implemen
    * @return true if exists
    * @throws IOException
    */
-  public boolean isTableInfoExists(String tableName) throws IOException {
+  public boolean isTableInfoExists(TableName tableName) throws IOException {
     return getTableInfoPath(tableName) != null;
   }
   
@@ -284,8 +292,8 @@ public class FSTableDescriptors implemen
    * Find the most current table info file for the given table in the hbase root directory.
    * @return The file status of the current table info file or null if it does not exist
    */
-  private FileStatus getTableInfoPath(final String tableName) throws IOException {
-    Path tableDir = getTableDirectory(tableName);
+  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
+    Path tableDir = getTableDir(tableName);
     return getTableInfoPath(tableDir);
   }
 
@@ -384,17 +392,10 @@ public class FSTableDescriptors implemen
   /**
    * Return the table directory in HDFS
    */
-  @VisibleForTesting Path getTableDirectory(final String tableName) {
-    return getTableDirectory(rootdir, tableName);
+  @VisibleForTesting Path getTableDir(final TableName tableName) {
+    return FSUtils.getTableDir(rootdir, tableName);
   }
-  
-  /**
-   * Return the table directory in HDFS
-   */
-  static Path getTableDirectory(Path rootDir, String tableName) {
-    return FSUtils.getTablePath(rootDir, tableName);
-  }
-  
+
   private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
     @Override
     public boolean accept(Path p) {
@@ -460,7 +461,7 @@ public class FSTableDescriptors implemen
    * or <code>0</code> if no tableinfo file found.
    * @throws IOException
    */
-  private long getTableInfoModtime(final String tableName) throws IOException {
+  private long getTableInfoModtime(final TableName tableName) throws IOException {
     FileStatus status = getTableInfoPath(tableName);
     return status == null ? 0 : status.getModificationTime();
   }
@@ -471,8 +472,8 @@ public class FSTableDescriptors implemen
    * Returns null if it's not found.
    */
   public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
-      Path hbaseRootDir, String tableName) throws IOException {
-    Path tableDir = getTableDirectory(hbaseRootDir, tableName);
+      Path hbaseRootDir, TableName tableName) throws IOException {
+    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
   }
 
@@ -490,14 +491,14 @@ public class FSTableDescriptors implemen
     return readTableDescriptor(fs, status, false);
   }
   
-  private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName)
+  private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName)
   throws IOException {
     // ignore both -ROOT- and .META. tables
-    if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
-        || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
+    if (tableName.equals(TableName.ROOT_TABLE_NAME)
+        || tableName.equals(TableName.META_TABLE_NAME)) {
       return null;
     }
-    return getTableDescriptorAndModtime(getTableDirectory(tableName));
+    return getTableDescriptorAndModtime(getTableDir(tableName));
   }
 
   private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
@@ -545,7 +546,7 @@ public class FSTableDescriptors implemen
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    Path tableDir = getTableDirectory(htd.getNameAsString());
+    Path tableDir = getTableDir(htd.getTableName());
     Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
@@ -557,12 +558,12 @@ public class FSTableDescriptors implemen
    * Used in unit tests only.
    * @throws NotImplementedException if in read only mode
    */
-  public void deleteTableDescriptorIfExists(String tableName) throws IOException {
+  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
     }
    
-    Path tableDir = getTableDirectory(tableName);
+    Path tableDir = getTableDir(tableName);
     Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
     deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
   }
@@ -683,7 +684,7 @@ public class FSTableDescriptors implemen
    */
   public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDirectory(htd.getNameAsString());
+    Path tableDir = getTableDir(htd.getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
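
The FSTableDescriptors changes above replace String/byte[] table keys with the
TableName type end to end: the descriptor cache, get(), add(), remove() and the
tableinfo path helpers are all keyed by TableName now, and getByNamespace() is
added for the new namespace-aware layout. A minimal caller sketch against the
API as it reads after this patch (the namespace and table names are made up):

    Configuration conf = HBaseConfiguration.create();
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    // Lookups are keyed by TableName, which carries the namespace as well
    // as the table qualifier.
    TableName name = TableName.valueOf("ns1", "mytable");
    HTableDescriptor htd = fstd.get(name);   // cache first, then filesystem
    Map<String, HTableDescriptor> inNs = fstd.getByNamespace("ns1");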
   

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Thu Aug  8 06:08:23 2013
@@ -31,6 +31,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.ClusterId
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
@@ -925,17 +927,8 @@ public abstract class FSUtils {
   public static boolean isMajorCompacted(final FileSystem fs,
       final Path hbaseRootDir)
   throws IOException {
-    // Presumes any directory under hbase.rootdir is a table.
-    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
-    for (FileStatus tableDir : tableDirs) {
-      // Skip the .log directory.  All others should be tables.  Inside a table,
-      // there are compaction.dir directories to skip.  Otherwise, all else
-      // should be regions.  Then in each region, should only be family
-      // directories.  Under each of these, should be one file only.
-      Path d = tableDir.getPath();
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
-        continue;
-      }
+    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
+    for (Path d : tableDirs) {
       FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
@@ -1010,17 +1003,8 @@ public abstract class FSUtils {
     int cfCountTotal = 0;
     int cfFragTotal = 0;
     DirFilter df = new DirFilter(fs);
-    // presumes any directory under hbase.rootdir is a table
-    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
-    for (FileStatus tableDir : tableDirs) {
-      // Skip the .log directory.  All others should be tables.  Inside a table,
-      // there are compaction.dir directories to skip.  Otherwise, all else
-      // should be regions.  Then in each region, should only be family
-      // directories.  Under each of these, should be one file only.
-      Path d = tableDir.getPath();
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
-        continue;
-      }
+    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
+    for (Path d : tableDirs) {
       int cfCount = 0;
       int cfFrag = 0;
       FileStatus[] regionDirs = fs.listStatus(d, df);
@@ -1044,7 +1028,8 @@ public abstract class FSUtils {
         }
       }
       // compute percentage per table and store in result list
-      frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
+      frags.put(FSUtils.getTableName(d).getNameAsString(),
+          Math.round((float) cfFrag / cfCount * 100));
     }
     // set overall percentage for all tables
     frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
@@ -1081,13 +1066,12 @@ public abstract class FSUtils {
       final Path hbaseRootDir)
   throws IOException {
     // Presumes any directory under hbase.rootdir is a table.
-    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
-    for (FileStatus tableDir : tableDirs) {
+    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
+    for (Path d: tableDirs) {
       // Inside a table, there are compaction.dir directories to skip.
       // Otherwise, all else should be regions.  Then in each region, should
       // only be family directories.  Under each of these, should be a mapfile
       // and info directory and in these only one file.
-      Path d = tableDir.getPath();
       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
         continue;
       }
@@ -1133,6 +1117,45 @@ public abstract class FSUtils {
   }
 
   /**
+   * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
+   * path rootdir
+   *
+   * @param rootdir qualified path of HBase root directory
+   * @param tableName name of table
+   * @return {@link org.apache.hadoop.fs.Path} for table
+   */
+  public static Path getTableDir(Path rootdir, final TableName tableName) {
+    return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
+        tableName.getQualifierAsString());
+  }
+
+  /**
+   * Returns the {@link org.apache.hadoop.hbase.TableName} for the table whose
+   * directory is at the given path.
+   *
+   * @param tablePath path of the table directory
+   * @return {@link org.apache.hadoop.hbase.TableName} for the table
+   */
+  public static TableName getTableName(Path tablePath) {
+    return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
+  }
+
+  /**
+   * Returns the {@link org.apache.hadoop.fs.Path} object representing
+   * the namespace directory under path rootdir
+   *
+   * @param rootdir qualified path of HBase root directory
+   * @param namespace namespace name
+   * @return {@link org.apache.hadoop.fs.Path} for the namespace directory
+   */
+  public static Path getNamespaceDir(Path rootdir, final String namespace) {
+    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
+        new Path(namespace)));
+  }
+
+  /**
    * A {@link PathFilter} that returns only regular files.
    */
   static class FileFilter implements PathFilter {
@@ -1173,7 +1196,7 @@ public abstract class FSUtils {
           isValid = fs.getFileStatus(p).isDir();
         }
       } catch (IOException e) {
-        LOG.warn("An error occurred while verifying if [" + p.toString() + 
+        LOG.warn("An error occurred while verifying if [" + p.toString() +
                  "] is a valid directory. Returning 'not valid' and continuing.", e);
       }
       return isValid;
@@ -1235,15 +1258,27 @@ public abstract class FSUtils {
   public abstract void recoverFileLease(final FileSystem fs, final Path p,
       Configuration conf, CancelableProgressable reporter) throws IOException;
 
+  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
+      throws IOException {
+    List<Path> tableDirs = new LinkedList<Path>();
+
+    for(FileStatus status :
+        fs.globStatus(new Path(rootdir,
+            new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
+      tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
+    }
+    return tableDirs;
+  }
+
   /**
    * @param fs
    * @param rootdir
    * @return All the table directories under <code>rootdir</code>. Ignore non table hbase folders such as
-   * .logs, .oldlogs, .corrupt, .META., and -ROOT- folders.
+   * .logs, .oldlogs, .corrupt folders.
    * @throws IOException
    */
-  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
-  throws IOException {
+  public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
+      throws IOException {
     // presumes any directory under hbase.rootdir is a table
     FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
     List<Path> tabledirs = new ArrayList<Path>(dirs.length);
@@ -1257,14 +1292,6 @@ public abstract class FSUtils {
     return tabledirs;
   }
 
-  public static Path getTablePath(Path rootdir, byte [] tableName) {
-    return getTablePath(rootdir, Bytes.toString(tableName));
-  }
-
-  public static Path getTablePath(Path rootdir, final String tableName) {
-    return new Path(rootdir, tableName);
-  }
-
   /**
    * Filter for all dirs that don't start with '.'
    */
@@ -1422,14 +1449,14 @@ public abstract class FSUtils {
 
     // if this method looks similar to 'getTableFragmentation' that is because
     // it was borrowed from it.
-    
+
     DirFilter df = new DirFilter(fs);
     // presumes any directory under hbase.rootdir is a table
     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
     for (FileStatus tableDir : tableDirs) {
       // Skip the .log and other non-table directories.  All others should be tables.
       // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
-      // should be regions. 
+      // should be regions.
       Path d = tableDir.getPath();
       if (HConstants.HBASE_NON_TABLE_DIRS.contains(d.getName())) {
         continue;
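
The FSUtils additions define the namespaced directory layout: table directories
now live at <rootdir>/<HConstants.BASE_NAMESPACE_DIR>/<namespace>/<table>, and
getTableDirs() walks that extra level while getLocalTableDirs() keeps the old
single-directory scan. A sketch of the round trip through the new helpers (the
paths and names here are placeholders):

    Path rootdir = new Path("/hbase");
    TableName tn = TableName.valueOf("ns1", "usertable");
    Path tableDir = FSUtils.getTableDir(rootdir, tn);
    // tableDir = /hbase/<BASE_NAMESPACE_DIR>/ns1/usertable
    TableName roundTrip = FSUtils.getTableName(tableDir);
    assert roundTrip.equals(tn);  // namespace and qualifier parsed back from the path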

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Thu Aug  8 06:08:23 2013
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -202,7 +203,7 @@ public class HBaseFsck extends Configure
 
   // limit checking/fixes to listed tables, if empty attempt to check/fix all
   // .META. are always checked
-  private Set<String> tablesIncluded = new HashSet<String>();
+  private Set<TableName> tablesIncluded = new HashSet<TableName>();
   private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge
   private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE; // maximum number of overlapping regions to sideline
   private boolean sidelineBigOverlaps = false; // sideline overlaps with >maxMerge regions
@@ -225,8 +226,8 @@ public class HBaseFsck extends Configure
    * to detect and correct consistency (hdfs/meta/deployment) problems.
    */
   private TreeMap<String, HbckInfo> regionInfoMap = new TreeMap<String, HbckInfo>();
-  private TreeSet<byte[]> disabledTables =
-    new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+  private TreeSet<TableName> disabledTables =
+    new TreeSet<TableName>();
   // Empty regioninfo qualifiers in .META.
   private Set<Result> emptyRegionInfoQualifiers = new HashSet<Result>();
 
@@ -240,14 +241,16 @@ public class HBaseFsck extends Configure
    * unless checkMetaOnly is specified, in which case, it contains only
    * the meta table
    */
-  private SortedMap<String, TableInfo> tablesInfo = new ConcurrentSkipListMap<String,TableInfo>();
+  private SortedMap<TableName, TableInfo> tablesInfo =
+      new ConcurrentSkipListMap<TableName, TableInfo>();
 
   /**
    * When initially looking at HDFS, we attempt to find any orphaned data.
    */
   private List<HbckInfo> orphanHdfsDirs = Collections.synchronizedList(new ArrayList<HbckInfo>());
 
-  private Map<String, Set<String>> orphanTableDirs = new HashMap<String, Set<String>>();
+  private Map<TableName, Set<String>> orphanTableDirs =
+      new HashMap<TableName, Set<String>>();
 
   /**
    * Constructor
@@ -288,7 +291,7 @@ public class HBaseFsck extends Configure
    */
   public void connect() throws IOException {
     admin = new HBaseAdmin(getConf());
-    meta = new HTable(getConf(), HConstants.META_TABLE_NAME);
+    meta = new HTable(getConf(), TableName.META_TABLE_NAME);
     status = admin.getClusterStatus();
     connection = admin.getConnection();
   }
@@ -503,7 +506,7 @@ public class HBaseFsck extends Configure
       return ;
     }
 
-    String tableName = Bytes.toString(hi.getTableName());
+    TableName tableName = hi.getTableName();
     TableInfo tableInfo = tablesInfo.get(tableName);
     Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
     HTableDescriptor template = tableInfo.getHTD();
@@ -566,7 +569,7 @@ public class HBaseFsck extends Configure
         Bytes.toString(orphanRegionRange.getSecond()) + ")");
 
     // create new region on hdfs.  move data into place.
-    HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
+    HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
     LOG.info("Creating new region : " + hri);
     HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
     Path target = region.getRegionFileSystem().getRegionDir();
@@ -710,8 +713,7 @@ public class HBaseFsck extends Configure
         errors.detail("Number of Tables in flux: " + numSkipped.get());
       }
       for (HTableDescriptor td : allTables) {
-        String tableName = td.getNameAsString();
-        errors.detail("  Table: " + tableName + "\t" +
+        errors.detail("  Table: " + td.getTableName() + "\t" +
                            (td.isReadOnly() ? "ro" : "rw") + "\t" +
                             (td.isMetaRegion() ? "META" : "    ") + "\t" +
                            " families: " + td.getFamilies().size());
@@ -761,7 +763,8 @@ public class HBaseFsck extends Configure
   /**
    * Populate hbi's from regionInfos loaded from file system.
    */
-  private SortedMap<String, TableInfo> loadHdfsRegionInfos() throws IOException, InterruptedException {
+  private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
+      throws IOException, InterruptedException {
     tablesInfo.clear(); // regenerating the data
     // generate region split structure
     Collection<HbckInfo> hbckInfos = regionInfoMap.values();
@@ -799,7 +802,7 @@ public class HBaseFsck extends Configure
 
 
       // get table name from hdfs, populate various HBaseFsck tables.
-      String tableName = Bytes.toString(hbi.getTableName());
+      TableName tableName = hbi.getTableName();
       if (tableName == null) {
         // There was an entry in META not in the HDFS?
         LOG.warn("tableName was null for: " + hbi);
@@ -861,7 +864,7 @@ public class HBaseFsck extends Configure
    * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
    * @throws IOException
    */
-  private boolean fabricateTableInfo(FSTableDescriptors fstd, String tableName,
+  private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
       Set<String> columns) throws IOException {
     if (columns ==null || columns.isEmpty()) return false;
     HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -899,19 +902,21 @@ public class HBaseFsck extends Configure
   public void fixOrphanTables() throws IOException {
     if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
 
-      List<String> tmpList = new ArrayList<String>();
+      List<TableName> tmpList = new ArrayList<TableName>();
       tmpList.addAll(orphanTableDirs.keySet());
       HTableDescriptor[] htds = getHTableDescriptors(tmpList);
-      Iterator<Entry<String, Set<String>>> iter = orphanTableDirs.entrySet().iterator();
+      Iterator<Entry<TableName, Set<String>>> iter =
+          orphanTableDirs.entrySet().iterator();
       int j = 0;
       int numFailedCase = 0;
       FSTableDescriptors fstd = new FSTableDescriptors(getConf());
       while (iter.hasNext()) {
-        Entry<String, Set<String>> entry = (Entry<String, Set<String>>) iter.next();
-        String tableName = entry.getKey();
+        Entry<TableName, Set<String>> entry =
+            (Entry<TableName, Set<String>>) iter.next();
+        TableName tableName = entry.getKey();
         LOG.info("Trying to fix orphan table error: " + tableName);
         if (j < htds.length) {
-          if (tableName.equals(Bytes.toString(htds[j].getName()))) {
+          if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
             fstd.createTableDescriptor(htd, true);
@@ -969,14 +974,15 @@ public class HBaseFsck extends Configure
    *
    * @return An array list of puts to do in bulk, null if tables have problems
    */
-  private ArrayList<Put> generatePuts(SortedMap<String, TableInfo> tablesInfo) throws IOException {
+  private ArrayList<Put> generatePuts(
+      SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
     ArrayList<Put> puts = new ArrayList<Put>();
     boolean hasProblems = false;
-    for (Entry<String, TableInfo> e : tablesInfo.entrySet()) {
-      String name = e.getKey();
+    for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
+      TableName name = e.getKey();
 
       // skip ".META."
-      if (Bytes.compareTo(Bytes.toBytes(name), HConstants.META_TABLE_NAME) == 0) {
+      if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
         continue;
       }
 
@@ -1006,7 +1012,8 @@ public class HBaseFsck extends Configure
   /**
    * Suggest fixes for each table
    */
-  private void suggestFixes(SortedMap<String, TableInfo> tablesInfo) throws IOException {
+  private void suggestFixes(
+      SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
     for (TableInfo tInfo : tablesInfo.values()) {
       TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
       tInfo.checkRegionChain(handler);
@@ -1077,7 +1084,7 @@ public class HBaseFsck extends Configure
     return true;
   }
 
-  private SortedMap<String, TableInfo> checkHdfsIntegrity(boolean fixHoles,
+  private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
       boolean fixOverlaps) throws IOException {
     LOG.info("Checking HBase region split map from HDFS data...");
     for (TableInfo tInfo : tablesInfo.values()) {
@@ -1123,7 +1130,7 @@ public class HBaseFsck extends Configure
    */
   Path sidelineRegionDir(FileSystem fs,
       String parentDir, HbckInfo hi) throws IOException {
-    String tableName = Bytes.toString(hi.getTableName());
+    TableName tableName = hi.getTableName();
     Path regionDir = hi.getHdfsRegionDir();
 
     if (!fs.exists(regionDir)) {
@@ -1135,7 +1142,7 @@ public class HBaseFsck extends Configure
     if (parentDir != null) {
       rootDir = new Path(rootDir, parentDir);
     }
-    Path sidelineTableDir= new Path(rootDir, tableName);
+    Path sidelineTableDir = FSUtils.getTableDir(rootDir, tableName);
     Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName());
     fs.mkdirs(sidelineRegionDir);
     boolean success = false;
@@ -1194,16 +1201,16 @@ public class HBaseFsck extends Configure
   /**
    * Side line an entire table.
    */
-  void sidelineTable(FileSystem fs, byte[] table, Path hbaseDir,
+  void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
       Path backupHbaseDir) throws IOException {
-    String tableName = Bytes.toString(table);
-    Path tableDir = new Path(hbaseDir, tableName);
+    Path tableDir = FSUtils.getTableDir(hbaseDir, tableName);
     if (fs.exists(tableDir)) {
-      Path backupTableDir= new Path(backupHbaseDir, tableName);
+      Path backupTableDir = FSUtils.getTableDir(backupHbaseDir, tableName);
+      fs.mkdirs(backupTableDir.getParent());
       boolean success = fs.rename(tableDir, backupTableDir);
       if (!success) {
         throw new IOException("Failed to move  " + tableName + " from "
-            +  tableDir.getName() + " to " + backupTableDir.getName());
+            +  tableDir + " to " + backupTableDir);
       }
     } else {
       LOG.info("No previous " + tableName +  " exists.  Continuing.");
@@ -1220,7 +1227,7 @@ public class HBaseFsck extends Configure
     Path backupDir = getSidelineDir();
     fs.mkdirs(backupDir);
     try {
-      sidelineTable(fs, HConstants.META_TABLE_NAME, hbaseDir, backupDir);
+      sidelineTable(fs, TableName.META_TABLE_NAME, hbaseDir, backupDir);
     } catch (IOException e) {
       LOG.fatal("... failed to sideline meta. Currently in inconsistent state.  To restore "
       + "try to rename .META. in " + backupDir.getName() + " to "
@@ -1242,8 +1249,9 @@ public class HBaseFsck extends Configure
       public Void connect(HConnection connection) throws IOException {
         ZooKeeperWatcher zkw = createZooKeeperWatcher();
         try {
-          for (String tableName : ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) {
-            disabledTables.add(Bytes.toBytes(tableName));
+          for (TableName tableName :
+              ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) {
+            disabledTables.add(tableName);
           }
         } catch (KeeperException ke) {
           throw new IOException(ke);
@@ -1273,18 +1281,16 @@ public class HBaseFsck extends Configure
     // list all tables from HDFS
     List<FileStatus> tableDirs = Lists.newArrayList();
 
-    boolean foundVersionFile = false;
-    FileStatus[] files = fs.listStatus(rootDir);
-    for (FileStatus file : files) {
-      String dirName = file.getPath().getName();
-      if (dirName.equals(HConstants.VERSION_FILE_NAME)) {
-        foundVersionFile = true;
-      } else {
-        if ((!checkMetaOnly && isTableIncluded(dirName)) ||
-            dirName.equals(".META.")) {
-          tableDirs.add(file);
-        }
-      }
+    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+
+    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
+    for (Path path : paths) {
+      TableName tableName = FSUtils.getTableName(path);
+      if ((!checkMetaOnly && isTableIncluded(tableName)) ||
+          tableName.equals(TableName.META_TABLE_NAME)) {
+        tableDirs.add(fs.getFileStatus(path));
+      }
     }
 
     // verify that version file exists
@@ -1328,7 +1334,7 @@ public class HBaseFsck extends Configure
    */
   private boolean recordMetaRegion() throws IOException {
     HRegionLocation metaLocation = connection.locateRegion(
-      HConstants.META_TABLE_NAME, HConstants.EMPTY_START_ROW);
+      TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW);
 
     // Check if Meta region is valid and existing
     if (metaLocation == null || metaLocation.getRegionInfo() == null ||
@@ -1773,8 +1779,8 @@ public class HBaseFsck extends Configure
    * repeated or overlapping ones.
    * @throws IOException
    */
-  SortedMap<String, TableInfo> checkIntegrity() throws IOException {
-    tablesInfo = new TreeMap<String,TableInfo> ();
+  SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
+    tablesInfo = new TreeMap<TableName,TableInfo> ();
     List<HbckInfo> noHDFSRegionInfos = new ArrayList<HbckInfo>();
     LOG.debug("There are " + regionInfoMap.size() + " region info entries");
     for (HbckInfo hbi : regionInfoMap.values()) {
@@ -1811,7 +1817,7 @@ public class HBaseFsck extends Configure
       if (hbi.deployedOn.size() == 0) continue;
 
       // We should be safe here
-      String tableName = hbi.metaEntry.getTableNameAsString();
+      TableName tableName = hbi.metaEntry.getTableName();
       TableInfo modTInfo = tablesInfo.get(tableName);
       if (modTInfo == null) {
         modTInfo = new TableInfo(tableName);
@@ -1901,7 +1907,7 @@ public class HBaseFsck extends Configure
    * Maintain information about a particular table.
    */
   public class TableInfo {
-    String tableName;
+    TableName tableName;
     TreeSet <ServerName> deployedOn;
 
     // backwards regions
@@ -1920,7 +1926,7 @@ public class HBaseFsck extends Configure
     final Multimap<byte[], HbckInfo> overlapGroups =
       TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp);
 
-    TableInfo(String name) {
+    TableInfo(TableName name) {
       this.tableName = name;
       deployedOn = new TreeSet <ServerName>();
     }
@@ -1965,7 +1971,7 @@ public class HBaseFsck extends Configure
       this.deployedOn.add(server);
     }
 
-    public String getName() {
+    public TableName getName() {
       return tableName;
     }
 
@@ -2070,7 +2076,7 @@ public class HBaseFsck extends Configure
             getTableInfo(), next);
         HTableDescriptor htd = getTableInfo().getHTD();
         // from special EMPTY_START_ROW to next region's startKey
-        HRegionInfo newRegion = new HRegionInfo(htd.getName(),
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
             HConstants.EMPTY_START_ROW, next.getStartKey());
 
         // TODO test
@@ -2086,7 +2092,7 @@ public class HBaseFsck extends Configure
                 + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
         HTableDescriptor htd = getTableInfo().getHTD();
         // from curEndKey to EMPTY_START_ROW
-        HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey,
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
             HConstants.EMPTY_START_ROW);
 
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
@@ -2108,7 +2114,7 @@ public class HBaseFsck extends Configure
                 + ".  Creating a new regioninfo and region "
                 + "dir in hdfs to plug the hole.");
         HTableDescriptor htd = getTableInfo().getHTD();
-        HRegionInfo newRegion = new HRegionInfo(htd.getName(), holeStartKey, holeStopKey);
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region);
         fixes++;
@@ -2193,7 +2199,7 @@ public class HBaseFsck extends Configure
         // create new empty container region.
         HTableDescriptor htd = getTableInfo().getHTD();
         // from start key to end Key
-        HRegionInfo newRegion = new HRegionInfo(htd.getName(), range.getFirst(),
+        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
             range.getSecond());
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Created new empty container region: " +
@@ -2274,7 +2280,7 @@ public class HBaseFsck extends Configure
       // When table is disabled no need to check for the region chain. Some of the regions
       // accidently if deployed, this below code might report some issues like missing start
       // or end regions or region hole in chain and may try to fix which is unwanted.
-      if (disabledTables.contains(this.tableName.getBytes())) {
+      if (disabledTables.contains(this.tableName)) {
         return true;
       }
       int originalErrorsCount = errors.getErrorList().size();
@@ -2415,7 +2421,7 @@ public class HBaseFsck extends Configure
 
   public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
     for (Map.Entry<Path, HbckInfo> entry: regions.entrySet()) {
-      String tableName = Bytes.toStringBinary(entry.getValue().getTableName());
+      TableName tableName = entry.getValue().getTableName();
       Path path = entry.getKey();
       errors.print("This sidelined region dir should be bulk loaded: "
         + path.toString());
@@ -2426,7 +2432,7 @@ public class HBaseFsck extends Configure
   }
 
   public Multimap<byte[], HbckInfo> getOverlapGroups(
-      String table) {
+      TableName table) {
     TableInfo ti = tablesInfo.get(table);
     return ti.overlapGroups;
   }
@@ -2441,7 +2447,7 @@ public class HBaseFsck extends Configure
    * @throws IOException if an error is encountered
    */
    HTableDescriptor[] getTables(AtomicInteger numSkipped) {
-    List<String> tableNames = new ArrayList<String>();
+    List<TableName> tableNames = new ArrayList<TableName>();
     long now = System.currentTimeMillis();
 
     for (HbckInfo hbi : regionInfoMap.values()) {
@@ -2451,7 +2457,7 @@ public class HBaseFsck extends Configure
       // pick only those tables that were not modified in the last few milliseconds.
       if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
         if (info.modTime + timelag < now) {
-          tableNames.add(info.getTableNameAsString());
+          tableNames.add(info.getTableName());
         } else {
           numSkipped.incrementAndGet(); // one more in-flux table
         }
@@ -2460,11 +2466,11 @@ public class HBaseFsck extends Configure
     return getHTableDescriptors(tableNames);
   }
 
-   HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
+   HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
     HTableDescriptor[] htd = new HTableDescriptor[0];
      try {
        LOG.info("getHTableDescriptors == tableNames => " + tableNames);
-       htd = new HBaseAdmin(getConf()).getTableDescriptors(tableNames);
+       htd = new HBaseAdmin(getConf()).getTableDescriptorsByTableName(tableNames);
      } catch (IOException e) {
        LOG.debug("Exception getting table descriptors", e);
      }
@@ -2575,7 +2581,7 @@ public class HBaseFsck extends Configure
             sn = pair.getSecond();
           }
           HRegionInfo hri = pair.getFirst();
-          if (!(isTableIncluded(hri.getTableNameAsString())
+          if (!(isTableIncluded(hri.getTableName())
               || hri.isMetaRegion())) {
             return true;
           }
@@ -2652,7 +2658,7 @@ public class HBaseFsck extends Configure
       hash ^= Arrays.hashCode(getStartKey());
       hash ^= Arrays.hashCode(getEndKey());
       hash ^= Boolean.valueOf(isOffline()).hashCode();
-      hash ^= Arrays.hashCode(getTableName());
+      hash ^= getTableName().hashCode();
       if (regionServer != null) {
         hash ^= regionServer.hashCode();
       }
@@ -2741,14 +2747,14 @@ public class HBaseFsck extends Configure
       }
     }
 
-    public byte[] getTableName() {
+    public TableName getTableName() {
       if (this.metaEntry != null) {
         return this.metaEntry.getTableName();
       } else if (this.hdfsEntry != null) {
         // we are only guaranteed to have a path and not an HRI for hdfsEntry,
         // so we get the name from the Path
         Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent();
-        return Bytes.toBytes(tableDir.getName());
+        return FSUtils.getTableName(tableDir);
       } else {
         // Currently no code exercises this path, but we could add one for
         // getting table name from OnlineEntry
@@ -2829,8 +2835,7 @@ public class HBaseFsck extends Configure
         return 0;
       }
 
-      int tableCompare = RegionSplitCalculator.BYTES_COMPARATOR.compare(
-          l.getTableName(), r.getTableName());
+      int tableCompare = l.getTableName().compareTo(r.getTableName());
       if (tableCompare != 0) {
         return tableCompare;
       }
@@ -2873,7 +2878,7 @@ public class HBaseFsck extends Configure
   /**
    * Prints summary of all tables found on the system.
    */
-  private void printTableSummary(SortedMap<String, TableInfo> tablesInfo) {
+  private void printTableSummary(SortedMap<TableName, TableInfo> tablesInfo) {
     StringBuilder sb = new StringBuilder();
     errors.print("Summary:");
     for (TableInfo tInfo : tablesInfo.values()) {
@@ -3100,7 +3105,7 @@ public class HBaseFsck extends Configure
       List<HRegionInfo> ret = Lists.newArrayList();
       for (HRegionInfo hri : regions) {
         if (hri.isMetaTable() || (!hbck.checkMetaOnly
-            && hbck.isTableIncluded(hri.getTableNameAsString()))) {
+            && hbck.isTableIncluded(hri.getTableName()))) {
           ret.add(hri);
         }
       }
@@ -3129,12 +3134,6 @@ public class HBaseFsck extends Configure
     @Override
     public synchronized Void call() throws IOException {
       try {
-        String tableName = tableDir.getPath().getName();
-        // ignore hidden files
-        if (tableName.startsWith(".") &&
-            !tableName.equals( Bytes.toString(HConstants.META_TABLE_NAME))) {
-          return null;
-        }
         // level 2: <HBASE_DIR>/<table>/*
         FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
         for (FileStatus regionDir : regionDirs) {
@@ -3208,7 +3207,7 @@ public class HBaseFsck extends Configure
           hbck.loadHdfsRegioninfo(hbi);
         } catch (IOException ioe) {
           String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
-              + Bytes.toString(hbi.getTableName()) + " in hdfs dir "
+              + hbi.getTableName() + " in hdfs dir "
               + hbi.getHdfsRegionDir()
               + "!  It may be an invalid format or version file.  Treating as "
               + "an orphaned regiondir.";
@@ -3404,16 +3403,16 @@ public class HBaseFsck extends Configure
    * Only check/fix tables specified by the list,
    * Empty list means all tables are included.
    */
-  boolean isTableIncluded(String table) {
+  boolean isTableIncluded(TableName table) {
     return (tablesIncluded.size() == 0) || tablesIncluded.contains(table);
   }
 
-  public void includeTable(String table) {
+  public void includeTable(TableName table) {
     tablesIncluded.add(table);
   }
 
-  Set<String> getIncludedTables() {
-    return new HashSet<String>(tablesIncluded);
+  Set<TableName> getIncludedTables() {
+    return new HashSet<TableName>(tablesIncluded);
   }
 
   /**
@@ -3680,7 +3679,7 @@ public class HBaseFsck extends Configure
         errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd);
         return printUsageAndExit();
       } else {
-        includeTable(cmd);
+        includeTable(TableName.valueOf(cmd));
         errors.print("Allow checking/fixes for table: " + cmd);
       }
     }
@@ -3702,12 +3701,12 @@ public class HBaseFsck extends Configure
       LOG.info("Checking all hfiles for corruption");
       HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles);
       setHFileCorruptionChecker(hfcc); // so we can get result
-      Collection<String> tables = getIncludedTables();
+      Collection<TableName> tables = getIncludedTables();
       Collection<Path> tableDirs = new ArrayList<Path>();
       Path rootdir = FSUtils.getRootDir(getConf());
       if (tables.size() > 0) {
-        for (String t : tables) {
-          tableDirs.add(FSUtils.getTablePath(rootdir, t));
+        for (TableName t : tables) {
+          tableDirs.add(FSUtils.getTableDir(rootdir, t));
         }
       } else {
         tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
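
Within HBaseFsck the same substitution runs through the bookkeeping maps
(tablesInfo, disabledTables, tablesIncluded, orphanTableDirs), so table
identity comparisons become TableName.equals()/compareTo() instead of
byte[]/String comparisons. Table filtering from the command line now parses
each argument into a TableName; illustrative only (the construction of the
hbck instance is assumed to be the usual one):

    HBaseFsck hbck = new HBaseFsck(conf);
    hbck.includeTable(TableName.valueOf("ns1", "orders"));
    // isTableIncluded() matches on the full namespace-qualified name
    boolean checked = hbck.isTableIncluded(TableName.valueOf("ns1", "orders"));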

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java Thu Aug  8 06:08:23 2013
@@ -28,10 +28,10 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -177,7 +177,7 @@ public class HBaseFsckRepair {
    */
   public static void fixMetaHoleOnline(Configuration conf,
       HRegionInfo hri) throws IOException {
-    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
+    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
     MetaEditor.addRegionToMeta(meta, hri);
     meta.close();
   }
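
HBaseFsckRepair picks up the same constant move: the catalog table handle is
opened with TableName.META_TABLE_NAME rather than the removed byte[] constant
from HConstants. The pattern, as in fixMetaHoleOnline() above:

    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    try {
      MetaEditor.addRegionToMeta(meta, hri);  // write the missing region row
    } finally {
      meta.close();
    }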

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Thu Aug  8 06:08:23 2013
@@ -19,11 +19,10 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -46,7 +45,8 @@ public class HFileArchiveUtil {
    * @return {@link Path} to the directory to archive the given store or
    *         <tt>null</tt> if it should not be archived
    */
-  public static Path getStoreArchivePath(final Configuration conf, final String tableName,
+  public static Path getStoreArchivePath(final Configuration conf,
+                                         final TableName tableName,
       final String regionName, final String familyName) throws IOException {
     Path tableArchiveDir = getTableArchivePath(conf, tableName);
     return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName));
@@ -54,29 +54,36 @@ public class HFileArchiveUtil {
 
   /**
    * Get the directory to archive a store directory
-   * @param conf {@link Configuration} to read for the archive directory name. Can be null.
+   * @param conf {@link Configuration} to read for the archive directory name.
    * @param region parent region information under which the store currently lives
    * @param tabledir directory for the table under which the store currently lives
    * @param family name of the family in the store
    * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
    *         not be archived
    */
-  public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
-      byte[] family) {
-    Path tableArchiveDir = getTableArchivePath(tabledir);
+  public static Path getStoreArchivePath(Configuration conf,
+                                         HRegionInfo region,
+                                         Path tabledir,
+      byte[] family) throws IOException {
+    TableName tableName = FSUtils.getTableName(tabledir);
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path tableArchiveDir = getTableArchivePath(rootDir, tableName);
     return HStore.getStoreHomedir(tableArchiveDir, region, family);
   }
 
   /**
    * Get the archive directory for a given region under the specified table
-   * @param tabledir the original table directory. Cannot be null.
+   * @param rootDir {@link Path} to the root directory where hbase files are stored (for
+   *          building the archive path)
+   * @param tableName the table name. Cannot be null.
    * @param regiondir the path to the region directory. Cannot be null.
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Path tabledir, Path regiondir) {
+  public static Path getRegionArchiveDir(Path rootDir,
+                                         TableName tableName,
+                                         Path regiondir) {
     // get the archive directory for a table
-    Path archiveDir = getTableArchivePath(tabledir);
+    Path archiveDir = getTableArchivePath(rootDir, tableName);
 
     // then add on the region path under the archive
     String encodedRegionName = regiondir.getName();
@@ -85,19 +92,16 @@ public class HFileArchiveUtil {
 
   /**
    * Get the archive directory for a given region under the specified table
-   * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+   * @param rootDir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
-   * @param tabledir the original table directory. Cannot be null.
-   * @param regiondir the path to the region directory. Cannot be null.
+   * @param tableName name of the table to archive. Cannot be null.
+   * @param encodedRegionName encoded name of the region directory under the table. Cannot be null.
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Path rootdir, Path tabledir, Path regiondir) {
+  public static Path getRegionArchiveDir(Path rootDir,
+                                         TableName tableName, String encodedRegionName) {
     // get the archive directory for a table
-    Path archiveDir = getTableArchivePath(rootdir, tabledir.getName());
-
-    // then add on the region path under the archive
-    String encodedRegionName = regiondir.getName();
+    Path archiveDir = getTableArchivePath(rootDir, tableName);
     return HRegion.getRegionDir(archiveDir, encodedRegionName);
   }
 
@@ -107,27 +111,13 @@ public class HFileArchiveUtil {
    * Get the path to the table's archive directory.
    * <p>
    * Generally of the form: /hbase/.archive/[tablename]
-   * @param tabledir directory of the table to be archived. Cannot be null.
-   * @return {@link Path} to the archive directory for the table
-   */
-  public static Path getTableArchivePath(Path tabledir) {
-    Path root = tabledir.getParent();
-    return getTableArchivePath(root, tabledir.getName());
-  }
-
-  /**
-   * Get the path to the table archive directory based on the configured archive directory.
-   * <p>
-   * Get the path to the table's archive directory.
-   * <p>
-   * Generally of the form: /hbase/.archive/[tablename]
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
    * @param tableName Name of the table to be archived. Cannot be null.
    * @return {@link Path} to the archive directory for the table
    */
-  public static Path getTableArchivePath(final Path rootdir, final String tableName) {
-    return new Path(getArchivePath(rootdir), tableName);
+  public static Path getTableArchivePath(final Path rootdir, final TableName tableName) {
+    return FSUtils.getTableDir(getArchivePath(rootdir), tableName);
   }
 
   /**
@@ -138,9 +128,10 @@ public class HFileArchiveUtil {
    * @param tableName Name of the table to be archived. Cannot be null.
    * @return {@link Path} to the archive directory for the table
    */
-  public static Path getTableArchivePath(final Configuration conf, final String tableName)
+  public static Path getTableArchivePath(final Configuration conf,
+                                         final TableName tableName)
       throws IOException {
-    return new Path(getArchivePath(conf), tableName);
+    return FSUtils.getTableDir(getArchivePath(conf), tableName);
   }
 
   /**
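
In HFileArchiveUtil every archive path is now derived from the HBase root
directory plus a TableName, delegating to FSUtils.getTableDir() so archives
land under the same namespace-aware layout as live tables. For example (the
names are placeholders):

    Path rootDir = FSUtils.getRootDir(conf);
    TableName tn = TableName.valueOf("ns1", "usertable");
    Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, tn);
    // i.e. FSUtils.getTableDir(getArchivePath(rootDir), tn) per the code above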

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Thu Aug  8 06:08:23 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -79,7 +80,7 @@ class HMerge {
    * @throws IOException
    */
   public static void merge(Configuration conf, FileSystem fs,
-    final byte [] tableName)
+    final TableName tableName)
   throws IOException {
     merge(conf, fs, tableName, true);
   }
@@ -100,7 +101,7 @@ class HMerge {
    * @throws IOException
    */
   public static void merge(Configuration conf, FileSystem fs,
-    final byte [] tableName, final boolean testMasterRunning)
+    final TableName tableName, final boolean testMasterRunning)
   throws IOException {
     boolean masterIsRunning = false;
     if (testMasterRunning) {
@@ -112,7 +113,7 @@ class HMerge {
             }
           });
     }
-    if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
       if (masterIsRunning) {
         throw new IllegalStateException(
             "Can not compact META table if instance is on-line");
@@ -140,7 +141,7 @@ class HMerge {
     private final long maxFilesize;
 
 
-    protected Merger(Configuration conf, FileSystem fs, final byte [] tableName)
+    protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
     throws IOException {
       this.conf = conf;
       this.fs = fs;
@@ -148,7 +149,7 @@ class HMerge {
           HConstants.DEFAULT_MAX_FILE_SIZE);
 
       this.rootDir = FSUtils.getRootDir(conf);
-      Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName);
+      Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
       this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
@@ -225,17 +226,17 @@ class HMerge {
 
   /** Instantiated to compact a normal user table */
   private static class OnlineMerger extends Merger {
-    private final byte [] tableName;
+    private final TableName tableName;
     private final HTable table;
     private final ResultScanner metaScanner;
     private HRegionInfo latestRegion;
 
     OnlineMerger(Configuration conf, FileSystem fs,
-      final byte [] tableName)
+      final TableName tableName)
     throws IOException {
       super(conf, fs, tableName);
       this.tableName = tableName;
-      this.table = new HTable(conf, HConstants.META_TABLE_NAME);
+      this.table = new HTable(conf, TableName.META_TABLE_NAME);
       this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY,
           HConstants.REGIONINFO_QUALIFIER);
       this.latestRegion = null;
@@ -253,7 +254,7 @@ class HMerge {
               Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
               Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
         }
-        if (!Bytes.equals(region.getTableName(), this.tableName)) {
+        if (!region.getTableName().equals(this.tableName)) {
           return null;
         }
         return region;
@@ -281,6 +282,11 @@ class HMerge {
           currentRow = metaScanner.next();
           continue;
         }
+        HRegionInfo region = HRegionInfo.getHRegionInfo(currentRow);
+        if (!region.getTableName().equals(this.tableName)) {
+          currentRow = metaScanner.next();
+          continue;
+        }
         foundResult = true;
         break;
       }

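A rough usage sketch of the TableName-taking merge entry point above. HMerge
is package-private, so a real caller would live in
org.apache.hadoop.hbase.util; the table name is illustrative, and the table
is assumed to satisfy HMerge's preconditions (a user table must be disabled):

    package org.apache.hadoop.hbase.util;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;

    public class HMergeSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Scans the table's regions and merges adjacent small ones;
        // merging META instead requires the cluster to be off-line.
        HMerge.merge(conf, fs, TableName.valueOf("t1"));
      }
    }
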
Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java Thu Aug  8 06:08:23 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -58,7 +59,7 @@ public class Merge extends Configured im
   static final Log LOG = LogFactory.getLog(Merge.class);
   private Path rootdir;
   private volatile MetaUtils utils;
-  private byte [] tableName;               // Name of table
+  private TableName tableName;               // Name of table
   private volatile byte [] region1;        // Name of region 1
   private volatile byte [] region2;        // Name of region 2
   private volatile HRegionInfo mergeInfo;
@@ -131,7 +132,7 @@ public class Merge extends Configured im
    */
   private void mergeTwoRegions() throws IOException {
     LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
-        Bytes.toStringBinary(this.region2) + " in table " + Bytes.toString(this.tableName));
+        Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
     HRegion meta = this.utils.getMetaRegion();
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
@@ -153,7 +154,7 @@ public class Merge extends Configured im
       throw new NullPointerException("info2 is null using key " + meta);
     }
     HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
-      this.rootdir, Bytes.toString(this.tableName));
+      this.rootdir, this.tableName);
     HRegion merged = merge(htd, meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
@@ -244,7 +245,7 @@ public class Merge extends Configured im
       usage();
       return -1;
     }
-    tableName = Bytes.toBytes(remainingArgs[0]);
+    tableName = TableName.valueOf(remainingArgs[0]);
 
     region1 = Bytes.toBytesBinary(remainingArgs[1]);
     region2 = Bytes.toBytesBinary(remainingArgs[2]);
@@ -258,10 +259,11 @@ public class Merge extends Configured im
     return status;
   }
 
-  private boolean notInTable(final byte [] tn, final byte [] rn) {
-    if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) {
+  private boolean notInTable(final TableName tn, final byte [] rn) {
+    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
+        rn, 0, tn.getName().length) != 0) {
       LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
-        Bytes.toString(tn));
+        tn);
       return true;
     }
     return false;

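The reworked notInTable() above still relies on region names being prefixed
by the bytes of their table name: it compares the first tn.getName().length
bytes of the region name against the table name. A small illustration (the
region name value is made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.WritableComparator;

    public class PrefixCompareSketch {
      public static void main(String[] args) {
        byte[] tn = TableName.valueOf("t1").getName();
        byte[] rn = Bytes.toBytes("t1,,1375942103843.0123456789abcdef.");
        boolean inTable = WritableComparator.compareBytes(
            tn, 0, tn.length, rn, 0, tn.length) == 0;
        System.out.println(inTable);  // true: the region name starts with "t1"
      }
    }
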
Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java Thu Aug  8 06:08:23 2013
@@ -95,7 +95,7 @@ public abstract class ModifyRegionUtils 
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
     ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(conf,
-        "RegionOpenAndInitThread-" + hTableDescriptor.getNameAsString(), regionNumber);
+        "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
     CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<HRegionInfo>(
         regionOpenAndInitThreadPool);
     List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java Thu Aug  8 06:08:23 2013
@@ -47,11 +47,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -374,7 +374,7 @@ public class RegionSplitter {
     LOG.debug("Creating table " + tableName + " with " + columnFamilies.length
         + " column families.  Presplitting to " + splitCount + " regions");
 
-    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
     for (String cf : columnFamilies) {
       desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf)));
     }
@@ -410,7 +410,7 @@ public class RegionSplitter {
         Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS);
 
     Path hbDir = FSUtils.getRootDir(conf);
-    Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
+    Path tableDir = FSUtils.getTableDir(hbDir, table.getName());
     Path splitFile = new Path(tableDir, "_balancedSplit");
     FileSystem fs = FileSystem.get(conf);
 
@@ -640,7 +640,7 @@ public class RegionSplitter {
 
     // get table info
     Path rootDir = FSUtils.getRootDir(table.getConfiguration());
-    Path tableDir = HTableDescriptor.getTableDir(rootDir, table.getTableName());
+    Path tableDir = FSUtils.getTableDir(rootDir, table.getName());
     FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
     HTableDescriptor htd = table.getTableDescriptor();
 
@@ -684,7 +684,7 @@ public class RegionSplitter {
           // check every Column Family for that region
           boolean refFound = false;
           for (HColumnDescriptor c : htd.getFamilies()) {
-            if ((refFound = regionFs.hasReferences(htd.getNameAsString()))) {
+            if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) {
               break;
             }
           }
@@ -716,7 +716,7 @@ public class RegionSplitter {
   static LinkedList<Pair<byte[], byte[]>> getSplits(HTable table,
       SplitAlgorithm splitAlgo) throws IOException {
     Path hbDir = FSUtils.getRootDir(table.getConfiguration());
-    Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
+    Path tableDir = FSUtils.getTableDir(hbDir, table.getName());
     Path splitFile = new Path(tableDir, "_balancedSplit");
     FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
 

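The substitution repeated above, HTableDescriptor.getTableDir(root, nameBytes)
becoming FSUtils.getTableDir(root, tableName), routes all table-directory
lookups through the namespace-aware layout. A minimal sketch (the table name
is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class TableDirSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Path hbDir = FSUtils.getRootDir(conf);
        // Resolves the table dir under the table's namespace rather than
        // directly under the root dir.
        Path tableDir = FSUtils.getTableDir(hbDir, TableName.valueOf("t1"));
        Path splitFile = new Path(tableDir, "_balancedSplit");  // as used by RegionSplitter
        System.out.println(splitFile);
      }
    }
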
Modified: hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp (original)
+++ hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp Thu Aug  8 06:08:23 2013
@@ -36,7 +36,8 @@
   import="org.apache.hadoop.util.StringUtils"
   import="java.util.List"
   import="java.util.Map"
-  import="org.apache.hadoop.hbase.HConstants"%><%
+  import="org.apache.hadoop.hbase.HConstants"%>
+<%@ page import="org.apache.hadoop.hbase.TableName" %><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   Configuration conf = master.getConfiguration();
   HBaseAdmin hbadmin = new HBaseAdmin(conf);
@@ -44,10 +45,12 @@
   String snapshotName = request.getParameter("name");
   SnapshotDescription snapshot = null;
   SnapshotInfo.SnapshotStats stats = null;
+  TableName snapshotTable = null;
   for (SnapshotDescription snapshotDesc: hbadmin.listSnapshots()) {
     if (snapshotName.equals(snapshotDesc.getName())) {
       snapshot = snapshotDesc;
       stats = SnapshotInfo.getSnapshotStats(conf, snapshot);
+      snapshotTable = TableName.valueOf(snapshot.getTable());
       break;
     }
   }
@@ -162,7 +165,8 @@
         <th>State</th>
     </tr>
     <tr>
-        <td><a href="table.jsp?name=<%= snapshot.getTable() %>"><%= snapshot.getTable() %></a></td>
+        <td><a href="table.jsp?name=<%= snapshotTable.getNameAsString() %>">
+            <%= snapshotTable.getNameAsString() %></a></td>
         <td><%= new Date(snapshot.getCreationTime()) %></td>
         <td><%= snapshot.getType() %></td>
         <td><%= snapshot.getVersion() %></td>

Modified: hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/table.jsp?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/table.jsp (original)
+++ hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/table.jsp Thu Aug  8 06:08:23 2013
@@ -34,12 +34,14 @@
   import="org.apache.hadoop.hbase.master.HMaster" 
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.FSUtils"
-  import="org.apache.hadoop.hbase.protobuf.ProtobufUtil"%><%
+  import="org.apache.hadoop.hbase.protobuf.ProtobufUtil"%>
+<%@ page import="org.apache.hadoop.hbase.TableName" %>
+<%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   Configuration conf = master.getConfiguration();
   HBaseAdmin hbadmin = new HBaseAdmin(conf);
-  String tableName = request.getParameter("name");
-  HTable table = new HTable(conf, tableName);
+  String fqtn = request.getParameter("name");
+  HTable table = new HTable(conf, fqtn);
   String tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Requests</th></tr>";
   ServerName rl = master.getCatalogTracker().getMetaLocation();
   boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
@@ -116,7 +118,7 @@
     if (key != null && key.length() > 0) {
       hbadmin.split(key);
     } else {
-      hbadmin.split(tableName);
+      hbadmin.split(fqtn);
     }
     
     %> Split request accepted. <%
@@ -124,7 +126,7 @@
     if (key != null && key.length() > 0) {
       hbadmin.compact(key);
     } else {
-      hbadmin.compact(tableName);
+      hbadmin.compact(fqtn);
     }
     %> Compact request accepted. <%
   }
@@ -139,7 +141,7 @@
 %>
   <head>
     <meta charset="utf-8">
-    <title>Table: <%= tableName %></title>
+    <title>Table: <%= fqtn %></title>
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <meta name="description" content="">
     <meta name="author" content="">
@@ -180,12 +182,12 @@
 
     <div class="row-fluid inner_header">
         <div class="page-header">
-            <h1>Table <small><%= tableName %></small></h1>
+            <h1>Table <small><%= fqtn %></small></h1>
         </div>
     </div>
     <div class="row-fluid">
 <%
-  if(tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))) {
+  if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) {
 %>
 <%= tableHeader %>
 <%
@@ -226,7 +228,7 @@
 <%  if (showFragmentation) { %>
   <tr>
       <td>Fragmentation</td>
-      <td><%= frags.get(tableName) != null ? frags.get(tableName).intValue() + "%" : "n/a" %></td>
+      <td><%= frags.get(fqtn) != null ? frags.get(fqtn).intValue() + "%" : "n/a" %></td>
       <td>How fragmented is the table. After a major compaction it is 0%.</td>
   </tr>
 <%  } %>
@@ -311,7 +313,7 @@ Actions:
 <tr>
   <form method="get">
   <input type="hidden" name="action" value="compact">
-  <input type="hidden" name="name" value="<%= tableName %>">
+  <input type="hidden" name="name" value="<%= fqtn %>">
   <td style="border-style: none; text-align: center">
       <input style="font-size: 12pt; width: 10em" type="submit" value="Compact" class="btn"></td>
   <td style="border-style: none" width="5%">&nbsp;</td>
@@ -325,7 +327,7 @@ Actions:
 <tr>
   <form method="get">
   <input type="hidden" name="action" value="split">
-  <input type="hidden" name="name" value="<%= tableName %>">
+  <input type="hidden" name="name" value="<%= fqtn %>">
   <td style="border-style: none; text-align: center">
       <input style="font-size: 12pt; width: 10em" type="submit" value="Split" class="btn"></td>
   <td style="border-style: none" width="5%">&nbsp;</td>

Modified: hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp (original)
+++ hbase/branches/0.95/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp Thu Aug  8 06:08:23 2013
@@ -90,7 +90,7 @@
 </tr>
 <%   for(HTableDescriptor htDesc : tables ) { %>
 <tr>
-    <td><a href="/table.jsp?name=<%= escapeXml(htDesc.getNameAsString()) %>"><%= escapeXml(htDesc.getNameAsString()) %></a></td>
+    <td><a href="/table.jsp?name=<%= escapeXml(htDesc.getTableName().getNameAsString()) %>"><%= escapeXml(htDesc.getTableName().getNameAsString()) %></a></td>
     <td><%= htDesc.toString() %></td>
 </tr>
 <%   }  %>

Modified: hbase/branches/0.95/hbase-server/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/hbase/admin.rb?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/hbase/admin.rb (original)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/hbase/admin.rb Thu Aug  8 06:08:23 2013
@@ -42,7 +42,7 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Returns a list of tables in hbase
     def list(regex = ".*")
-        @admin.listTables(regex).map { |t| t.getNameAsString }
+        @admin.listTables(regex).map { |t| t.getTableName().getNameAsString }
     end
 
     #----------------------------------------------------------------------------------------------
@@ -152,7 +152,7 @@ module Hbase
     # Disables all tables matching the given regex
     def disable_all(regex)
       regex = regex.to_s
-      @admin.disableTables(regex).map { |t| t.getNameAsString }
+      @admin.disableTables(regex).map { |t| t.getTableName().getNameAsString }
     end
 
     #---------------------------------------------------------------------------------------------
@@ -180,7 +180,7 @@ module Hbase
     # Drops a table
     def drop_all(regex)
       regex = regex.to_s
-      failed  = @admin.deleteTables(regex).map { |t| t.getNameAsString }
+      failed  = @admin.deleteTables(regex).map { |t| t.getTableName().getNameAsString }
       return failed
     end
 
@@ -201,7 +201,7 @@ module Hbase
       has_columns = false
 
       # Start defining the table
-      htd = org.apache.hadoop.hbase.HTableDescriptor.new(table_name)
+      htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name))
       splits = nil
       # Args are either columns or splits, add them to the table definition
       # TODO: add table options support
@@ -368,7 +368,7 @@ module Hbase
 
       status = Pair.new()
       begin
-        status = @admin.getAlterStatus(table_name.to_java_bytes)
+        status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
         if status.getSecond() != 0
           puts "#{status.getSecond() - status.getFirst()}/#{status.getSecond()} regions updated."
         else
@@ -650,7 +650,8 @@ module Hbase
     # Enables/disables a region by name
     def online(region_name, on_off)
       # Open meta table
-      meta = org.apache.hadoop.hbase.client.HTable.new(org.apache.hadoop.hbase.HConstants::META_TABLE_NAME)
+      meta = org.apache.hadoop.hbase.client.HTable.new(
+          org.apache.hadoop.hbase.TableName::META_TABLE_NAME)
 
       # Read region info
       # FIXME: fail gracefully if can't find the region
@@ -722,5 +723,101 @@ module Hbase
         end
     end
 
+    #----------------------------------------------------------------------------------------------
+    # Returns the description of the named namespace
+    def describe_namespace(namespace_name)
+      namespace = @admin.getNamespaceDescriptor(namespace_name)
+
+      unless namespace.nil?
+        return namespace.to_s
+      end
+
+      raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of namespaces in hbase
+    def list_namespace
+      @admin.listNamespaceDescriptors.map { |ns| ns.getName }
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of tables in the given namespace
+    def list_namespace_tables(namespace_name)
+      unless namespace_name.nil?
+        return @admin.getTableDescriptorsByNamespace(namespace_name).map { |t| t.getTableName().getNameAsString }
+      end
+
+      raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Creates a namespace
+    def create_namespace(namespace_name, *args)
+      # Fail if namespace name is not a string
+      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+
+      # Flatten params array
+      args = args.flatten.compact
+
+      # Start defining the namespace
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(namespace_name)
+      args.each do |arg|
+        unless arg.kind_of?(Hash)
+          raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
+        end
+        for k,v in arg
+          v = v.to_s unless v.nil?
+          nsb.addProperty(k, v)
+        end
+      end
+      @admin.createNamespace(nsb.build())
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Modifies a namespace
+    def alter_namespace(namespace_name, *args)
+      # Fail if namespace name is not a string
+      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+
+      nsd = @admin.getNamespaceDescriptor(namespace_name)
+
+      unless nsd
+        raise(ArgumentError, "Namespace does not exist")
+      end
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(nsd)
+
+      # Flatten params array
+      args = args.flatten.compact
+
+      # Apply the requested namespace changes
+      args.each do |arg|
+        unless arg.kind_of?(Hash)
+          raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type")
+        end
+        method = arg[METHOD]
+        if method == "unset"
+          nsb.removeConfiguration(arg[NAME])
+        elsif method == "set"
+          arg.delete(METHOD)
+          for k,v in arg
+            v = v.to_s unless v.nil?
+
+            nsb.addConfiguration(k, v)
+          end
+        else
+          raise(ArgumentError, "Unknown method #{method}")
+        end
+      end
+      @admin.modifyNamespace(nsb.build())
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Drops a namespace
+    def drop_namespace(namespace_name)
+      @admin.deleteNamespace(namespace_name)
+    end
+
   end
 end

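The new Ruby namespace helpers above are thin wrappers over the admin API.
Roughly the same flow in Java (the namespace name and property key/value are
illustrative; the Builder nested type name is assumed from the current API):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class NamespaceAdminSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          // create_namespace 'ns1', {'PROPERTY_NAME' => 'PROPERTY_VALUE'}
          NamespaceDescriptor.Builder nsb = NamespaceDescriptor.create("ns1");
          nsb.addProperty("PROPERTY_NAME", "PROPERTY_VALUE");
          admin.createNamespace(nsb.build());
          // describe_namespace 'ns1'
          System.out.println(admin.getNamespaceDescriptor("ns1"));
          // drop_namespace 'ns1'
          admin.deleteNamespace("ns1");
        } finally {
          admin.close();
        }
      }
    }
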
Modified: hbase/branches/0.95/hbase-server/src/main/ruby/hbase/table.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/hbase/table.rb?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/hbase/table.rb (original)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/hbase/table.rb Thu Aug  8 06:08:23 2013
@@ -466,7 +466,8 @@ EOF
     # Checks if current table is one of the 'meta' tables
     def is_meta_table?
       tn = @table.table_name
-      org.apache.hadoop.hbase.util.Bytes.equals(tn, org.apache.hadoop.hbase.HConstants::META_TABLE_NAME)
+      org.apache.hadoop.hbase.util.Bytes.equals(tn,
+          org.apache.hadoop.hbase.TableName::META_TABLE_NAME.getName)
     end
 
     # Returns family and (when has it) qualifier for a column name

Modified: hbase/branches/0.95/hbase-server/src/main/ruby/shell.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/shell.rb?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/shell.rb (original)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/shell.rb Thu Aug  8 06:08:23 2013
@@ -254,6 +254,19 @@ Shell.load_command_group(
 )
 
 Shell.load_command_group(
+  'namespace',
+  :full_name => 'NAMESPACE MANAGEMENT COMMANDS',
+  :commands => %w[
+    namespace_create
+    namespace_drop
+    namespace_alter
+    namespace_describe
+    namespace_list
+    namespace_list_tables
+  ]
+)
+
+Shell.load_command_group(
   'dml',
   :full_name => 'DATA MANIPULATION COMMANDS',
   :commands => %w[

Modified: hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands.rb?rev=1511591&r1=1511590&r2=1511591&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands.rb (original)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands.rb Thu Aug  8 06:08:23 2013
@@ -81,12 +81,12 @@ module Shell
 
       def translate_hbase_exceptions(*args)
         yield
-      rescue org.apache.hadoop.hbase.exceptions.TableNotFoundException
+      rescue org.apache.hadoop.hbase.TableNotFoundException
         raise "Unknown table #{args.first}!"
-      rescue org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException
+      rescue org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException
         valid_cols = table(args.first).get_all_columns.map { |c| c + '*' }
         raise "Unknown column family! Valid column names: #{valid_cols.join(", ")}"
-      rescue org.apache.hadoop.hbase.exceptions.TableExistsException
+      rescue org.apache.hadoop.hbase.TableExistsException
         raise "Table already exists: #{args.first}!"
       end
     end

Added: hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb?rev=1511591&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb (added)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb Thu Aug  8 06:08:23 2013
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class NamespaceAlter < Command
+      def help
+        return <<-EOF
+Alter namespace properties.
+
+To add/modify a property:
+
+  hbase> namespace_alter 'ns1', {METHOD => 'set', 'PROPERTY_NAME' => 'PROPERTY_VALUE'}
+
+To delete a property:
+
+  hbase> namespace_alter 'ns1', {METHOD => 'unset', NAME => 'PROPERTY_NAME'}
+EOF
+      end
+
+      def command(namespace, *args)
+        format_simple_command do
+          admin.alter_namespace(namespace, *args)
+        end
+      end
+    end
+  end
+end

Added: hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_create.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_create.rb?rev=1511591&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_create.rb (added)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_create.rb Thu Aug  8 06:08:23 2013
@@ -0,0 +1,41 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class NamespaceCreate < Command
+      def help
+        return <<-EOF
+Create a namespace; pass the namespace name and, optionally,
+a dictionary of namespace configuration.
+Examples:
+
+  hbase> namespace_create 'ns1'
+  hbase> namespace_create 'ns1', {'PROPERTY_NAME' => 'PROPERTY_VALUE'}
+EOF
+      end
+
+      def command(namespace, *args)
+        format_simple_command do
+          admin.create_namespace(namespace, *args)
+        end
+      end
+    end
+  end
+end

Added: hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb?rev=1511591&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb (added)
+++ hbase/branches/0.95/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb Thu Aug  8 06:08:23 2013
@@ -0,0 +1,41 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class NamespaceDescribe < Command
+      def help
+        return <<-EOF
+Describe the named namespace. For example:
+  hbase> namespace_describe 'ns1'
+EOF
+      end
+
+      def command(namespace)
+        now = Time.now
+
+        desc = admin.describe_namespace(namespace)
+
+        formatter.header([ "DESCRIPTION" ], [ 64 ])
+        formatter.row([ desc ], true, [ 64 ])
+        formatter.footer(now)
+      end
+    end
+  end
+end