Posted to commits@hbase.apache.org by st...@apache.org on 2008/07/17 09:17:28 UTC

svn commit: r677517 [6/6] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/ipc/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserv...

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/MetaUtils.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/MetaUtils.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/MetaUtils.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,452 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.migration.v5;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Writables;
+
+/**
+ * Contains utility methods for manipulating HBase meta tables.
+ * Be sure to call {@link #shutdown()} when done with this class so it closes
+ * resources opened during meta processing (ROOT, META, etc.).  Use this
+ * class with care: if run as part of a migration, first verify that the
+ * migration is actually needed before editing anything.
+ */
+public class MetaUtils {
+  private static final Log LOG = LogFactory.getLog(MetaUtils.class);
+  private final HBaseConfiguration conf;
+  private FileSystem fs;
+  private Path rootdir;
+  private HLog log;
+  private HRegion rootRegion;
+  private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
+    new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));
+  
+  /** Default constructor 
+   * @throws IOException */
+  public MetaUtils() throws IOException {
+    this(new HBaseConfiguration());
+  }
+  
+  /** @param conf HBaseConfiguration 
+   * @throws IOException */
+  public MetaUtils(HBaseConfiguration conf) throws IOException {
+    this.conf = conf;
+    conf.setInt("hbase.client.retries.number", 1);
+    this.rootRegion = null;
+    initialize();
+  }
+
+  /**
+   * Verifies that DFS is available and that HBase is off-line.
+   * @throws IOException
+   */
+  private void initialize() throws IOException {
+    this.fs = FileSystem.get(this.conf);              // get DFS handle
+    // Get root directory of HBase installation
+    this.rootdir = fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
+    if (!fs.exists(rootdir)) {
+      String message = "HBase root directory " + rootdir.toString() +
+        " does not exist.";
+      LOG.error(message);
+      throw new FileNotFoundException(message);
+    }
+  }
+
+  /** @return the HLog 
+   * @throws IOException */
+  public synchronized HLog getLog() throws IOException {
+    if (this.log == null) {
+      Path logdir = new Path(this.fs.getHomeDirectory(),
+          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
+      this.log = new HLog(this.fs, logdir, this.conf, null);
+    }
+    return this.log;
+  }
+  
+  /**
+   * @return HRegion for root region
+   * @throws IOException
+   */
+  public HRegion getRootRegion() throws IOException {
+    if (this.rootRegion == null) {
+      openRootRegion();
+    }
+    return this.rootRegion;
+  }
+  
+  /**
+   * Open a meta region, or return it from the cache if already open.
+   * 
+   * @param metaInfo HRegionInfo for meta region
+   * @return meta HRegion
+   * @throws IOException
+   */
+  public HRegion getMetaRegion(HRegionInfo metaInfo) throws IOException {
+    HRegion meta = metaRegions.get(metaInfo.getRegionName());
+    if (meta == null) {
+      meta = openMetaRegion(metaInfo);
+      this.metaRegions.put(metaInfo.getRegionName(), meta);
+    }
+    return meta;
+  }
+  
+  /**
+   * Closes catalog regions if open. Also closes and deletes the HLog. You
+   * must call this method if you want to persist changes made during a
+   * MetaUtils edit session.
+   */
+  public void shutdown() {
+    if (this.rootRegion != null) {
+      try {
+        this.rootRegion.close();
+      } catch (IOException e) {
+        LOG.error("closing root region", e);
+      } finally {
+        this.rootRegion = null;
+      }
+    }
+    try {
+      for (HRegion r: metaRegions.values()) {
+        r.close();
+      }
+    } catch (IOException e) {
+      LOG.error("closing meta region", e);
+    } finally {
+      metaRegions.clear();
+    }
+    try {
+      if (this.log != null) {
+        this.log.rollWriter();
+        this.log.closeAndDelete();
+      }
+    } catch (IOException e) {
+      LOG.error("closing HLog", e);
+    } finally {
+      this.log = null;
+    }
+  }
+
+  /**
+   * Used by scanRootRegion and scanMetaRegion to call back the caller so it
+   * can process the data for a row.
+   */
+  public interface ScannerListener {
+    /**
+     * Callback so client of scanner can process row contents
+     * 
+     * @param info HRegionInfo for row
+     * @return false to terminate the scan
+     * @throws IOException
+     */
+    public boolean processRow(HRegionInfo info) throws IOException;
+  }
+  
+  /**
+   * Scans the root region. For every meta region found, calls the listener with
+   * the HRegionInfo of the meta region.
+   * 
+   * @param listener method to be called for each meta region found
+   * @throws IOException
+   */
+  public void scanRootRegion(ScannerListener listener) throws IOException {
+    // Open root region so we can scan it
+    if (this.rootRegion == null) {
+      openRootRegion();
+    }
+    InternalScanner rootScanner = rootRegion.getScanner(
+        HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
+        HConstants.LATEST_TIMESTAMP, null);
+    try {
+      HStoreKey key = new HStoreKey();
+      SortedMap<byte [], Cell> results =
+        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
+      while (rootScanner.next(key, results)) {
+        HRegionInfo info = (HRegionInfo)Writables.getWritable(
+            results.get(HConstants.COL_REGIONINFO).getValue(),
+            new HRegionInfo());
+        if (info == null) {
+          LOG.warn("region info is null for row " + key.getRow() +
+              " in table " + HConstants.ROOT_TABLE_NAME);
+          continue;
+        }
+        if (!listener.processRow(info)) {
+          break;
+        }
+        results.clear();
+      }
+    } finally {
+      rootScanner.close();
+    }
+  }
+
+  /**
+   * Scans a meta region. For every region found, calls the listener with
+   * the HRegionInfo of the region.
+   * TODO: Use Visitor rather than Listener pattern.  Allow multiple Visitors.
+   * Use this everywhere we scan meta regions: e.g. in metascanners, in close
+   * handling, etc.  Have it pass in the whole row, not just HRegionInfo.
+   * 
+   * @param metaRegionInfo HRegionInfo for meta region
+   * @param listener method to be called for each meta region found
+   * @throws IOException
+   */
+  public void scanMetaRegion(HRegionInfo metaRegionInfo,
+    ScannerListener listener)
+  throws IOException {
+    // Open meta region so we can scan it
+    HRegion metaRegion = openMetaRegion(metaRegionInfo);
+    scanMetaRegion(metaRegion, listener);
+  }
+
+  /**
+   * Scan the passed-in meta region <code>m</code>, invoking the passed
+   * <code>listener</code> per row found.
+   * @param m
+   * @param listener
+   * @throws IOException
+   */
+  public void scanMetaRegion(final HRegion m, final ScannerListener listener)
+  throws IOException {
+    InternalScanner metaScanner = m.getScanner(HConstants.COL_REGIONINFO_ARRAY,
+      HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
+    try {
+      HStoreKey key = new HStoreKey();
+      SortedMap<byte[], Cell> results =
+        new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
+      while (metaScanner.next(key, results)) {
+        HRegionInfo info = (HRegionInfo)Writables.getWritable(
+            results.get(HConstants.COL_REGIONINFO).getValue(),
+            new HRegionInfo());
+        if (info == null) {
+          LOG.warn("regioninfo null for row " + key.getRow() + " in table " +
+            Bytes.toString(m.getTableDesc().getName()));
+          continue;
+        }
+        if (!listener.processRow(info)) {
+          break;
+        }
+        results.clear();
+      }
+    } finally {
+      metaScanner.close();
+    }
+  }
+
+  private synchronized HRegion openRootRegion() throws IOException {
+    if (this.rootRegion != null) {
+      return this.rootRegion;
+    }
+    this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
+      this.rootdir, getLog(), this.conf);
+    this.rootRegion.compactStores();
+    return this.rootRegion;
+  }
+
+  private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
+    HRegion meta =
+      HRegion.openHRegion(metaInfo, this.rootdir, getLog(), this.conf);
+    meta.compactStores();
+    return meta;
+  }
+ 
+  /**
+   * Set a single region on/offline.
+   * This is a tool to repair tables that have offlined regions in their midst.
+   * Can happen on occasion.  Use at your own risk.  Call from a bit of java
+   * or jython script.  This method is 'expensive' in that it creates a
+   * {@link HTable} instance per invocation to go against <code>.META.</code>
+   * @param c A configuration that has its <code>hbase.master</code>
+   * properly set.
+   * @param row Row in the catalog .META. table whose HRegionInfo's offline
+   * status we want to change.
+   * @param onlineOffline Pass <code>true</code> to OFFLINE the region.
+   * @throws IOException
+   */
+  public static void changeOnlineStatus(final HBaseConfiguration c,
+      final byte [] row, final boolean onlineOffline)
+  throws IOException {
+    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
+    Cell cell = t.get(row, HConstants.COL_REGIONINFO);
+    if (cell == null) {
+      throw new IOException("no information for row " + row);
+    }
+    // Throws exception if null.
+    HRegionInfo info = (HRegionInfo)Writables.
+      getWritable(cell.getValue(), new HRegionInfo());
+    BatchUpdate b = new BatchUpdate(row);
+    info.setOffline(onlineOffline);
+    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(info));
+    b.delete(HConstants.COL_SERVER);
+    b.delete(HConstants.COL_STARTCODE);
+    t.commit(b);
+  }
+  
+  /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.AddColumn.
+   * @param tableName
+   * @param hcd Add this column to <code>tableName</code>
+   * @throws IOException 
+   */
+  public void addColumn(final byte [] tableName,
+      final HColumnDescriptor hcd)
+  throws IOException {
+    List<HRegionInfo> metas = getMETARows(tableName);
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+        
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
+            Bytes.toString(info.getTableDesc().getName()));
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().addFamily(hcd);
+            updateMETARegionInfo(m, info);
+            return true;
+          }
+          // If we have not yet encountered the table, inTable is still true
+          // and we keep scanning.  Otherwise we have already passed beyond
+          // the table's rows, so returning false stops the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+  
+  /**
+   * Offline version of the online TableOperation,
+   * org.apache.hadoop.hbase.master.DeleteColumn.
+   * @param tableName
+   * @param columnFamily Name of the column family to remove.
+   * @throws IOException
+   */
+  public void deleteColumn(final byte [] tableName,
+      final byte [] columnFamily) throws IOException {
+    List<HRegionInfo> metas = getMETARows(tableName);
+    final Path tabledir = new Path(rootdir, Bytes.toString(tableName));
+    for (HRegionInfo hri: metas) {
+      final HRegion m = getMetaRegion(hri);
+      scanMetaRegion(m, new ScannerListener() {
+        private boolean inTable = true;
+        
+        @SuppressWarnings("synthetic-access")
+        public boolean processRow(HRegionInfo info) throws IOException {
+          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
+            this.inTable = false;
+            info.getTableDesc().removeFamily(columnFamily);
+            updateMETARegionInfo(m, info);
+            FSUtils.deleteColumnFamily(fs, tabledir, info.getEncodedName(),
+              HStoreKey.getFamily(columnFamily));
+            return false;
+          }
+          // If we have not yet encountered the table, inTable is still true
+          // and we keep scanning.  Otherwise we have already passed beyond
+          // the table's rows, so returning false stops the scanner.
+          return this.inTable;
+        }});
+    }
+  }
+  
+  /**
+   * Update COL_REGIONINFO in meta region r with HRegionInfo hri
+   * 
+   * @param r
+   * @param hri
+   * @throws IOException
+   */
+  public void updateMETARegionInfo(HRegion r, final HRegionInfo hri) 
+  throws IOException {
+    if (LOG.isDebugEnabled()) {
+      HRegionInfo h = (HRegionInfo)Writables.getWritable(
+        r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue(),
+        new HRegionInfo());
+      LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
+        " for " + hri.toString() + " in " + r.toString() + " is: " +
+        h.toString());
+    }
+    BatchUpdate b = new BatchUpdate(hri.getRegionName());
+    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
+    r.batchUpdate(b);
+    if (LOG.isDebugEnabled()) {
+      HRegionInfo h = (HRegionInfo)Writables.getWritable(
+          r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue(),
+          new HRegionInfo());
+        LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
+          " for " + hri.toString() + " in " + r.toString() + " is: " +
+          h.toString());
+    }
+  }
+
+  /**
+   * @return List of {@link HRegionInfo} rows found in the ROOT or META
+   * catalog table.
+   * @param tableName Name of table to go looking for.
+   * @throws IOException
+   * @see #getMetaRegion(HRegionInfo)
+   */
+  public List<HRegionInfo> getMETARows(final byte [] tableName)
+  throws IOException {
+    final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
+    // If the passed table name is META, return the root region.
+    if (Bytes.equals(HConstants.META_TABLE_NAME, tableName)) {
+      result.add(openRootRegion().getRegionInfo());
+      return result;
+    }
+    // Return all meta regions that contain the passed tablename.
+    scanRootRegion(new ScannerListener() {
+      private final Log SL_LOG = LogFactory.getLog(this.getClass());
+      
+      @SuppressWarnings("unused")
+      public boolean processRow(HRegionInfo info) throws IOException {
+        SL_LOG.debug("Testing " + info);
+        if (Bytes.equals(info.getTableDesc().getName(),
+            HConstants.META_TABLE_NAME)) {
+          result.add(info);
+          return false;
+        }
+        return true;
+      }});
+    return result;
+  }
+}
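
For orientation, a minimal sketch of how MetaUtils is meant to be driven from
caller code.  The table/row name below is hypothetical, and the types are the
v5 migration copies added in this commit (assumed public):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.migration.v5.HRegionInfo;
    import org.apache.hadoop.hbase.util.migration.v5.MetaUtils;

    public class MetaUtilsSketch {
      public static void main(String[] args) throws IOException {
        final MetaUtils utils = new MetaUtils(new HBaseConfiguration());
        try {
          // Walk ROOT; for each META region found, walk it in turn and
          // print every region it catalogs.
          utils.scanRootRegion(new MetaUtils.ScannerListener() {
            public boolean processRow(HRegionInfo metaInfo) throws IOException {
              utils.scanMetaRegion(utils.getMetaRegion(metaInfo),
                new MetaUtils.ScannerListener() {
                  public boolean processRow(HRegionInfo info) {
                    System.out.println(Bytes.toString(info.getRegionName()));
                    return true;  // keep scanning
                  }
                });
              return true;
            }
          });
        } finally {
          utils.shutdown();  // closes ROOT/META and persists any edits
        }
      }
    }

The static changeOnlineStatus is likewise meant to be script-driven, e.g.
MetaUtils.changeOnlineStatus(conf, Bytes.toBytes("mytable,,1216283809532"),
true) to mark a (hypothetical) region offline.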

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/RegionHistorian.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/RegionHistorian.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/RegionHistorian.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/RegionHistorian.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,322 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.migration.v5;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.GregorianCalendar;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * The RegionHistorian keeps track of every modification a region goes
+ * through.  Its public methods update this information in the
+ * <code>.META.</code> table and retrieve it.  This is a Singleton.  By
+ * default the Historian is offline and will not log; it is enabled deep
+ * inside the regionserver and master once there is some certainty that
+ * .META. has been deployed.
+ */
+public class RegionHistorian implements HConstants {
+  private static final Log LOG = LogFactory.getLog(RegionHistorian.class);
+  
+  private HTable metaTable;
+
+  private GregorianCalendar cal = new GregorianCalendar();
+
+  /** Singleton reference */
+  private static RegionHistorian historian;
+
+  /** Date formatter for the timestamp in RegionHistoryInformation */
+  private static SimpleDateFormat dateFormat = new SimpleDateFormat(
+      "EEE, d MMM yyyy HH:mm:ss");
+
+  public static enum HistorianColumnKey  {
+    REGION_CREATION ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"creation")),
+    REGION_OPEN ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"open")),
+    REGION_SPLIT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"split")),
+    REGION_COMPACTION ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"compaction")),
+    REGION_FLUSH ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"flush")),
+    REGION_ASSIGNMENT ( Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR+"assignment"));
+
+    public byte[] key;
+
+    HistorianColumnKey(byte[] key) {
+      this.key = key;
+    }
+  } 
+
+  /**
+   * Default constructor.  Inaccessible; use {@link #getInstance()} to obtain
+   * the Singleton instance of this class.  The reference to the .META. table
+   * is not initialized until {@link #online(HBaseConfiguration)} is called.
+   */
+  private RegionHistorian() {
+    super();
+  }
+
+  /**
+   * Get the RegionHistorian Singleton instance.
+   * @return The region historian
+   */
+  public static synchronized RegionHistorian getInstance() {
+    if (historian == null) {
+      historian = new RegionHistorian();
+    }
+    return historian;
+  }
+
+  /**
+   * Returns, for a given region name, an ordered list by timestamp of all
+   * values in the historian column of the .META. table.
+   * @param regionName
+   *          Region name as a string
+   * @return List of RegionHistoryInformation or null if we're offline.
+   */
+  public List<RegionHistoryInformation> getRegionHistory(String regionName) {
+    if (!isOnline()) {
+      return null;
+    }
+    List<RegionHistoryInformation> informations =
+      new ArrayList<RegionHistoryInformation>();
+    try {
+      /*
+       * TODO: HistorianColumnKey values are iterated because there is currently
+       * no other way to retrieve all versions plus the column key information.
+       * To be changed when HTable.getRow handles versions.
+       */
+      for (HistorianColumnKey keyEnu : HistorianColumnKey.values()) {
+        byte[] columnKey = keyEnu.key;
+        Cell[] cells = this.metaTable.get(Bytes.toBytes(regionName),
+            columnKey, ALL_VERSIONS);
+        if (cells != null) {
+          for (Cell cell : cells) {
+            informations.add(new RegionHistoryInformation(cell.getTimestamp(),
+                Bytes.toString(columnKey).split(":")[1],
+                Bytes.toString(cell.getValue())));
+          }
+        }
+      }
+    } catch (IOException ioe) {
+      LOG.warn("Unable to retrieve region history", ioe);
+    }
+    Collections.sort(informations);
+    return informations;
+  }
+  
+  /**
+   * Method to add an assignment event to the row in the .META. table
+   * @param info
+   * @param serverName
+   */
+  public void addRegionAssignment(HRegionInfo info, String serverName) {
+    add(HistorianColumnKey.REGION_ASSIGNMENT.key, "Region assigned to server "
+        + serverName, info);
+  }
+
+  /**
+   * Method to add a creation event to the row in the .META. table
+   * @param info
+   */
+  public void addRegionCreation(HRegionInfo info) {
+    add(HistorianColumnKey.REGION_CREATION.key, "Region creation", info);
+  }
+
+  /**
+   * Method to add an opening event to the row in the .META. table
+   * @param info
+   * @param address
+   */
+  public void addRegionOpen(HRegionInfo info, HServerAddress address) {
+    add(HistorianColumnKey.REGION_OPEN.key, "Region opened on server: "
+        + address.getHostname(), info);
+  }
+
+  /**
+   * Method to add a split event to the rows in the .META. table with
+   * information from oldInfo.
+   * @param oldInfo
+   * @param newInfo1 
+   * @param newInfo2
+   */
+  public void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1,
+     HRegionInfo newInfo2) {
+    HRegionInfo[] infos = new HRegionInfo[] { newInfo1, newInfo2 };
+    for (HRegionInfo info : infos) {
+      add(HistorianColumnKey.REGION_SPLIT.key, "Region split from: "
+          + oldInfo.getRegionNameAsString(), info);
+    }
+  }
+
+  /**
+   * Method to add a compaction event to the row in the .META. table
+   * @param info
+   */
+  public void addRegionCompaction(final HRegionInfo info,
+      final String timeTaken) {
+    // While the historian cannot log flushes -- doing so could deadlock the
+    // regionserver; see the note in addRegionFlush -- there should be no
+    // such danger when compacting: compactions are not allowed while
+    // Flusher#flushSomeRegions is running.
+    if (LOG.isDebugEnabled()) {
+      add(HistorianColumnKey.REGION_COMPACTION.key,
+        "Region compaction completed in " + timeTaken, info);
+    }
+  }
+
+  /**
+   * Method to add a flush event to the row in the .META. table
+   * @param info
+   */
+  public void addRegionFlush(HRegionInfo info,
+    @SuppressWarnings("unused") String timeTaken) {
+    // Disabled.  No-op.  If this regionserver is hosting the .META. AND is
+    // holding the reclaimMemcacheMemory global lock --
+    // see Flusher#flushSomeRegions --  we deadlock.  For now, just disable
+    // logging of flushes.
+  }
+
+  /**
+   * Method to add an event with LATEST_TIMESTAMP.
+   * @param column
+   * @param text
+   * @param info
+   */
+  private void add(byte[] column,
+      String text, HRegionInfo info) {
+    add(column, text, info, LATEST_TIMESTAMP);
+  }
+
+  /**
+   * Method to add an event with provided information.
+   * @param column
+   * @param text
+   * @param info
+   * @param timestamp
+   */
+  private void add(byte[] column,
+      String text, HRegionInfo info, long timestamp) {
+    if (!isOnline()) {
+      // It's a no-op.
+      return;
+    }
+    if (!info.isMetaRegion()) {
+      BatchUpdate batch = new BatchUpdate(info.getRegionName());
+      batch.setTimestamp(timestamp);
+      batch.put(column, Bytes.toBytes(text));
+      try {
+        this.metaTable.commit(batch);
+      } catch (IOException ioe) {
+        LOG.warn("Unable to '" + text + "'", ioe);
+      }
+    }
+  }
+
+  /**
+   * Inner class that only contains information about an event.
+   * 
+   */
+  public class RegionHistoryInformation implements
+  Comparable<RegionHistoryInformation> {
+
+    private long timestamp;
+
+    private String event;
+
+    private String description;
+
+    public RegionHistoryInformation(long timestamp, String event,
+        String description) {
+      this.timestamp = timestamp;
+      this.event = event;
+      this.description = description;
+    }
+
+    /**
+     * Returns the inverse value of Long.compareTo
+     */
+    public int compareTo(RegionHistoryInformation otherInfo) {
+      return -1 * Long.valueOf(timestamp).compareTo(otherInfo.getTimestamp());
+    }
+
+    public String getEvent() {
+      return event;
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
+    public long getTimestamp() {
+      return timestamp;
+    }
+
+    /**
+     * @return The value of the timestamp processed with the date formatter.
+     */
+    public String getTimestampAsString() {
+      cal.setTimeInMillis(timestamp);
+      return dateFormat.format(cal.getTime());
+    }
+  }
+
+  /**
+   * @return True if the historian is online. When offline, will not add
+   * updates to the .META. table.
+   */
+  public boolean isOnline() {
+    return this.metaTable != null;
+  }
+
+  /**
+   * Onlines the historian.  Invoke after the cluster has spun up.
+   * @param c Configuration used to connect to the .META. table.
+   */
+  public void online(final HBaseConfiguration c) {
+    try {
+      this.metaTable = new HTable(c, META_TABLE_NAME);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Onlined");
+      }
+    } catch (IOException ioe) {
+      LOG.error("Unable to create RegionHistorian", ioe);
+    }
+  }
+  
+  /**
+   * Offlines the historian.
+   * @see #online(HBaseConfiguration)
+   */
+  public void offline() {
+    this.metaTable = null;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Offlined");
+    }
+  }
+}
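
A short sketch of the historian's intended life cycle -- offline by default,
onlined once .META. is deployed, then queried.  The region name below is
hypothetical:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.migration.v5.RegionHistorian;
    import org.apache.hadoop.hbase.util.migration.v5.RegionHistorian.RegionHistoryInformation;

    public class HistorianSketch {
      public static void main(String[] args) {
        RegionHistorian historian = RegionHistorian.getInstance();
        // While offline, the add*() calls are no-ops and getRegionHistory()
        // returns null.
        historian.online(new HBaseConfiguration());
        List<RegionHistoryInformation> history =
          historian.getRegionHistory("mytable,,1216283809532");
        if (history != null) {
          for (RegionHistoryInformation event : history) {
            System.out.println(event.getTimestampAsString() + " " +
              event.getEvent() + ": " + event.getDescription());
          }
        }
        historian.offline();  // drop the .META. handle when done
      }
    }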

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/StoreFileScanner.java?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/StoreFileScanner.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/StoreFileScanner.java Thu Jul 17 00:17:26 2008
@@ -0,0 +1,391 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.migration.v5;
+
+import java.io.IOException;
+import java.util.SortedMap;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.regionserver.ChangedReadersObserver;
+import org.apache.hadoop.hbase.regionserver.HAbstractScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.MapFile;
+
+/**
+ * A scanner that iterates through HStore files
+ */
+class StoreFileScanner extends HAbstractScanner
+implements ChangedReadersObserver {
+  private final Log LOG = LogFactory.getLog(this.getClass());
+  
+  // Keys retrieved from the sources
+  private volatile HStoreKey[] keys;
+  // Values that correspond to those keys
+  private volatile byte [][] vals;
+  
+  // Readers we go against.
+  private volatile MapFile.Reader[] readers;
+  
+  // Store this scanner came out of.
+  private final HStore store;
+  
+  // Used around replacement of Readers if they change while we're scanning.
+  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  
+  /**
+   * @param store
+   * @param timestamp
+   * @param targetCols
+   * @param firstRow
+   * @throws IOException
+   */
+  public StoreFileScanner(final HStore store, final long timestamp,
+    final byte [][] targetCols, final byte [] firstRow)
+  throws IOException {
+    super(timestamp, targetCols);
+    this.store = store;
+    this.store.addChangedReaderObserver(this);
+    this.store.lock.readLock().lock();
+    try {
+      openReaders(firstRow);
+    } catch (Exception ex) {
+      close();
+      IOException e = new IOException("HStoreScanner failed construction");
+      e.initCause(ex);
+      throw e;
+    } finally {
+      this.store.lock.readLock().unlock();
+    }
+  }
+  
+  /*
+   * Open new Reader iterators and position them at <code>firstRow</code>.
+   * Closes existing Readers if any.
+   * @param firstRow
+   * @throws IOException
+   */
+  private void openReaders(final byte [] firstRow) throws IOException {
+    if (this.readers != null) {
+      for (int i = 0; i < this.readers.length; i++) {
+        if (this.readers[i] != null) {
+          this.readers[i].close();
+        }
+      }
+    }
+    // Open our own copies of the Readers here inside the scanner.
+    this.readers = new MapFile.Reader[this.store.getStorefiles().size()];
+    
+    // Most recent map file should be first
+    int i = readers.length - 1;
+    for(HStoreFile curHSF: store.getStorefiles().values()) {
+      readers[i--] = curHSF.getReader(store.fs, false, false);
+    }
+    
+    this.keys = new HStoreKey[readers.length];
+    this.vals = new byte[readers.length][];
+    
+    // Advance the readers to the first pos.
+    for (i = 0; i < readers.length; i++) {
+      keys[i] = new HStoreKey();
+      if (firstRow != null && firstRow.length != 0) {
+        if (findFirstRow(i, firstRow)) {
+          continue;
+        }
+      }
+      while (getNext(i)) {
+        if (columnMatch(i)) {
+          break;
+        }
+      }
+    }
+  }
+
+  /**
+   * For a particular column i, find all the matchers defined for the column.
+   * Compare the column family and column key using the matchers. The first one
+   * that matches returns true. If no matchers are successful, return false.
+   * 
+   * @param i index into the keys array
+   * @return true if any of the matchers for the column match the column family
+   * and the column key.
+   * @throws IOException
+   */
+  boolean columnMatch(int i) throws IOException {
+    return columnMatch(keys[i].getColumn());
+  }
+
+  /**
+   * Get the next set of values for this scanner.
+   * 
+   * @param key The key that matched
+   * @param results All the results for <code>key</code>
+   * @return true if a match was found
+   * @throws IOException
+   * 
+   * @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
+   */
+  @Override
+  public boolean next(HStoreKey key, SortedMap<byte [], Cell> results)
+  throws IOException {
+    if (this.scannerClosed) {
+      return false;
+    }
+    this.lock.readLock().lock();
+    try {
+      // Find the next viable row label (and timestamp).
+      ViableRow viableRow = getNextViableRow();
+      
+      // Grab all the values that match this row/timestamp
+      boolean insertedItem = false;
+      if (viableRow.getRow() != null) {
+        key.setRow(viableRow.getRow());
+        key.setVersion(viableRow.getTimestamp());
+
+        for (int i = 0; i < keys.length; i++) {
+          // Fetch the data
+          while ((keys[i] != null)
+              && (Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) == 0)) {
+
+            // If we are doing a wild card match or there are multiple matchers
+            // per column, we need to scan all the older versions of this row
+            // to pick up the rest of the family members
+            if(!isWildcardScanner()
+                && !isMultipleMatchScanner()
+                && (keys[i].getTimestamp() != viableRow.getTimestamp())) {
+              break;
+            }
+
+            if(columnMatch(i)) {              
+              // We only want the first result for any specific family member
+              if(!results.containsKey(keys[i].getColumn())) {
+                results.put(keys[i].getColumn(), 
+                    new Cell(vals[i], keys[i].getTimestamp()));
+                insertedItem = true;
+              }
+            }
+
+            if (!getNext(i)) {
+              closeSubScanner(i);
+            }
+          }
+
+          // Advance the current scanner beyond the chosen row, to
+          // a valid timestamp, so we're ready next time.
+          while ((keys[i] != null)
+              && ((Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) <= 0)
+                  || (keys[i].getTimestamp() > this.timestamp)
+                  || (! columnMatch(i)))) {
+            getNext(i);
+          }
+        }
+      }
+      return insertedItem;
+    } finally {
+      this.lock.readLock().unlock();
+    }
+  }
+  
+  // Data structure to hold the next viable row (and timestamp).
+  class ViableRow {
+    private final byte [] row;
+    private final long ts;
+
+    ViableRow(final byte [] r, final long t) {
+      this.row = r;
+      this.ts = t;
+    }
+
+    byte [] getRow() {
+      return this.row;
+    }
+
+    long getTimestamp() {
+      return this.ts;
+    }
+  }
+
+  /*
+   * @return An instance of <code>ViableRow</code>
+   * @throws IOException
+   */
+  private ViableRow getNextViableRow() throws IOException {
+    // Find the next viable row label (and timestamp).
+    byte [] viableRow = null;
+    long viableTimestamp = -1;
+    long now = System.currentTimeMillis();
+    long ttl = store.ttl;
+    for(int i = 0; i < keys.length; i++) {
+      // The first key that we find that matches may have a timestamp greater
+      // than the one we're looking for. We have to advance to see if there
+      // is an older version present, since timestamps are sorted descending
+      while (keys[i] != null &&
+          keys[i].getTimestamp() > this.timestamp &&
+          columnMatch(i) &&
+          getNext(i)) {
+        if (columnMatch(i)) {
+          break;
+        }
+      }
+      if((keys[i] != null)
+          // If we get here and keys[i] is not null, we already know that the
+          // column matches and the timestamp of the row is less than or equal
+          // to this.timestamp, so we do not need to test that here
+          && ((viableRow == null)
+              || (Bytes.compareTo(keys[i].getRow(), viableRow) < 0)
+              || ((Bytes.compareTo(keys[i].getRow(), viableRow) == 0)
+                  && (keys[i].getTimestamp() > viableTimestamp)))) {
+        if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
+          viableRow = keys[i].getRow();
+          viableTimestamp = keys[i].getTimestamp();
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("getNextViableRow :" + keys[i] + ": expired, skipped");
+          }
+        }
+      }
+    }
+    return new ViableRow(viableRow, viableTimestamp);
+  }
+
+  /**
+   * The user didn't want to start scanning at the first row. This method
+   * seeks to the requested row.
+   *
+   * @param i which iterator to advance
+   * @param firstRow seek to this row
+   * @return true if positioning is complete for this reader: either the
+   * found key matches the column filter, or the row was not found or had
+   * expired and the sub-scanner was closed
+   * @throws IOException
+   */
+  private boolean findFirstRow(int i, final byte [] firstRow) throws IOException {
+    ImmutableBytesWritable ibw = new ImmutableBytesWritable();
+    HStoreKey firstKey
+      = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);
+    if (firstKey == null) {
+      // Didn't find it. Close the scanner and return TRUE
+      closeSubScanner(i);
+      return true;
+    }
+    long now = System.currentTimeMillis();
+    long ttl = store.ttl;
+    if (ttl != HConstants.FOREVER && now >= firstKey.getTimestamp() + ttl) {
+      // Found it, but it has expired. Close the scanner and return TRUE
+      closeSubScanner(i);
+      return true;
+    }
+    this.vals[i] = ibw.get();
+    keys[i].setRow(firstKey.getRow());
+    keys[i].setColumn(firstKey.getColumn());
+    keys[i].setVersion(firstKey.getTimestamp());
+    return columnMatch(i);
+  }
+  
+  /**
+   * Get the next value from the specified reader.
+   * 
+   * @param i which reader to fetch next value from
+   * @return true if there is more data available
+   */
+  private boolean getNext(int i) throws IOException {
+    boolean result = false;
+    ImmutableBytesWritable ibw = new ImmutableBytesWritable();
+    long now = System.currentTimeMillis();
+    long ttl = store.ttl;
+    while (true) {
+      if (!readers[i].next(keys[i], ibw)) {
+        closeSubScanner(i);
+        break;
+      }
+      if (keys[i].getTimestamp() <= this.timestamp) {
+        if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
+          vals[i] = ibw.get();
+          result = true;
+          break;
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("getNext: " + keys[i] + ": expired, skipped");
+        }
+      }
+    }
+    return result;
+  }
+  
+  /** Close down the indicated reader. */
+  private void closeSubScanner(int i) {
+    try {
+      if(readers[i] != null) {
+        try {
+          readers[i].close();
+        } catch(IOException e) {
+          LOG.error(store.storeName + " closing sub-scanner", e);
+        }
+      }
+      
+    } finally {
+      readers[i] = null;
+      keys[i] = null;
+      vals[i] = null;
+    }
+  }
+
+  /** Shut it down! */
+  public void close() {
+    if (!this.scannerClosed) {
+      this.store.deleteChangedReaderObserver(this);
+      try {
+        for(int i = 0; i < readers.length; i++) {
+          if(readers[i] != null) {
+            try {
+              readers[i].close();
+            } catch(IOException e) {
+              LOG.error(store.storeName + " closing scanner", e);
+            }
+          }
+        }
+        
+      } finally {
+        this.scannerClosed = true;
+      }
+    }
+  }
+
+  // Implementation of ChangedReadersObserver
+  
+  /** {@inheritDoc} */
+  public void updateReaders() throws IOException {
+    this.lock.writeLock().lock();
+    try {
+      // The keys are currently lined up at the next row to fetch.  Pass in
+      // the current row as 'first' row and readers will be opened and cued
+      // up so a future call to next will start here.
+      ViableRow viableRow = getNextViableRow();
+      openReaders(viableRow.getRow());
+      LOG.debug("Replaced Scanner Readers at row " +
+        Bytes.toString(viableRow.getRow()));
+    } finally {
+      this.lock.writeLock().unlock();
+    }
+  }
+}
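
StoreFileScanner is package-private and is normally handed out by HStore; what
callers see is the InternalScanner next(key, results) contract it implements,
consumed with the same drain loop scanMetaRegion uses above.  A sketch against
the interface (how the scanner is obtained is elided; any InternalScanner
works):

    import java.io.IOException;
    import java.util.SortedMap;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScannerDrainSketch {
      static void drain(InternalScanner scanner) throws IOException {
        try {
          HStoreKey key = new HStoreKey();
          SortedMap<byte[], Cell> results =
            new TreeMap<byte[], Cell>(Bytes.BYTES_COMPARATOR);
          // next() fills 'results' with the cells of one row and returns
          // false once the scanner is exhausted; clear between rows.
          while (scanner.next(key, results)) {
            System.out.println(Bytes.toString(key.getRow()) + ": " +
              results.size() + " cell(s)");
            results.clear();
          }
        } finally {
          scanner.close();  // releases the underlying readers
        }
      }
    }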

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/package.html
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/package.html?rev=677517&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/package.html (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/migration/v5/package.html Thu Jul 17 00:17:26 2008
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<head />
+<body bgcolor="white">
+Package of classes used to instantiate objects written by pre-version-5
+versions of HBase.
+
+Under the <code>hbase.rootdir</code>, a file named <code>hbase.version</code>
+holds the version number for the data persisted by HBase.  The version number
+is bumped every time HBase's on-filesystem formats change.  Version
+0.2.0 of HBase shipped with an on-filesystem version of <code>5</code>.  This
+package holds pre-version-5 classes used during the migration of an HBase
+instance up to version 5.  See
+<a href="http://wiki.apache.org/hadoop/Hbase/HowToMigrate">How To Migrate</a>
+for more on the migration of HBase across versions and for notes on design
+of the HBase migration system.
+</body>
+</html>
\ No newline at end of file
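
A sketch of reading the version file described above.  It assumes the string
is written with writeUTF, as the era's FSUtils does; that serialization detail
is an assumption here:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class VersionSketch {
      public static void main(String[] args) throws IOException {
        HBaseConfiguration conf = new HBaseConfiguration();
        FileSystem fs = FileSystem.get(conf);
        // hbase.version lives directly under hbase.rootdir.
        Path versionFile = new Path(conf.get("hbase.rootdir"), "hbase.version");
        FSDataInputStream in = fs.open(versionFile);
        try {
          System.out.println("on-filesystem version: " + in.readUTF());
        } finally {
          in.close();
        }
      }
    }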

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java Thu Jul 17 00:17:26 2008
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /** Tests table creation restrictions*/
 public class TestTable extends HBaseClusterTestCase {
@@ -123,4 +124,26 @@
     @SuppressWarnings("unused")
     HTable table = new HTable(conf, getName());
   }
+
+  /**
+   * Test read only tables
+   */
+  public void testReadOnlyTable() throws Exception {
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    HTableDescriptor desc = new HTableDescriptor(getName());
+    byte[] colName = "test:".getBytes();
+    desc.addFamily(new HColumnDescriptor(colName));
+    desc.setReadOnly(true);
+    admin.createTable(desc);
+    HTable table = new HTable(conf, getName());
+    try {
+      byte[] value = "somedata".getBytes();
+      BatchUpdate update = new BatchUpdate();
+      update.put(colName, value);
+      table.commit(update);
+      fail("BatchUpdate on read only table succeeded");  
+    } catch (Exception e) {
+      // expected
+    }
+  }
 }
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=677517&r1=677516&r2=677517&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java Thu Jul 17 00:17:26 2008
@@ -21,6 +21,9 @@
 
 import java.io.IOException;
 import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -31,11 +34,14 @@
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 
 /**
  * Tests HTable
  */
 public class TestHTable extends HBaseClusterTestCase implements HConstants {
+  private static final Log LOG = LogFactory.getLog(TestHTable.class);
   private static final HColumnDescriptor column =
     new HColumnDescriptor(COLUMN_FAMILY);
 
@@ -45,6 +51,9 @@
   
   private static final byte [] row = Bytes.toBytes("row");
  
+  private static final byte [] attrName = Bytes.toBytes("TESTATTR");
+  private static final byte [] attrValue = Bytes.toBytes("somevalue");
+
   /**
    * the test
    * @throws IOException
@@ -123,7 +132,57 @@
     // We can still access A through newA because it has the table information
     // cached. And if it needs to recalibrate, that will cause the information
     // to be reloaded.
-    
+
+    // Test user metadata
+
+    try {
+      // make a modifiable descriptor
+      HTableDescriptor desc = new HTableDescriptor(a.getMetadata());
+      // offline the table
+      admin.disableTable(tableAname);
+      // add a user attribute to HTD
+      desc.setValue(attrName, attrValue);
+      // add a user attribute to HCD
+      for (HColumnDescriptor c: desc.getFamilies())
+        c.setValue(attrName, attrValue);
+      // update metadata for all regions of this table
+      admin.modifyTableMeta(tableAname, desc);
+      // enable the table
+      admin.enableTable(tableAname);
+
+      // Use a metascanner to avoid client API caching (HConnection has a
+      // metadata cache)
+      MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitor() {
+          public boolean processRow(
+              @SuppressWarnings("unused") RowResult rowResult,
+              HRegionLocation regionLocation,
+              HRegionInfo info) {
+            LOG.info("visiting " + regionLocation.toString());
+            HTableDescriptor desc = info.getTableDesc();
+            if (Bytes.compareTo(desc.getName(), tableAname) == 0) {
+              // check HTD attribute
+              byte[] value = desc.getValue(attrName);
+              if (value == null)
+                fail("missing HTD attribute value");
+              if (Bytes.compareTo(value, attrValue) != 0)
+                fail("HTD attribute value is incorrect");
+              // check HCD attribute
+              for (HColumnDescriptor c: desc.getFamilies()) {
+                value = c.getValue(attrName);
+                if (value == null)
+                  fail("missing HCD attribute value");
+                if (Bytes.compareTo(value, attrValue) != 0)
+                  fail("HCD attribute value is incorrect");
+              }
+            }
+            return true;
+          }
+        };
+        MetaScanner.metaScan(conf, visitor);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail();
+    }
   }
   
   /**