You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ji...@apache.org on 2008/02/23 07:11:45 UTC
svn commit: r630394 - in /hadoop/hbase/trunk: ./
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/mapred/
src/java/org/apache/hadoop/hbase/master/
src/java/org/apache/hadoop/hbase/util/ src/test/org/apache/hadoop/hbase/
Author: jimk
Date: Fri Feb 22 22:11:44 2008
New Revision: 630394
URL: http://svn.apache.org/viewvc?rev=630394&view=rev
Log:
HBASE-462 Update migration tool
Other miscellaneous changes included:
IdentityTableReduce
- Added SuppressWarnings("unused") for reporter argument
- Removed unnecessary cast.
AbstractMergeTestBase
- Removed unnecessary compaction
StaticTestEnvironment
- Changed logging level for client connections which are too noisy in most cases
TestBloomFilters
- Removed unnecessary config settings
- Modified to use BatchUpdate instead of deprecated startUpdate, etc.
TestScannerAPI
- Modified to use BatchUpdate instead of deprecated startUpdate, etc.
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Feb 22 22:11:44 2008
@@ -25,6 +25,7 @@
HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
that reach the client even after retries
HBASE-460 TestMigrate broken when HBase moved to subproject
+ HBASE-462 Update migration tool
IMPROVEMENTS
HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Fri Feb 22 22:11:44 2008
@@ -25,7 +25,8 @@
* HConstants holds a bunch of HBase-related constants
*/
public interface HConstants {
-
+
+ /** long constant for zero */
static final Long ZERO_L = Long.valueOf(0L);
// For migration
@@ -34,7 +35,7 @@
static final String VERSION_FILE_NAME = "hbase.version";
/** version of file system */
- static final String FILE_SYSTEM_VERSION = "0.1";
+ static final String FILE_SYSTEM_VERSION = "2";
// Configuration parameters
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java Fri Feb 22 22:11:44 2008
@@ -39,11 +39,12 @@
*/
@Override
public void reduce(Text key, Iterator<MapWritable> values,
- OutputCollector<Text, MapWritable> output, Reporter reporter)
+ OutputCollector<Text, MapWritable> output,
+ @SuppressWarnings("unused") Reporter reporter)
throws IOException {
while(values.hasNext()) {
- MapWritable r = (MapWritable)values.next();
+ MapWritable r = values.next();
output.collect(key, r);
}
}
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri Feb 22 22:11:44 2008
@@ -148,6 +148,7 @@
/** Name of master server */
public static final String MASTER = "master";
+ /** @return InfoServer object */
public InfoServer getInfoServer() {
return infoServer;
}
@@ -270,16 +271,21 @@
try {
// Make sure the root directory exists!
if(! fs.exists(rootdir)) {
- fs.mkdirs(rootdir);
+ fs.mkdirs(rootdir);
FSUtils.setVersion(fs, rootdir);
- } else if (!FSUtils.checkVersion(fs, rootdir)) {
- // Output on stdout so user sees it in terminal.
- String message = "The HBase data files stored on the FileSystem are " +
- "from an earlier version of HBase. You need to run " +
- "'${HBASE_HOME}/bin/hbase migrate' to bring your installation" +
+ } else {
+ String fsversion = FSUtils.checkVersion(fs, rootdir);
+ if (fsversion == null ||
+ fsversion.compareTo(FILE_SYSTEM_VERSION) != 0) {
+ // Output on stdout so user sees it in terminal.
+ String message = "The HBase data files stored on the FileSystem " +
+ "are from an earlier version of HBase. You need to run " +
+ "'${HBASE_HOME}/bin/hbase migrate' to bring your installation " +
"up-to-date.";
- System.out.println("WARNING! " + message + " Master shutting down...");
- throw new IOException(message);
+ // Output on stdout so user sees it in terminal.
+ System.out.println("WARNING! " + message + " Master shutting down...");
+ throw new IOException(message);
+ }
}
if (!fs.exists(rootRegionDir)) {
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java Fri Feb 22 22:11:44 2008
@@ -81,20 +81,19 @@
*
* @param fs
* @param rootdir
- * @return true if the current file system is the correct version
+ * @return null if no version file exists, version string otherwise.
* @throws IOException
*/
- public static boolean checkVersion(FileSystem fs, Path rootdir) throws IOException {
+ public static String checkVersion(FileSystem fs, Path rootdir) throws IOException {
Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
- boolean versionOk = false;
+ String version = null;
if (fs.exists(versionFile)) {
FSDataInputStream s =
fs.open(new Path(rootdir, HConstants.VERSION_FILE_NAME));
- String version = DataInputStream.readUTF(s);
+ version = DataInputStream.readUTF(s);
s.close();
- versionOk = version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0;
}
- return versionOk;
+ return version;
}
/**
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Fri Feb 22 22:11:44 2008
@@ -25,6 +25,8 @@
import java.io.InputStreamReader;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.HashSet;
@@ -64,7 +66,7 @@
/**
* Perform a file system upgrade to convert older file layouts to that
- * supported by HADOOP-2478
+ * supported by HADOOP-2478, and then to the form supported by HBASE-69
*/
public class Migrate extends Configured implements Tool {
static final Log LOG = LogFactory.getLog(Migrate.class);
@@ -96,10 +98,11 @@
options.put("prompt", ACTION.PROMPT);
}
+ private FileSystem fs = null;
+ private Path rootdir = null;
private boolean readOnly = false;
private boolean migrationNeeded = false;
private boolean newRootRegion = false;
- private ACTION logFiles = ACTION.IGNORE;
private ACTION otherFiles = ACTION.IGNORE;
private BufferedReader reader = null;
@@ -127,7 +130,7 @@
}
try {
- FileSystem fs = FileSystem.get(conf); // get DFS handle
+ fs = FileSystem.get(conf); // get DFS handle
LOG.info("Verifying that file system is available...");
if (!FSUtils.isFileSystemAvailable(fs)) {
@@ -148,8 +151,7 @@
LOG.info("Starting upgrade" + (readOnly ? " check" : ""));
- Path rootdir =
- fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
+ rootdir = fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
if (!fs.exists(rootdir)) {
throw new FileNotFoundException("HBase root directory " +
@@ -158,40 +160,28 @@
// See if there is a file system version file
- if (FSUtils.checkVersion(fs, rootdir)) {
+ String version = FSUtils.checkVersion(fs, rootdir);
+ if (version != null &&
+ version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) {
LOG.info("No upgrade necessary.");
return 0;
}
- // check to see if new root region dir exists
-
- checkNewRootRegionDirExists(fs, rootdir);
-
- // check for "extra" files and for old upgradable regions
-
- extraFiles(fs, rootdir);
-
- if (!newRootRegion) {
- // find root region
-
- Path rootRegion = new Path(rootdir,
- OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
-
- if (!fs.exists(rootRegion)) {
- throw new IOException("Cannot find root region " +
- rootRegion.toString());
- } else if (readOnly) {
- migrationNeeded = true;
- } else {
- migrateRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
- scanRootRegion(fs, rootdir);
-
- // scan for left over regions
-
- extraRegions(fs, rootdir);
- }
+ // Get contents of root directory
+
+ FileStatus[] rootFiles = getRootDirFiles();
+
+ if (version == null) {
+ migrateFromNoVersion(rootFiles);
+ migrateToV2(rootFiles);
+ } else if (version.compareTo("0.1") == 0) {
+ migrateToV2(rootFiles);
+ } else if (version.compareTo("2") == 0) {
+ // Nothing to do (yet)
+ } else {
+ throw new IOException("Unrecognized version: " + version);
}
-
+
if (!readOnly) {
// set file system version
LOG.info("Setting file system version.");
@@ -207,21 +197,85 @@
}
}
- private void checkNewRootRegionDirExists(FileSystem fs, Path rootdir)
- throws IOException {
- Path rootRegionDir =
- HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
- newRootRegion = fs.exists(rootRegionDir);
- migrationNeeded = !newRootRegion;
+ private void migrateFromNoVersion(FileStatus[] rootFiles) throws IOException {
+ LOG.info("No file system version found. Checking to see if file system " +
+ "is at revision 0.1");
+
+ // check to see if new root region dir exists
+
+ checkNewRootRegionDirExists();
+
+ // check for unrecovered region server log files
+
+ checkForUnrecoveredLogFiles(rootFiles);
+
+ // check for "extra" files and for old upgradable regions
+
+ extraFiles(rootFiles);
+
+ if (!newRootRegion) {
+ // find root region
+
+ Path rootRegion = new Path(rootdir,
+ OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
+
+ if (!fs.exists(rootRegion)) {
+ throw new IOException("Cannot find root region " +
+ rootRegion.toString());
+ } else if (readOnly) {
+ migrationNeeded = true;
+ } else {
+ migrateRegionDir(HConstants.ROOT_TABLE_NAME, rootRegion);
+ scanRootRegion();
+
+ // scan for left over regions
+
+ extraRegions();
+ }
+ }
}
- // Check for files that should not be there or should be migrated
- private void extraFiles(FileSystem fs, Path rootdir) throws IOException {
+ private void migrateToV2(FileStatus[] rootFiles) throws IOException {
+ LOG.info("Checking to see if file system is at revision 2.");
+ checkForUnrecoveredLogFiles(rootFiles);
+ }
+
+ private FileStatus[] getRootDirFiles() throws IOException {
FileStatus[] stats = fs.listStatus(rootdir);
if (stats == null || stats.length == 0) {
throw new IOException("No files found under root directory " +
rootdir.toString());
}
+ return stats;
+ }
+
+ private void checkNewRootRegionDirExists() throws IOException {
+ Path rootRegionDir =
+ HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
+ newRootRegion = fs.exists(rootRegionDir);
+ migrationNeeded = !newRootRegion;
+ }
+
+ private void checkForUnrecoveredLogFiles(FileStatus[] rootFiles)
+ throws IOException {
+ List<String> unrecoveredLogs = new ArrayList<String>();
+ for (int i = 0; i < rootFiles.length; i++) {
+ String name = rootFiles[i].getPath().getName();
+ if (name.startsWith("log_")) {
+ unrecoveredLogs.add(name);
+ }
+ }
+ if (unrecoveredLogs.size() != 0) {
+ throw new IOException("There are " + unrecoveredLogs.size() +
+ " unrecovered region server logs. Please uninstall this version of " +
+ "HBase, re-install the previous version, start your cluster and " +
+ "shut it down cleanly, so that all region server logs are recovered" +
+ " and deleted.");
+ }
+ }
+
+ // Check for files that should not be there or should be migrated
+ private void extraFiles(FileStatus[] stats) throws IOException {
for (int i = 0; i < stats.length; i++) {
String name = stats[i].getPath().getName();
if (name.startsWith(OLD_PREFIX)) {
@@ -234,31 +288,27 @@
} catch (NumberFormatException e) {
extraFile(otherFiles, "Old region format can not be upgraded: " +
- name, fs, stats[i].getPath());
+ name, stats[i].getPath());
}
} else {
// Since the new root region directory exists, we assume that this
// directory is not necessary
- extraFile(otherFiles, "Old region directory found: " + name, fs,
+ extraFile(otherFiles, "Old region directory found: " + name,
stats[i].getPath());
}
} else {
// File name does not start with "hregion_"
- if (name.startsWith("log_")) {
- String message = "Unrecovered region server log file " + name +
- " this file can be recovered by the master when it starts.";
- extraFile(logFiles, message, fs, stats[i].getPath());
- } else if (!newRootRegion) {
+ if (!newRootRegion) {
// new root region directory does not exist. This is an extra file
String message = "Unrecognized file " + name;
- extraFile(otherFiles, message, fs, stats[i].getPath());
+ extraFile(otherFiles, message, stats[i].getPath());
}
}
}
}
- private void extraFile(ACTION action, String message, FileSystem fs,
- Path p) throws IOException {
+ private void extraFile(ACTION action, String message, Path p)
+ throws IOException {
if (action == ACTION.ABORT) {
throw new IOException(message + " aborting");
@@ -277,8 +327,8 @@
}
}
- private void migrateRegionDir(FileSystem fs, Path rootdir, Text tableName,
- Path oldPath) throws IOException {
+ private void migrateRegionDir(Text tableName, Path oldPath)
+ throws IOException {
// Create directory where table will live
@@ -323,7 +373,7 @@
}
}
- private void scanRootRegion(FileSystem fs, Path rootdir) throws IOException {
+ private void scanRootRegion() throws IOException {
HLog log = new HLog(fs, new Path(rootdir, HConstants.HREGION_LOGDIR_NAME),
conf, null);
@@ -354,12 +404,12 @@
// First move the meta region to where it should be and rename
// subdirectories as necessary
- migrateRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
+ migrateRegionDir(HConstants.META_TABLE_NAME,
new Path(rootdir, OLD_PREFIX + info.getEncodedName()));
// Now scan and process the meta table
- scanMetaRegion(fs, rootdir, log, info);
+ scanMetaRegion(log, info);
}
} finally {
@@ -375,8 +425,7 @@
}
}
- private void scanMetaRegion(FileSystem fs, Path rootdir, HLog log,
- HRegionInfo info) throws IOException {
+ private void scanMetaRegion(HLog log, HRegionInfo info) throws IOException {
HRegion metaRegion = new HRegion(
new Path(rootdir, info.getTableDesc().getName().toString()), log, fs,
@@ -402,7 +451,7 @@
// Move the region to where it should be and rename
// subdirectories as necessary
- migrateRegionDir(fs, rootdir, region.getTableDesc().getName(),
+ migrateRegionDir(region.getTableDesc().getName(),
new Path(rootdir, OLD_PREFIX + region.getEncodedName()));
results.clear();
@@ -417,7 +466,7 @@
}
}
- private void extraRegions(FileSystem fs, Path rootdir) throws IOException {
+ private void extraRegions() throws IOException {
FileStatus[] stats = fs.listStatus(rootdir);
if (stats == null || stats.length == 0) {
throw new IOException("No files found under root directory " +
@@ -436,7 +485,7 @@
message =
"Region not in meta table and no other regions reference it " + name;
}
- extraFile(otherFiles, message, fs, stats[i].getPath());
+ extraFile(otherFiles, message, stats[i].getPath());
}
}
}
@@ -444,18 +493,11 @@
@SuppressWarnings("static-access")
private int parseArgs(String[] args) {
Options opts = new Options();
- Option logFiles = OptionBuilder.withArgName(ACTIONS)
- .hasArg()
- .withDescription(
- "disposition of unrecovered region server logs: {abort|ignore|delete|prompt}")
- .create("logfiles");
-
Option extraFiles = OptionBuilder.withArgName(ACTIONS)
.hasArg()
.withDescription("disposition of 'extra' files: {abort|ignore|delete|prompt}")
.create("extrafiles");
- opts.addOption(logFiles);
opts.addOption(extraFiles);
GenericOptionsParser parser =
@@ -474,21 +516,12 @@
}
if (readOnly) {
- this.logFiles = ACTION.IGNORE;
this.otherFiles = ACTION.IGNORE;
} else {
CommandLine commandLine = parser.getCommandLine();
ACTION action = null;
- if (commandLine.hasOption("logfiles")) {
- action = options.get(commandLine.getOptionValue("logfiles"));
- if (action == null) {
- usage();
- return -1;
- }
- this.logFiles = action;
- }
if (commandLine.hasOption("extrafiles")) {
action = options.get(commandLine.getOptionValue("extrafiles"));
if (action == null) {
@@ -506,9 +539,6 @@
System.err.println(" check perform upgrade checks only.");
System.err.println(" upgrade perform upgrade checks and modify hbase.\n");
System.err.println(" Options are:");
- System.err.println(" -logfiles={abort|ignore|delete|prompt}");
- System.err.println(" action to take when unrecovered region");
- System.err.println(" server log files are found.\n");
System.err.println(" -extrafiles={abort|ignore|delete|prompt}");
System.err.println(" action to take if \"extra\" files are found.\n");
System.err.println(" -conf <configuration file> specify an application configuration file");
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Fri Feb 22 22:11:44 2008
@@ -146,7 +146,6 @@
r.flushcache();
}
}
- region.compactIfNeeded();
region.close();
region.getLog().closeAndDelete();
region.getRegionInfo().setOffline(true);
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java Fri Feb 22 22:11:44 2008
@@ -114,9 +114,10 @@
LOG.setLevel(logLevel);
if (!debugging) {
- // Turn off all the filter logging unless debug is set.
+ // Turn off all the filter and connection logging unless debug is set.
// It is way too noisy.
Logger.getLogger("org.apache.hadoop.hbase.filter").setLevel(Level.INFO);
+ Logger.getLogger("org.apache.hadoop.hbase.client").setLevel(Level.INFO);
}
// Enable mapreduce logging for the mapreduce jobs.
Logger.getLogger("org.apache.hadoop.mapred").setLevel(Level.DEBUG);
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java Fri Feb 22 22:11:44 2008
@@ -26,6 +26,8 @@
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+
/** Tests per-column bloom filters */
public class TestBloomFilters extends HBaseClusterTestCase {
static final Log LOG = LogFactory.getLog(TestBloomFilters.class);
@@ -145,8 +147,6 @@
/** constructor */
public TestBloomFilters() {
super();
- conf.set("hbase.hregion.memcache.flush.size", "100");// flush cache every 100 bytes
- conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
}
/**
@@ -191,9 +191,9 @@
for(int i = 0; i < 100; i++) {
Text row = rows[i];
String value = row.toString();
- long lockid = table.startUpdate(rows[i]);
- table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
- table.commit(lockid);
+ BatchUpdate b = new BatchUpdate(row);
+ b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+ table.commit(b);
}
try {
// Give cache flusher and log roller a chance to run
@@ -257,9 +257,9 @@
for(int i = 0; i < 100; i++) {
Text row = rows[i];
String value = row.toString();
- long lockid = table.startUpdate(rows[i]);
- table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
- table.commit(lockid);
+ BatchUpdate b = new BatchUpdate(row);
+ b.put(CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+ table.commit(b);
}
try {
// Give cache flusher and log roller a chance to run
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java?rev=630394&r1=630393&r2=630394&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java Fri Feb 22 22:11:44 2008
@@ -30,6 +30,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
/** test the scanner API at all levels */
public class TestScannerAPI extends HBaseClusterTestCase {
@@ -80,16 +81,16 @@
HTable table = new HTable(conf, new Text(getName()));
for (Map.Entry<Text, SortedMap<Text, byte[]>> row: values.entrySet()) {
- long lockid = table.startUpdate(row.getKey());
+ BatchUpdate b = new BatchUpdate(row.getKey());
for (Map.Entry<Text, byte[]> val: row.getValue().entrySet()) {
- table.put(lockid, val.getKey(), val.getValue());
+ b.put(val.getKey(), val.getValue());
}
- table.commit(lockid);
+ table.commit(b);
}
HRegion region = null;
try {
- SortedMap<Text, HRegion> regions =
+ Map<Text, HRegion> regions =
cluster.getRegionThreads().get(0).getRegionServer().getOnlineRegions();
for (Map.Entry<Text, HRegion> e: regions.entrySet()) {
if (!e.getValue().getRegionInfo().isMetaRegion()) {