Posted to commits@hbase.apache.org by te...@apache.org on 2018/05/16 21:21:27 UTC
hbase git commit: HBASE-20530 Composition of backup directory containing namespace when restoring is different from the actual hfile location
Repository: hbase
Updated Branches:
refs/heads/master f4006b503 -> acbc3a225
HBASE-20530 Composition of backup directory containing namespace when restoring is different from the actual hfile location
Signed-off-by: tedyu <yu...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acbc3a22
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acbc3a22
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acbc3a22
Branch: refs/heads/master
Commit: acbc3a225338fd1ff82226ebbd937f7b15ef5b60
Parents: f4006b5
Author: Vladimir Rodionov <vr...@hortonworks.com>
Authored: Thu May 10 13:50:31 2018 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Wed May 16 14:21:20 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/backup/TestBackupBase.java | 6 +++---
.../hadoop/hbase/backup/TestIncrementalBackup.java | 4 ++--
.../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 9 ++++++++-
.../hadoop/hbase/mapreduce/TestHFileOutputFormat2.java | 11 ++++++++---
4 files changed, 21 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
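Context for the change: in multi-table bulk-load mode, HFileOutputFormat2 composed per-table output directories from a table-name form that leaves the "default" namespace implicit, while the restore side expected the namespace-qualified form, so the composed backup directory and the actual hfile location could disagree. The patch normalizes on TableName.getNameWithNamespaceInclAsString(). A minimal standalone sketch (the main() harness is illustrative, not part of the commit) of how the two name forms diverge:

    import org.apache.hadoop.hbase.TableName;

    public class NameForms {
      public static void main(String[] args) {
        TableName t = TableName.valueOf("table1");  // default namespace implied
        // getNameAsString() leaves the default namespace implicit...
        System.out.println(t.getNameAsString());                   // "table1"
        // ...while getNameWithNamespaceInclAsString() always qualifies it.
        System.out.println(t.getNameWithNamespaceInclAsString());  // "default:table1"
      }
    }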
http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 4243f5b..08ecd63 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -88,8 +88,8 @@ public class TestBackupBase {
protected static TableName table3 = TableName.valueOf("table3");
protected static TableName table4 = TableName.valueOf("table4");
- protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
- protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
+ protected static TableName table1_restore = TableName.valueOf("default:table1");
+ protected static TableName table2_restore = TableName.valueOf("ns2:table2");
protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
@@ -404,7 +404,7 @@ public class TestBackupBase {
protected static void createTables() throws Exception {
long tid = System.currentTimeMillis();
- table1 = TableName.valueOf("ns1:test-" + tid);
+ table1 = TableName.valueOf("test-" + tid);
HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
// Create namespaces
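The test-base change above moves table1 into the default namespace and points the restore targets at namespace-qualified names, so the suite now exercises exactly the case where the implicit namespace and the on-disk directory diverged. Roughly (a hedged sketch reusing the test's own identifiers):

    // Before: every source table lived in an explicit namespace.
    table1 = TableName.valueOf("ns1:test-" + tid);
    // After: table1 is created with no prefix, i.e. in "default",
    // and its restore target spells that namespace out explicitly:
    table1 = TableName.valueOf("test-" + tid);
    table1_restore = TableName.valueOf("default:table1");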
http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 0bce769..b74f42f 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -163,14 +163,14 @@ public class TestIncrementalBackup extends TestBackupBase {
String backupIdIncMultiple2 = client.backupTables(request);
assertTrue(checkSucceeded(backupIdIncMultiple2));
- // #4 - restore full backup for all tables, without overwrite
+ // #4 - restore full backup for all tables
TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
LOG.debug("Restoring full " + backupIdFull);
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
- tablesRestoreFull, tablesMapFull, false));
+ tablesRestoreFull, tablesMapFull, true));
// #5.1 - check tables for full restore
HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
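The flipped boolean above is the overwrite flag; reading the call site, BackupUtils.createRestoreRequest takes (backupRootDir, backupId, check, fromTables, toTables, overwrite). With the restore targets renamed in TestBackupBase, the full restore is now run with overwrite enabled, presumably because the renamed targets can already exist when the restore runs:

    // Hedged reading of the call site; parameter comments are descriptive,
    // not quoted from the API.
    client.restore(BackupUtils.createRestoreRequest(
        BACKUP_ROOT_DIR, backupIdFull,
        false,                             // check: verify only? no, perform the restore
        tablesRestoreFull, tablesMapFull,
        true));                            // overwrite: replace existing target tables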
http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 3b04c0b..a403455 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -27,6 +27,7 @@ import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.URLDecoder;
import java.net.URLEncoder;
+import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -251,6 +252,9 @@ public class HFileOutputFormat2
byte[] tableNameBytes = null;
if (writeMultipleTables) {
tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get());
+ tableNameBytes =
+ TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString()
+ .getBytes(Charset.defaultCharset());
if (!allTableNames.contains(Bytes.toString(tableNameBytes))) {
throw new IllegalArgumentException("TableName '" + Bytes.toString(tableNameBytes) +
"' not" + " expected");
@@ -639,7 +643,10 @@ public class HFileOutputFormat2
for( TableInfo tableInfo : multiTableInfo )
{
regionLocators.add(tableInfo.getRegionLocator());
- allTableNames.add(tableInfo.getRegionLocator().getName().getNameAsString());
+ String tn = writeMultipleTables?
+ tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString():
+ tableInfo.getRegionLocator().getName().getNameAsString();
+ allTableNames.add(tn);
tableDescriptors.add(tableInfo.getTableDescriptor());
}
// Record tablenames for creating writer by favored nodes, and decoding compression, block size and other attributes of columnfamily per table
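These two hunks are the core fix: the table name decoded from each row key and the set of expected table names are both normalized to the namespace-inclusive string, so the membership check and the per-table output directory use the same form. Condensed, the normalization amounts to (a sketch restating the hunk, not a quote from the patch):

    import java.nio.charset.Charset;
    import org.apache.hadoop.hbase.TableName;

    byte[] tableNameBytes = "table1".getBytes(Charset.defaultCharset());
    // Round-trip through TableName to force the namespace in:
    tableNameBytes = TableName.valueOf(tableNameBytes)
        .getNameWithNamespaceInclAsString()
        .getBytes(Charset.defaultCharset());  // now "default:table1"

Note that the ternary keeps getNameAsString() in single-table mode, so existing single-table jobs see no change in their output layout.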
http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 710a94c..09444ac 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -627,15 +627,19 @@ public class TestHFileOutputFormat2 {
Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
// Generate the bulk load files
runIncrementalPELoad(conf, tableInfo, testDir, putSortReducer);
+ if (writeMultipleTables) {
+ testDir = new Path(testDir, "default");
+ }
for (Table tableSingle : allTables.values()) {
// This doesn't write into the table, just makes files
assertEquals("HFOF should not touch actual table", 0, util.countRows(tableSingle));
}
int numTableDirs = 0;
- for (FileStatus tf : testDir.getFileSystem(conf).listStatus(testDir)) {
+ FileStatus[] fss =
+ testDir.getFileSystem(conf).listStatus(testDir);
+ for (FileStatus tf: fss) {
Path tablePath = testDir;
-
if (writeMultipleTables) {
if (allTables.containsKey(tf.getPath().getName())) {
++numTableDirs;
@@ -648,7 +652,8 @@ public class TestHFileOutputFormat2 {
// Make sure that a directory was created for every CF
int dir = 0;
- for (FileStatus f : tablePath.getFileSystem(conf).listStatus(tablePath)) {
+ fss = tablePath.getFileSystem(conf).listStatus(tablePath);
+ for (FileStatus f: fss) {
for (byte[] family : FAMILIES) {
if (Bytes.toString(family).equals(f.getPath().getName())) {
++dir;
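The test adjustments track the new on-disk layout: in multi-table mode the bulk-load output now nests under a namespace directory, so the listing loops are pointed one level down before counting table and column-family directories. As the adjusted test reads it, the expected layout for default-namespace tables is (a hedged sketch):

    testDir/          bulk-load output root
      default/        namespace level added by the fix (multi-table mode only)
        <table>/      one directory per table
          <family>/   one directory per column family
            <hfiles>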