You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by kt...@apache.org on 2014/03/05 04:42:47 UTC
git commit: ACCUMULO-2190 Added unit test for root tablet file
management code
Repository: accumulo
Updated Branches:
refs/heads/1.6.0-SNAPSHOT 3ef7f72b9 -> 45d2110ed
ACCUMULO-2190 Added unit test for root tablet file management code
Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/45d2110e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/45d2110e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/45d2110e
Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 45d2110edc6717eb6d328b487736e2b8eac44504
Parents: 3ef7f72
Author: Keith Turner <kt...@apache.org>
Authored: Tue Mar 4 22:40:23 2014 -0500
Committer: Keith Turner <kt...@apache.org>
Committed: Tue Mar 4 22:40:23 2014 -0500
----------------------------------------------------------------------
.../org/apache/accumulo/tserver/RootFiles.java | 135 +++++++++++++++++
.../org/apache/accumulo/tserver/Tablet.java | 80 +---------
.../apache/accumulo/tserver/RootFilesTest.java | 147 +++++++++++++++++++
3 files changed, 285 insertions(+), 77 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/45d2110e/server/tserver/src/main/java/org/apache/accumulo/tserver/RootFiles.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/RootFiles.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/RootFiles.java
new file mode 100644
index 0000000..0e58c43
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/RootFiles.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Logger;
+
+/**
+ * Manages the root tablet's data files directly in the file system. Unlike other tablets, the
+ * root tablet can not record its file list in the metadata table, so a major compaction swaps
+ * files using a three phase protocol (prepare, rename, finish). If the process dies part way
+ * through, cleanupReplacement() recovers by rolling the swap forward or back based on which
+ * files survived.
+ */
+public class RootFiles {
+
+ private static Logger log = Logger.getLogger(RootFiles.class);
+
+ // Phase 1: mark each old data file for deletion by renaming it in place to
+ // "delete+<compactName>+<originalName>". The marker name records which compaction output
+ // supersedes the file, so recovery can tell whether to finish deleting it or restore it.
+ static void prepareReplacement(VolumeManager fs, Path location, Set<FileRef> oldDatafiles, String compactName) throws IOException {
+ for (FileRef ref : oldDatafiles) {
+ Path path = ref.path();
+ fs.rename(path, new Path(location + "/delete+" + compactName + "+" + path.getName()));
+ }
+ }
+
+ // Phase 2: move the temporary compaction output to its final name. Fails fast if the
+ // destination already exists, since that would mean the swap already happened.
+ static void renameReplacement(VolumeManager fs, FileRef tmpDatafile, FileRef newDatafile) throws IOException {
+ if (fs.exists(newDatafile.path())) {
+ log.error("Target map file already exist " + newDatafile, new Exception());
+ throw new IllegalStateException("Target map file already exist " + newDatafile);
+ }
+
+ // NOTE(review): a false return from rename is only logged, not treated as fatal -- confirm
+ // this matches the failure semantics of VolumeManager.rename
+ if (!fs.rename(tmpDatafile.path(), newDatafile.path()))
+ log.warn("Rename of " + tmpDatafile + " to " + newDatafile + " returned false");
+ }
+
+ // Phase 3: remove the "delete+" marker files created in phase 1, preferring the trash unless
+ // the table is configured to bypass it; falls back to a direct recursive delete.
+ static void finishReplacement(AccumuloConfiguration acuTableConf, VolumeManager fs, Path location, Set<FileRef> oldDatafiles, String compactName)
+ throws IOException {
+ // start deleting files, if we do not finish they will be cleaned
+ // up later
+ for (FileRef ref : oldDatafiles) {
+ Path path = ref.path();
+ Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName());
+ if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile))
+ fs.deleteRecursively(deleteFile);
+ }
+ }
+
+ /**
+ * Replaces a set of old data files with a single compaction output, in a way that
+ * cleanupReplacement() can recover from if interrupted at any point. Runs the three phases
+ * (prepare, rename, finish) in order.
+ */
+ public static void replaceFiles(AccumuloConfiguration acuTableConf, VolumeManager fs, Path location, Set<FileRef> oldDatafiles, FileRef tmpDatafile,
+ FileRef newDatafile) throws IOException {
+ String compactName = newDatafile.path().getName();
+
+ prepareReplacement(fs, location, oldDatafiles, compactName);
+ renameReplacement(fs, tmpDatafile, newDatafile);
+ finishReplacement(acuTableConf, fs, location, oldDatafiles, compactName);
+ }
+
+ /**
+ * Scans the given directory listing and recovers from any interrupted replaceFiles() call:
+ * if the compaction output named in a "delete+" marker exists, the marked file is deleted
+ * (the swap completed); otherwise the marked file is renamed back to its original name (the
+ * swap is rolled back). Leftover *_tmp files are deleted when deleteTmp is set. Returns the
+ * paths of the files that remain valid data files.
+ */
+ public static Collection<String> cleanupReplacement(VolumeManager fs, FileStatus[] files, boolean deleteTmp) throws IOException {
+ /*
+ * called in constructor and before major compactions
+ */
+ Collection<String> goodFiles = new ArrayList<String>(files.length);
+
+ for (FileStatus file : files) {
+
+ String path = file.getPath().toString();
+ if (file.getPath().toUri().getScheme() == null) {
+ // depending on the behavior of HDFS, if list status does not return fully qualified volumes then could switch to the default volume
+ throw new IllegalArgumentException("Require fully qualified paths " + file.getPath());
+ }
+
+ String filename = file.getPath().getName();
+
+ // check for incomplete major compaction, this should only occur
+ // for root tablet
+ if (filename.startsWith("delete+")) {
+ // marker format is delete+<compactName>+<originalName>; split on '+' to recover both
+ String expectedCompactedFile = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename.split("\\+")[1];
+ if (fs.exists(new Path(expectedCompactedFile))) {
+ // compaction finished, but did not finish deleting compacted files.. so delete it
+ if (!fs.deleteRecursively(file.getPath()))
+ log.warn("Delete of file: " + file.getPath().toString() + " return false");
+ continue;
+ }
+ // compaction did not finish, so put files back
+
+ // reset path and filename for rest of loop
+ filename = filename.split("\\+", 3)[2];
+ path = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename;
+
+ if (!fs.rename(file.getPath(), new Path(path)))
+ log.warn("Rename of " + file.getPath().toString() + " to " + path + " returned false");
+ }
+
+ // a *_tmp file is an unfinished compaction output; safe to discard
+ if (filename.endsWith("_tmp")) {
+ if (deleteTmp) {
+ log.warn("cleaning up old tmp file: " + path);
+ if (!fs.deleteRecursively(file.getPath()))
+ log.warn("Delete of tmp file: " + file.getPath().toString() + " return false");
+
+ }
+ continue;
+ }
+
+ // NOTE(review): split("\\.")[1] assumes the filename contains a '.'; a dot-less name that
+ // also lacks the map file prefix would throw ArrayIndexOutOfBoundsException -- confirm such
+ // names can not occur here
+ if (!filename.startsWith(Constants.MAPFILE_EXTENSION + "_") && !FileOperations.getValidExtensions().contains(filename.split("\\.")[1])) {
+ log.error("unknown file in tablet" + path);
+ continue;
+ }
+
+ goodFiles.add(path);
+ }
+
+ return goodFiles;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/45d2110e/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
index dac9528..73f91b1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
@@ -991,29 +991,7 @@ public class Tablet {
// rename the compacted map file, in case
// the system goes down
- String compactName = newDatafile.path().getName();
-
- for (FileRef ref : oldDatafiles) {
- Path path = ref.path();
- fs.rename(path, new Path(location + "/delete+" + compactName + "+" + path.getName()));
- }
-
- if (fs.exists(newDatafile.path())) {
- log.error("Target map file already exist " + newDatafile, new Exception());
- throw new IllegalStateException("Target map file already exist " + newDatafile);
- }
-
- if (!fs.rename(tmpDatafile.path(), newDatafile.path()))
- log.warn("Rename of " + tmpDatafile + " to " + newDatafile + " returned false");
-
- // start deleting files, if we do not finish they will be cleaned
- // up later
- for (FileRef ref : oldDatafiles) {
- Path path = ref.path();
- Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName());
- if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile))
- fs.deleteRecursively(deleteFile);
- }
+ RootFiles.replaceFiles(acuTableConf, fs, location, oldDatafiles, tmpDatafile, newDatafile);
}
// atomically remove old files and add new file
@@ -1131,7 +1109,7 @@ public class Tablet {
// cleanUpFiles() has special handling for delete. files
FileStatus[] files = fs.listStatus(location);
- Collection<String> goodPaths = cleanUpFiles(fs, files, true);
+ Collection<String> goodPaths = RootFiles.cleanupReplacement(fs, files, true);
for (String good : goodPaths) {
Path path = new Path(good);
String filename = path.getName();
@@ -1476,58 +1454,6 @@ public class Tablet {
}
}
- private static Collection<String> cleanUpFiles(VolumeManager fs, FileStatus[] files, boolean deleteTmp) throws IOException {
- /*
- * called in constructor and before major compactions
- */
- Collection<String> goodFiles = new ArrayList<String>(files.length);
-
- for (FileStatus file : files) {
-
- String path = file.getPath().toString();
- String filename = file.getPath().getName();
-
- // check for incomplete major compaction, this should only occur
- // for root tablet
- if (filename.startsWith("delete+")) {
- String expectedCompactedFile = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename.split("\\+")[1];
- if (fs.exists(new Path(expectedCompactedFile))) {
- // compaction finished, but did not finish deleting compacted files.. so delete it
- if (!fs.deleteRecursively(file.getPath()))
- log.warn("Delete of file: " + file.getPath().toString() + " return false");
- continue;
- }
- // compaction did not finish, so put files back
-
- // reset path and filename for rest of loop
- filename = filename.split("\\+", 3)[2];
- path = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename;
-
- if (!fs.rename(file.getPath(), new Path(path)))
- log.warn("Rename of " + file.getPath().toString() + " to " + path + " returned false");
- }
-
- if (filename.endsWith("_tmp")) {
- if (deleteTmp) {
- log.warn("cleaning up old tmp file: " + path);
- if (!fs.deleteRecursively(file.getPath()))
- log.warn("Delete of tmp file: " + file.getPath().toString() + " return false");
-
- }
- continue;
- }
-
- if (!filename.startsWith(Constants.MAPFILE_EXTENSION + "_") && !FileOperations.getValidExtensions().contains(filename.split("\\.")[1])) {
- log.error("unknown file in tablet" + path);
- continue;
- }
-
- goodFiles.add(path);
- }
-
- return goodFiles;
- }
-
public static class KVEntry extends KeyValue {
public KVEntry(Key k, Value v) {
super(new Key(k), Arrays.copyOf(v.get(), v.get().length));
@@ -3161,7 +3087,7 @@ public class Tablet {
// otherwise deleted compacted files could possible be brought back
// at some point if the file they were compacted to was legitimately
// removed by a major compaction
- cleanUpFiles(fs, fs.listStatus(this.location), false);
+ RootFiles.cleanupReplacement(fs, fs.listStatus(this.location), false);
}
SortedMap<FileRef,DataFileValue> allFiles = datafileManager.getDatafileSizes();
List<FileRef> inputFiles = new ArrayList<FileRef>();
http://git-wip-us.apache.org/repos/asf/accumulo/blob/45d2110e/server/tserver/src/test/java/org/apache/accumulo/tserver/RootFilesTest.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/RootFilesTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/RootFilesTest.java
new file mode 100644
index 0000000..1cd8f12
--- /dev/null
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/RootFilesTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Unit test for {@link RootFiles}, exercising the three phases of root tablet file replacement
+ * (prepare, rename, finish) and the recovery logic in cleanupReplacement() for swaps interrupted
+ * after each phase.
+ */
+public class RootFilesTest {
+
+ @Rule
+ public TemporaryFolder tempFolder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+ // Builds a fake root tablet directory containing the given input files plus a
+ // "<compactName>_tmp" compaction output, and exposes the RootFiles operations against it
+ // along with assertions about the directory's resulting contents.
+ private class TestWrapper {
+ File rootTabletDir;
+ Set<FileRef> oldDatafiles;
+ String compactName;
+ FileRef tmpDatafile;
+ FileRef newDatafile;
+ VolumeManager vm;
+ AccumuloConfiguration conf;
+
+ TestWrapper(VolumeManager vm, AccumuloConfiguration conf, String compactName, String... inputFiles) throws IOException {
+ this.vm = vm;
+ this.conf = conf;
+
+ // mimic the on-disk layout of the root tablet ("+r" table) under a fresh temp folder
+ rootTabletDir = new File(tempFolder.newFolder(), "accumulo/tables/+r/root_tablet");
+ rootTabletDir.mkdirs();
+ oldDatafiles = new HashSet<FileRef>();
+ for (String filename : inputFiles) {
+ File file = new File(rootTabletDir, filename);
+ file.createNewFile();
+ oldDatafiles.add(new FileRef(file.toURI().toString()));
+ }
+
+ this.compactName = compactName;
+
+ // the not-yet-renamed compaction output
+ File tmpFile = new File(rootTabletDir, compactName + "_tmp");
+ tmpFile.createNewFile();
+ tmpDatafile = new FileRef(tmpFile.toURI().toString());
+
+ newDatafile = new FileRef(new File(rootTabletDir, compactName).toURI().toString());
+ }
+
+ void prepareReplacement() throws IOException {
+ RootFiles.prepareReplacement(vm, new Path(rootTabletDir.toURI()), oldDatafiles, compactName);
+ }
+
+ void renameReplacement() throws IOException {
+ RootFiles.renameReplacement(vm, tmpDatafile, newDatafile);
+ }
+
+ public void finishReplacement() throws IOException {
+ RootFiles.finishReplacement(conf, vm, new Path(rootTabletDir.toURI()), oldDatafiles, compactName);
+ }
+
+ // Runs recovery against the tablet dir and asserts it reports exactly the expected good files.
+ public Collection<String> cleanupReplacement(String... expectedFiles) throws IOException {
+ Collection<String> ret = RootFiles.cleanupReplacement(vm, vm.listStatus(new Path(rootTabletDir.toURI())), true);
+
+ HashSet<String> expected = new HashSet<String>();
+ for (String efile : expectedFiles)
+ expected.add(new File(rootTabletDir, efile).toURI().toString());
+
+ Assert.assertEquals(expected, new HashSet<String>(ret));
+
+ return ret;
+ }
+
+ // Asserts the tablet dir contains exactly the given file names (by name, not full path).
+ public void assertFiles(String... files) {
+ HashSet<String> actual = new HashSet<String>();
+ for (File file : rootTabletDir.listFiles()) {
+ actual.add(file.getName());
+ }
+
+ HashSet<String> expected = new HashSet<String>();
+ expected.addAll(Arrays.asList(files));
+
+ Assert.assertEquals(expected, actual);
+ }
+ }
+
+ @Test
+ public void testFileReplacement() throws IOException {
+
+ ConfigurationCopy conf = new ConfigurationCopy();
+ conf.set(Property.INSTANCE_DFS_URI, "file:///");
+
+ VolumeManager vm = VolumeManagerImpl.get(conf);
+
+ // full, uninterrupted swap: only the compaction output should remain
+ TestWrapper wrapper = new TestWrapper(vm, conf, "A00004.rf", "A00002.rf", "F00003.rf");
+ wrapper.prepareReplacement();
+ wrapper.renameReplacement();
+ wrapper.finishReplacement();
+ wrapper.assertFiles("A00004.rf");
+
+ // interrupted after prepare: recovery must roll back, restoring the original files
+ wrapper = new TestWrapper(vm, conf, "A00004.rf", "A00002.rf", "F00003.rf");
+ wrapper.prepareReplacement();
+ wrapper.cleanupReplacement("A00002.rf", "F00003.rf");
+ wrapper.assertFiles("A00002.rf", "F00003.rf");
+
+ // interrupted after rename: recovery must roll forward, keeping only the output
+ wrapper = new TestWrapper(vm, conf, "A00004.rf", "A00002.rf", "F00003.rf");
+ wrapper.prepareReplacement();
+ wrapper.renameReplacement();
+ wrapper.cleanupReplacement("A00004.rf");
+ wrapper.assertFiles("A00004.rf");
+
+ // completed swap followed by recovery: recovery should be a no-op
+ wrapper = new TestWrapper(vm, conf, "A00004.rf", "A00002.rf", "F00003.rf");
+ wrapper.prepareReplacement();
+ wrapper.renameReplacement();
+ wrapper.finishReplacement();
+ wrapper.cleanupReplacement("A00004.rf");
+ wrapper.assertFiles("A00004.rf");
+
+ }
+}