You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by kt...@apache.org on 2019/10/30 20:48:29 UTC
[accumulo] branch master updated: Always choose volume for new
tablet files. (#1389)
This is an automated email from the ASF dual-hosted git repository.
kturner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/master by this push:
new 57ed0ec Always choose volume for new tablet files. (#1389)
57ed0ec is described below
commit 57ed0ec503159e23d608628aa856ca8a3f164465
Author: Keith Turner <kt...@apache.org>
AuthorDate: Wed Oct 30 16:48:20 2019 -0400
Always choose volume for new tablet files. (#1389)
This commit changes Accumulo to always call the volume chooser every time a
tablet creates a new file. It also changes the interpretation of the srv:dir
column in the metadata table. This column used to contain a URI to a
directory on a specific volume that was used for all new tablet files. Now the
srv:dir column only contains a directory name. This directory name will be
used for new tablet files across all volumes.
This change necessitated changes to the ~del markers in the metadata table used for
garbage collection. When a table is cloned or tablets are merged out of
existence it can result in ~del markers for tablet dirs being placed in
the metadata table. These ~del markers used to reference a specific volume.
With this change, the ~del markers now use a special URI of the form
agcav:/tables/<tableId>/<dir name>
When the Accumulo GC sees this, it will delete the dir on all configured
volumes when it is no longer used.
This change supersedes #642. These changes are possible because of the
changes made in #936.
---
.../java/org/apache/accumulo/core/Constants.java | 5 -
.../apache/accumulo/core/metadata/RootTable.java | 4 +-
.../accumulo/core/metadata/schema/Ample.java | 2 +-
.../core/metadata/schema/MetadataSchema.java | 23 +++
.../core/metadata/schema/RootTabletMetadata.java | 5 +-
.../core/metadata/schema/TabletMetadata.java | 10 +-
.../apache/accumulo/core/file/rfile/RFileTest.java | 4 +-
.../core/metadata/schema/TabletMetadataTest.java | 4 +-
server/base/pom.xml | 4 -
.../accumulo/server/fs/VolumeManagerImpl.java | 1 -
.../org/apache/accumulo/server/fs/VolumeUtil.java | 137 +---------------
.../apache/accumulo/server/gc/GcVolumeUtil.java | 62 +++++++
.../apache/accumulo/server/init/Initialize.java | 71 ++++----
.../accumulo/server/metadata/ServerAmpleImpl.java | 1 -
.../server/metadata/TabletMutatorBase.java | 6 +-
.../org/apache/accumulo/server/util/Admin.java | 9 +-
.../accumulo/server/util/ListVolumesUsed.java | 6 -
.../accumulo/server/util/MasterMetadataUtil.java | 4 +-
.../accumulo/server/util/MetadataTableUtil.java | 33 ++--
.../accumulo/server/util/RandomizeVolumes.java | 159 ------------------
.../apache/accumulo/server/fs/VolumeUtilTest.java | 77 ---------
.../accumulo/gc/GarbageCollectionAlgorithm.java | 11 +-
.../apache/accumulo/gc/SimpleGarbageCollector.java | 159 +++++++++++-------
.../apache/accumulo/gc/GarbageCollectionTest.java | 22 +--
.../accumulo/gc/SimpleGarbageCollectorTest.java | 69 ++++++++
.../apache/accumulo/master/TabletGroupWatcher.java | 39 ++---
.../apache/accumulo/master/tableOps/TableInfo.java | 1 -
.../master/tableOps/clone/CloneMetadata.java | 3 +-
.../accumulo/master/tableOps/create/ChooseDir.java | 29 +---
.../accumulo/master/tableOps/create/CreateDir.java | 81 ---------
.../master/tableOps/create/PopulateMetadata.java | 9 +-
.../tableImport/PopulateMetadataTable.java | 34 +---
.../accumulo/master/upgrade/Upgrader9to10.java | 67 +++++++-
.../master/state/RootTabletStateStoreTest.java | 5 +-
.../tableOps/tableImport/ImportTableTest.java | 60 -------
.../org/apache/accumulo/tserver/TabletServer.java | 6 +-
.../org/apache/accumulo/tserver/tablet/Tablet.java | 151 +++++++----------
.../apache/accumulo/tserver/tablet/TabletData.java | 12 +-
.../accumulo/tserver/CheckTabletMetadataTest.java | 2 +-
.../accumulo/test/RewriteTabletDirectoriesIT.java | 181 ---------------------
.../java/org/apache/accumulo/test/VolumeIT.java | 39 +++--
.../test/upgrade/GCUpgrade9to10TestIT.java | 25 ++-
42 files changed, 560 insertions(+), 1072 deletions(-)
diff --git a/core/src/main/java/org/apache/accumulo/core/Constants.java b/core/src/main/java/org/apache/accumulo/core/Constants.java
index 7d232e1..b01413b 100644
--- a/core/src/main/java/org/apache/accumulo/core/Constants.java
+++ b/core/src/main/java/org/apache/accumulo/core/Constants.java
@@ -81,11 +81,6 @@ public class Constants {
*/
public static final String ZDELEGATION_TOKEN_KEYS = "/delegation_token_keys";
- /**
- * Initial tablet directory name for the default tablet in all tables
- */
- public static final String DEFAULT_TABLET_LOCATION = "/default_tablet";
-
public static final String ZTABLE_LOCKS = "/table_locks";
public static final String BULK_PREFIX = "b-";
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
index d943f17..3e18f99 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
@@ -30,12 +30,12 @@ public class RootTable {
/**
* DFS location relative to the Accumulo directory
*/
- public static final String ROOT_TABLET_LOCATION = "/root_tablet";
+ public static final String ROOT_TABLET_DIR_NAME = "root_tablet";
/**
* ZK path relative to the zookeeper node where the root tablet metadata is stored.
*/
- public static final String ZROOT_TABLET = ROOT_TABLET_LOCATION;
+ public static final String ZROOT_TABLET = "/" + ROOT_TABLET_DIR_NAME;
/**
* ZK path relative to the zookeeper node where the root tablet gc candidates are stored.
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/Ample.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/Ample.java
index 61fbf06..ac65ee9 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/Ample.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/Ample.java
@@ -193,7 +193,7 @@ public interface Ample {
public TabletMutator putZooLock(ZooLock zooLock);
- public TabletMutator putDir(String dir);
+ public TabletMutator putDirName(String dirName);
public TabletMutator putWal(LogEntry logEntry);
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
index 1a88785..e55ca32 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -31,6 +31,8 @@ import org.apache.accumulo.core.util.ColumnFQ;
import org.apache.accumulo.fate.FateTxId;
import org.apache.hadoop.io.Text;
+import com.google.common.base.Preconditions;
+
/**
* Describes the table schema used for metadata tables
*/
@@ -111,6 +113,27 @@ public class MetadataSchema {
public static final String DIRECTORY_QUAL = "dir";
public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(NAME, new Text(DIRECTORY_QUAL));
/**
+ * Initial tablet directory name for the default tablet in all tables
+ */
+ public static final String DEFAULT_TABLET_DIR_NAME = "default_tablet";
+
+ /**
+ * @return true if dirName is a valid value for the {@link #DIRECTORY_COLUMN} in the metadata
+ * table. Returns false otherwise.
+ */
+ public static boolean isValidDirCol(String dirName) {
+ return !dirName.contains("/");
+ }
+
+ /**
+ * @throws IllegalArgumentException
+ * when {@link #isValidDirCol(String)} returns false.
+ */
+ public static void validateDirCol(String dirName) {
+ Preconditions.checkArgument(isValidDirCol(dirName), "Invalid dir name %s", dirName);
+ }
+
+ /**
* Holds the {@link TimeType}
*/
public static final String TIME_QUAL = "time";
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java
index 33790ec..c5a02df 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java
@@ -178,9 +178,10 @@ public class RootTabletMetadata {
/**
* Generate initial json for the root tablet metadata.
*/
- public static byte[] getInitialJson(String dir, String file) {
+ public static byte[] getInitialJson(String dirName, String file) {
+ ServerColumnFamily.validateDirCol(dirName);
Mutation mutation = RootTable.EXTENT.getPrevRowUpdateMutation();
- ServerColumnFamily.DIRECTORY_COLUMN.put(mutation, new Value(dir.getBytes(UTF_8)));
+ ServerColumnFamily.DIRECTORY_COLUMN.put(mutation, new Value(dirName.getBytes(UTF_8)));
mutation.put(DataFileColumnFamily.STR_NAME, file, new DataFileValue(0, 0).encodeAsValue());
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
index 025d4be..e36518f 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
@@ -80,7 +80,7 @@ public class TabletMetadata {
private EnumSet<ColumnType> fetchedCols;
private KeyExtent extent;
private Location last;
- private String dir;
+ private String dirName;
private MetadataTime time;
private String cloned;
private SortedMap<Key,Value> keyValues;
@@ -233,9 +233,9 @@ public class TabletMetadata {
return scans;
}
- public String getDir() {
+ public String getDirName() {
ensureFetched(ColumnType.DIR);
- return dir;
+ return dirName;
}
public MetadataTime getTime() {
@@ -325,7 +325,9 @@ public class TabletMetadata {
case ServerColumnFamily.STR_NAME:
switch (qual) {
case DIRECTORY_QUAL:
- te.dir = val;
+ Preconditions.checkArgument(ServerColumnFamily.isValidDirCol(val),
+ "Saw invalid dir name {} {}", key, val);
+ te.dirName = val;
break;
case TIME_QUAL:
te.time = MetadataTime.parse(val);
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index 0c33bfe..86cf0bf 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -46,7 +46,6 @@ import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
-import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
@@ -78,6 +77,7 @@ import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.sample.impl.SamplerFactory;
import org.apache.accumulo.core.spi.cache.BlockCacheManager;
@@ -2360,7 +2360,7 @@ public class RFileTest {
Key defaultDirKey =
new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
- mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
+ mfw.append(defaultDirKey, new Value(ServerColumnFamily.DEFAULT_TABLET_DIR_NAME.getBytes()));
// default's time
Key defaultTimeKey =
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
index 5d94876..63f81ff 100644
--- a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
@@ -61,7 +61,7 @@ public class TabletMetadataTest {
Mutation mutation = extent.getPrevRowUpdateMutation();
COMPACT_COLUMN.put(mutation, new Value("5"));
- DIRECTORY_COLUMN.put(mutation, new Value("/a/t/6/a/"));
+ DIRECTORY_COLUMN.put(mutation, new Value("t-0001757"));
FLUSH_COLUMN.put(mutation, new Value("6"));
TIME_COLUMN.put(mutation, new Value("M123456789"));
@@ -96,7 +96,7 @@ public class TabletMetadataTest {
assertEquals("OK", tm.getCloned());
assertEquals(5L, tm.getCompactId().getAsLong());
- assertEquals("/a/t/6/a/", tm.getDir());
+ assertEquals("t-0001757", tm.getDirName());
assertEquals(extent.getEndRow(), tm.getEndRow());
assertEquals(extent, tm.getExtent());
assertEquals(Set.of("df1", "df2"), Set.copyOf(tm.getFiles()));
diff --git a/server/base/pom.xml b/server/base/pom.xml
index c7b544d..9b2e69b 100644
--- a/server/base/pom.xml
+++ b/server/base/pom.xml
@@ -49,10 +49,6 @@
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
- <groupId>commons-codec</groupId>
- <artifactId>commons-codec</artifactId>
- </dependency>
- <dependency>
<groupId>jline</groupId>
<artifactId>jline</artifactId>
</dependency>
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 75f73f1..a9ce173 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -504,5 +504,4 @@ public class VolumeManagerImpl implements VolumeManager {
public Collection<Volume> getVolumes() {
return volumesByName.values();
}
-
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 4f0b3ab..fca0d54 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -16,17 +16,14 @@
*/
package org.apache.accumulo.server.fs;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.protobuf.ProtobufUtil;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
@@ -39,10 +36,6 @@ import org.apache.accumulo.server.replication.StatusUtil;
import org.apache.accumulo.server.replication.proto.Replication.Status;
import org.apache.accumulo.server.util.MetadataTableUtil;
import org.apache.accumulo.server.util.ReplicationTableUtil;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -54,21 +47,6 @@ public class VolumeUtil {
private static final Logger log = LoggerFactory.getLogger(VolumeUtil.class);
- private static boolean isActiveVolume(ServerContext context, Path dir) {
-
- // consider relative path as active and take no action
- if (!dir.toString().contains(":"))
- return true;
-
- for (String tableDir : ServerConstants.getTablesDirs(context)) {
- // use Path to normalize tableDir
- if (dir.toString().startsWith(new Path(tableDir).toString()))
- return true;
- }
-
- return false;
- }
-
public static String removeTrailingSlash(String path) {
while (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
@@ -138,7 +116,7 @@ public class VolumeUtil {
}
public static class TabletFiles {
- public String dir;
+ public String dirName;
public List<LogEntry> logEntries;
public SortedMap<FileRef,DataFileValue> datafiles;
@@ -147,26 +125,14 @@ public class VolumeUtil {
datafiles = new TreeMap<>();
}
- public TabletFiles(String dir, List<LogEntry> logEntries,
+ public TabletFiles(String dirName, List<LogEntry> logEntries,
SortedMap<FileRef,DataFileValue> datafiles) {
- this.dir = dir;
+ this.dirName = dirName;
this.logEntries = logEntries;
this.datafiles = datafiles;
}
}
- public static String switchRootTableVolume(ServerContext context, String location)
- throws IOException {
- String newLocation = switchVolume(location, FileType.TABLE,
- ServerConstants.getVolumeReplacements(context.getConfiguration(), context.getHadoopConf()));
- if (newLocation != null) {
- context.getAmple().mutateTablet(RootTable.EXTENT).putDir(newLocation).mutate();
- log.info("Volume replaced: {} -> {}", location, newLocation);
- return new Path(newLocation).toString();
- }
- return location;
- }
-
/**
* This method does two things. First, it switches any volumes a tablet is using that are
* configured in instance.volumes.replacements. Second, if a tablet dir is no longer configured
@@ -214,17 +180,9 @@ public class VolumeUtil {
}
}
- String tabletDir = tabletFiles.dir;
- String switchedDir = switchVolume(tabletDir, FileType.TABLE, replacements);
-
- if (switchedDir != null) {
- log.debug("Replacing volume {} : {} -> {}", extent, tabletDir, switchedDir);
- tabletDir = switchedDir;
- }
-
- if (logsToRemove.size() + filesToRemove.size() > 0 || switchedDir != null) {
+ if (logsToRemove.size() + filesToRemove.size() > 0) {
MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove,
- filesToAdd, switchedDir, zooLock, context);
+ filesToAdd, zooLock, context);
if (replicate) {
Status status = StatusUtil.fileClosed();
log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove,
@@ -236,91 +194,8 @@ public class VolumeUtil {
}
}
- ret.dir = decommisionedTabletDir(context, zooLock, vm, extent, tabletDir);
- if (extent.isRootTablet()) {
- SortedMap<FileRef,DataFileValue> copy = ret.datafiles;
- ret.datafiles = new TreeMap<>();
- for (Entry<FileRef,DataFileValue> entry : copy.entrySet()) {
- ret.datafiles.put(
- new FileRef(new Path(ret.dir, entry.getKey().path().getName()).toString()),
- entry.getValue());
- }
- }
-
// method this should return the exact strings that are in the metadata table
+ ret.dirName = tabletFiles.dirName;
return ret;
}
-
- private static String decommisionedTabletDir(ServerContext context, ZooLock zooLock,
- VolumeManager vm, KeyExtent extent, String metaDir) throws IOException {
- Path dir = new Path(metaDir);
- if (isActiveVolume(context, dir))
- return metaDir;
-
- if (!dir.getParent().getParent().getName().equals(ServerConstants.TABLE_DIR)) {
- throw new IllegalArgumentException("Unexpected table dir " + dir);
- }
-
- VolumeChooserEnvironment chooserEnv =
- new VolumeChooserEnvironmentImpl(extent.getTableId(), extent.getEndRow(), context);
-
- Path newDir = new Path(vm.choose(chooserEnv, ServerConstants.getBaseUris(context))
- + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + dir.getParent().getName()
- + Path.SEPARATOR + dir.getName());
-
- log.info("Updating directory for {} from {} to {}", extent, dir, newDir);
-
- MetadataTableUtil.updateTabletDir(extent, newDir.toString(), context, zooLock);
- return newDir.toString();
-
- }
-
- static boolean same(FileSystem fs1, Path dir, FileSystem fs2, Path newDir)
- throws FileNotFoundException, IOException {
- // its possible that a user changes config in such a way that two uris point to the same thing.
- // Like hdfs://foo/a/b and hdfs://1.2.3.4/a/b both reference
- // the same thing because DNS resolves foo to 1.2.3.4. This method does not analyze uris to
- // determine if equivalent, instead it inspects the contents of
- // what the uris point to.
-
- // this code is called infrequently and does not need to be optimized.
-
- if (fs1.exists(dir) && fs2.exists(newDir)) {
-
- if (!fs1.getFileStatus(dir).isDirectory())
- throw new IllegalArgumentException("expected " + dir + " to be a directory");
-
- if (!fs2.getFileStatus(newDir).isDirectory())
- throw new IllegalArgumentException("expected " + newDir + " to be a directory");
-
- HashSet<String> names1 = getFileNames(fs1.listStatus(dir));
- HashSet<String> names2 = getFileNames(fs2.listStatus(newDir));
-
- if (names1.equals(names2)) {
- for (String name : names1)
- if (!hash(fs1, dir, name).equals(hash(fs2, newDir, name)))
- return false;
- return true;
- }
-
- }
- return false;
- }
-
- private static HashSet<String> getFileNames(FileStatus[] filesStatuses) {
- HashSet<String> names = new HashSet<>();
- for (FileStatus fileStatus : filesStatuses)
- if (fileStatus.isDirectory())
- throw new IllegalArgumentException("expected " + fileStatus.getPath() + " to be a file");
- else
- names.add(fileStatus.getPath().getName());
- return names;
- }
-
- private static String hash(FileSystem fs, Path dir, String name) throws IOException {
- try (FSDataInputStream in = fs.open(new Path(dir, name))) {
- return DigestUtils.sha512Hex(in);
- }
-
- }
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java
new file mode 100644
index 0000000..79ae00d
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.server.gc;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.Path;
+
+public class GcVolumeUtil {
+ // AGCAV : Accumulo Garbage Collector All Volumes
+ private static final String ALL_VOLUMES_PREFIX = "agcav:/";
+
+ public static String getDeleteTabletOnAllVolumesUri(TableId tableId, String dirName) {
+ ServerColumnFamily.validateDirCol(dirName);
+ return ALL_VOLUMES_PREFIX + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId
+ + Path.SEPARATOR + dirName;
+ }
+
+ public static Collection<Path> expandAllVolumesUri(VolumeManager fs, Path path) {
+ if (path.toString().startsWith(ALL_VOLUMES_PREFIX)) {
+ String relPath = path.toString().substring(ALL_VOLUMES_PREFIX.length());
+
+ Collection<Volume> volumes = fs.getVolumes();
+
+ ArrayList<Path> ret = new ArrayList<Path>(volumes.size());
+ for (Volume vol : volumes) {
+ Path volPath = vol.prefixChild(relPath);
+ ret.add(volPath);
+ }
+
+ return ret;
+ } else {
+ return Collections.singleton(path);
+ }
+ }
+
+ public static boolean isAllVolumesUri(Path path) {
+ return path.toString().startsWith(ALL_VOLUMES_PREFIX);
+ }
+}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 236637b..55dc77c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -135,7 +135,7 @@ import jline.console.ConsoleReader;
public class Initialize implements KeywordExecutable {
private static final Logger log = LoggerFactory.getLogger(Initialize.class);
private static final String DEFAULT_ROOT_USER = "root";
- private static final String TABLE_TABLETS_TABLET_DIR = "/table_info";
+ private static final String TABLE_TABLETS_TABLET_DIR = "table_info";
private static ConsoleReader reader = null;
private static ZooReaderWriter zoo = null;
@@ -362,22 +362,25 @@ public class Initialize implements KeywordExecutable {
// the actual disk locations of the root table and tablets
String[] configuredVolumes = VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf);
VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(ChooserScope.INIT, null);
- final String rootTabletDir = new Path(
- fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
- + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();
-
+ String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
- String rootTabletFileName = rootTabletDir + Path.SEPARATOR + "00000_00000." + ext;
+ String rootTabletFileUri = new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR
+ + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + Path.SEPARATOR
+ + rootTabletDirName + Path.SEPARATOR + "00000_00000." + ext).toString();
try {
- initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir, rootTabletFileName);
+ initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDirName, rootTabletFileUri);
} catch (Exception e) {
log.error("FATAL: Failed to initialize zookeeper", e);
return false;
}
try {
- initFileSystem(siteConfig, hadoopConf, fs, uuid, rootTabletDir, rootTabletFileName);
+ initFileSystem(siteConfig, hadoopConf, fs, uuid,
+ new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR
+ + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + rootTabletDirName)
+ .toString(),
+ rootTabletFileUri);
} catch (Exception e) {
log.error("FATAL Failed to initialize filesystem", e);
@@ -493,7 +496,7 @@ public class Initialize implements KeywordExecutable {
}
private void initFileSystem(SiteConfiguration siteConfig, Configuration hadoopConf,
- VolumeManager fs, UUID uuid, String rootTabletDir, String rootTabletFileName)
+ VolumeManager fs, UUID uuid, String rootTabletDirUri, String rootTabletFileUri)
throws IOException {
initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf), false);
@@ -501,49 +504,53 @@ public class Initialize implements KeywordExecutable {
initSystemTablesConfig(zoo, Constants.ZROOT + "/" + uuid, hadoopConf);
VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(ChooserScope.INIT, null);
- String tableMetadataTabletDir =
+ String tableMetadataTabletDirName = TABLE_TABLETS_TABLET_DIR;
+ String tableMetadataTabletDirUri =
fs.choose(chooserEnv, ServerConstants.getBaseUris(siteConfig, hadoopConf))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
- + TABLE_TABLETS_TABLET_DIR;
- String replicationTableDefaultTabletDir =
+ + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + Path.SEPARATOR
+ + tableMetadataTabletDirName;
+ String replicationTableDefaultTabletDirName = ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
+ String replicationTableDefaultTabletDirUri =
fs.choose(chooserEnv, ServerConstants.getBaseUris(siteConfig, hadoopConf))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
- + Constants.DEFAULT_TABLET_LOCATION;
- String defaultMetadataTabletDir =
+ + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID + Path.SEPARATOR
+ + replicationTableDefaultTabletDirName;
+ String defaultMetadataTabletDirName = ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
+ String defaultMetadataTabletDirUri =
fs.choose(chooserEnv, ServerConstants.getBaseUris(siteConfig, hadoopConf))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
- + Constants.DEFAULT_TABLET_LOCATION;
+ + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + Path.SEPARATOR
+ + defaultMetadataTabletDirName;
// create table and default tablets directories
- createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir,
- replicationTableDefaultTabletDir);
+ createDirectories(fs, rootTabletDirUri, tableMetadataTabletDirUri, defaultMetadataTabletDirUri,
+ replicationTableDefaultTabletDirUri);
String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
// populate the metadata tables tablet with info about the replication table's one initial
// tablet
- String metadataFileName = tableMetadataTabletDir + Path.SEPARATOR + "0_1." + ext;
+ String metadataFileName = tableMetadataTabletDirUri + Path.SEPARATOR + "0_1." + ext;
Tablet replicationTablet =
- new Tablet(ReplicationTable.ID, replicationTableDefaultTabletDir, null, null);
+ new Tablet(ReplicationTable.ID, replicationTableDefaultTabletDirName, null, null);
createMetadataFile(fs, metadataFileName, siteConfig, replicationTablet);
// populate the root tablet with info about the metadata table's two initial tablets
Text splitPoint = TabletsSection.getRange().getEndKey().getRow();
- Tablet tablesTablet =
- new Tablet(MetadataTable.ID, tableMetadataTabletDir, null, splitPoint, metadataFileName);
- Tablet defaultTablet = new Tablet(MetadataTable.ID, defaultMetadataTabletDir, splitPoint, null);
- createMetadataFile(fs, rootTabletFileName, siteConfig, tablesTablet, defaultTablet);
+ Tablet tablesTablet = new Tablet(MetadataTable.ID, tableMetadataTabletDirName, null, splitPoint,
+ metadataFileName);
+ Tablet defaultTablet =
+ new Tablet(MetadataTable.ID, defaultMetadataTabletDirName, splitPoint, null);
+ createMetadataFile(fs, rootTabletFileUri, siteConfig, tablesTablet, defaultTablet);
}
private static class Tablet {
TableId tableId;
- String dir;
+ String dirName;
Text prevEndRow, endRow;
String[] files;
- Tablet(TableId tableId, String dir, Text prevEndRow, Text endRow, String... files) {
+ Tablet(TableId tableId, String dirName, Text prevEndRow, Text endRow, String... files) {
this.tableId = tableId;
- this.dir = dir;
+ this.dirName = dirName;
this.prevEndRow = prevEndRow;
this.endRow = endRow;
this.files = files;
@@ -575,7 +582,7 @@ public class Initialize implements KeywordExecutable {
private static void createEntriesForTablet(TreeMap<Key,Value> map, Tablet tablet) {
Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
Text extent = new Text(TabletsSection.getRow(tablet.tableId, tablet.endRow));
- addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dir.getBytes(UTF_8)));
+ addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dirName.getBytes(UTF_8)));
addEntry(map, extent, TIME_COLUMN, new Value(new MetadataTime(0, TimeType.LOGICAL).encode()));
addEntry(map, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tablet.prevEndRow));
for (String file : tablet.files) {
@@ -607,7 +614,7 @@ public class Initialize implements KeywordExecutable {
}
private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath,
- String rootTabletDir, String rootTabletFileName)
+ String rootTabletDirName, String rootTabletFileUri)
throws KeeperException, InterruptedException {
// setup basic data in zookeeper
zoo.putPersistentData(Constants.ZROOT, new byte[0], -1, NodeExistsPolicy.SKIP,
@@ -646,7 +653,7 @@ public class Initialize implements KeywordExecutable {
zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY,
NodeExistsPolicy.FAIL);
zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET,
- RootTabletMetadata.getInitialJson(rootTabletDir, rootTabletFileName),
+ RootTabletMetadata.getInitialJson(rootTabletDirName, rootTabletFileUri),
NodeExistsPolicy.FAIL);
zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_GC_CANDIDATES,
new RootGcCandidates().toJson().getBytes(UTF_8), NodeExistsPolicy.FAIL);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
index dc8af4f..e68988f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
@@ -198,5 +198,4 @@ public class ServerAmpleImpl extends AmpleImpl implements Ample {
delFlag.put(EMPTY_TEXT, EMPTY_TEXT, DeletesSection.SkewedKeyValue.NAME);
return delFlag;
}
-
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java b/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
index 1eda116..ac7e3ea 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
@@ -26,6 +26,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
@@ -59,9 +60,10 @@ public abstract class TabletMutatorBase implements Ample.TabletMutator {
}
@Override
- public Ample.TabletMutator putDir(String dir) {
+ public Ample.TabletMutator putDirName(String dirName) {
+ ServerColumnFamily.validateDirCol(dirName);
Preconditions.checkState(updatesEnabled, "Cannot make updates after calling mutate.");
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mutation, new Value(dir));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mutation, new Value(dirName));
return this;
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index 9540284..baca45c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -141,7 +141,12 @@ public class Admin implements KeywordExecutable {
boolean users = false;
}
- @Parameters(commandDescription = "redistribute tablet directories across the current volume list")
+ private static final String RV_DEPRECATION_MSG =
+ "Randomizing tablet directories is deprecated and now does nothing. Accumulo now always"
+ + " calls the volume chooser for each file created by a tablet, so its no longer "
+ + "necessary.";
+
+ @Parameters(commandDescription = RV_DEPRECATION_MSG)
static class RandomizeVolumesCommand {
@Parameter(names = {"-t"}, description = "table to update", required = true)
String tableName = null;
@@ -247,7 +252,7 @@ public class Admin implements KeywordExecutable {
} else if (cl.getParsedCommand().equals("volumes")) {
ListVolumesUsed.listVolumes(context);
} else if (cl.getParsedCommand().equals("randomizeVolumes")) {
- rc = RandomizeVolumes.randomize(context, randomizeVolumesOpts.tableName);
+ System.out.println(RV_DEPRECATION_MSG);
} else {
everything = cl.getParsedCommand().equals("stopAll");
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
index 1417dd5..2600729 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
@@ -67,8 +67,6 @@ public class ListVolumesUsed {
TabletMetadata rootMeta = context.getAmple().readTablet(RootTable.EXTENT);
- volumes.add(getTableURI(rootMeta.getDir()));
-
for (LogEntry logEntry : rootMeta.getLogs()) {
getLogURIs(volumes, logEntry);
}
@@ -88,7 +86,6 @@ public class ListVolumesUsed {
scanner.setRange(MetadataSchema.TabletsSection.getRange());
scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
- MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
TreeSet<String> volumes = new TreeSet<>();
@@ -100,9 +97,6 @@ public class ListVolumesUsed {
.equals(MetadataSchema.TabletsSection.LogColumnFamily.NAME)) {
LogEntry le = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
getLogURIs(volumes, le);
- } else if (MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN
- .hasColumns(entry.getKey())) {
- volumes.add(getTableURI(entry.getValue().toString()));
}
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 55cc43b..688aa44 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -60,7 +60,7 @@ public class MasterMetadataUtil {
private static final Logger log = LoggerFactory.getLogger(MasterMetadataUtil.class);
- public static void addNewTablet(ServerContext context, KeyExtent extent, String path,
+ public static void addNewTablet(ServerContext context, KeyExtent extent, String dirName,
TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
Map<Long,? extends Collection<FileRef>> bulkLoadedFiles, MetadataTime time, long lastFlushID,
long lastCompactID, ZooLock zooLock) {
@@ -68,7 +68,7 @@ public class MasterMetadataUtil {
TabletMutator tablet = context.getAmple().mutateTablet(extent);
tablet.putPrevEndRow(extent.getPrevEndRow());
tablet.putZooLock(zooLock);
- tablet.putDir(path);
+ tablet.putDirName(dirName);
tablet.putTime(time);
if (lastFlushID > 0)
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 937e686..9641913 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -84,14 +84,11 @@ import org.apache.accumulo.core.util.FastFormat;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.fate.FateTxId;
import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.server.metadata.ServerAmpleImpl;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -195,7 +192,7 @@ public class MetadataTableUtil {
public static void updateTabletDir(KeyExtent extent, String newDir, ServerContext context,
ZooLock zooLock) {
TabletMutator tablet = context.getAmple().mutateTablet(extent);
- tablet.putDir(newDir);
+ tablet.putDirName(newDir);
tablet.putZooLock(zooLock);
tablet.mutate();
}
@@ -204,7 +201,7 @@ public class MetadataTableUtil {
TimeType timeType, ZooLock zooLock) {
TabletMutator tablet = context.getAmple().mutateTablet(extent);
tablet.putPrevEndRow(extent.getPrevEndRow());
- tablet.putDir(path);
+ tablet.putDirName(path);
tablet.putTime(new MetadataTime(0, timeType));
tablet.putZooLock(zooLock);
tablet.mutate();
@@ -213,8 +210,7 @@ public class MetadataTableUtil {
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove,
List<LogEntry> logsToAdd, List<FileRef> filesToRemove,
- SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock,
- ServerContext context) {
+ SortedMap<FileRef,DataFileValue> filesToAdd, ZooLock zooLock, ServerContext context) {
TabletMutator tabletMutator = context.getAmple().mutateTablet(extent);
logsToRemove.forEach(tabletMutator::deleteWal);
@@ -223,9 +219,6 @@ public class MetadataTableUtil {
filesToRemove.forEach(tabletMutator::deleteFile);
filesToAdd.forEach(tabletMutator::putFile);
- if (newDir != null)
- tabletMutator.putDir(newDir);
-
tabletMutator.putZooLock(zooLock);
tabletMutator.mutate();
@@ -379,8 +372,9 @@ public class MetadataTableUtil {
}
if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
- bw.addMutation(
- ServerAmpleImpl.createDeleteMutation(context, tableId, cell.getValue().toString()));
+ String uri =
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, cell.getValue().toString());
+ bw.addMutation(ServerAmpleImpl.createDeleteMutation(context, tableId, uri));
}
}
@@ -603,8 +597,8 @@ public class MetadataTableUtil {
return rewrites;
}
- public static void cloneTable(ServerContext context, TableId srcTableId, TableId tableId,
- VolumeManager volumeManager) throws Exception {
+ public static void cloneTable(ServerContext context, TableId srcTableId, TableId tableId)
+ throws Exception {
try (BatchWriter bw = context.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
@@ -651,12 +645,9 @@ public class MetadataTableUtil {
Key k = entry.getKey();
Mutation m = new Mutation(k.getRow());
m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
- VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(tableId,
- new KeyExtent(k.getRow(), (Text) null).getEndRow(), context);
- String dir = volumeManager.choose(chooserEnv, ServerConstants.getBaseUris(context))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + new String(
- FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES));
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes(UTF_8)));
+ byte[] dirName =
+ FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES);
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dirName));
bw.addMutation(m);
}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
deleted file mode 100644
index 2e4889b..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
-
-import java.io.IOException;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.cli.ServerUtilOpts;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.htrace.TraceScope;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-public class RandomizeVolumes {
- private static final Logger log = LoggerFactory.getLogger(RandomizeVolumes.class);
-
- static class RandomizeOpts extends ServerUtilOpts {
- @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
- String tableName;
- }
-
- public static void main(String[] args) {
- RandomizeOpts opts = new RandomizeOpts();
- try (TraceScope clientSpan = opts.parseArgsAndTrace(RandomizeVolumes.class.getName(), args)) {
- ServerContext context = opts.getServerContext();
- try {
- int status = randomize(context, opts.tableName);
- System.exit(status);
- } catch (Exception ex) {
- log.error("{}", ex.getMessage(), ex);
- System.exit(4);
- }
- }
- }
-
- public static int randomize(ServerContext context, String tableName)
- throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- final VolumeManager vm = context.getVolumeManager();
- if (vm.getVolumes().size() < 2) {
- log.error("There are not enough volumes configured");
- return 1;
- }
- String tblStr = context.tableOperations().tableIdMap().get(tableName);
- if (tblStr == null) {
- log.error("Could not determine the table ID for table {}", tableName);
- return 2;
- }
- TableId tableId = TableId.of(tblStr);
- TableState tableState = context.getTableManager().getTableState(tableId);
- if (tableState != TableState.OFFLINE) {
- log.info("Taking {} offline", tableName);
- context.tableOperations().offline(tableName, true);
- log.info("{} offline", tableName);
- }
- SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
- log.info("Rewriting entries for {}", tableName);
- Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- DIRECTORY_COLUMN.fetch(scanner);
- scanner.setRange(TabletsSection.getRange(tableId));
- BatchWriter writer = context.createBatchWriter(MetadataTable.NAME, null);
- int count = 0;
- for (Entry<Key,Value> entry : scanner) {
- String oldLocation = entry.getValue().toString();
- String directory;
- if (oldLocation.contains(":")) {
- String[] parts = oldLocation.split(Path.SEPARATOR);
- TableId tableIdEntry = TableId.of(parts[parts.length - 2]);
- if (!tableIdEntry.equals(tableId)) {
- log.error("Unexpected table id found: {}, expected {}; skipping", tableIdEntry, tableId);
- continue;
- }
- directory = parts[parts.length - 1];
- } else {
- directory = oldLocation.substring(Path.SEPARATOR.length());
- }
- Key key = entry.getKey();
- Mutation m = new Mutation(key.getRow());
-
- VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(tableId,
- new KeyExtent(key.getRow(), (Text) null).getEndRow(), context);
- final String newLocation =
- vm.choose(chooserEnv, ServerConstants.getBaseUris(context)) + Path.SEPARATOR
- + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
- m.put(key.getColumnFamily(), key.getColumnQualifier(),
- new Value(newLocation.getBytes(UTF_8)));
- if (log.isTraceEnabled()) {
- log.trace("Replacing {} with {}", oldLocation, newLocation);
- }
- writer.addMutation(m);
- pool.submit(() -> {
- try {
- vm.mkdirs(new Path(newLocation));
- } catch (IOException ex) {
- // nevermind
- }
- });
- count++;
- }
- writer.close();
- pool.shutdown();
- while (!pool.isTerminated()) {
- log.trace("Waiting for mkdir() calls to finish");
- try {
- pool.awaitTermination(5, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- break;
- }
- }
- log.info("Updated {} entries for table {}", count, tableName);
- if (tableState != TableState.OFFLINE) {
- context.tableOperations().online(tableName, true);
- log.info("table {} back online", tableName);
- }
- return 0;
- }
-
-}
diff --git a/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
index dbe2937..bf24f16 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
@@ -17,21 +17,14 @@
package org.apache.accumulo.server.fs;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.io.File;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.server.fs.VolumeManager.FileType;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Rule;
import org.junit.Test;
@@ -165,70 +158,6 @@ public class VolumeUtilTest {
}
@Test
- public void testSame() throws Exception {
- FileSystem fs = FileSystem.getLocal(new Configuration());
-
- Path subdir1 = new Path(tempFolder.newFolder().toURI());
- Path subdir2 = new Path(tempFolder.newFolder().toURI());
- Path subdir3 = new Path(tempFolder.newFolder().toURI());
-
- assertFalse(VolumeUtil.same(fs, subdir1, fs,
- new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497")));
- assertFalse(VolumeUtil.same(fs,
- new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497"), fs,
- subdir1));
- assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
-
- writeFile(fs, subdir1, "abc", "foo");
- writeFile(fs, subdir2, "abc", "bar");
- writeFile(fs, subdir3, "abc", "foo");
-
- assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
- assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir2));
- assertFalse(VolumeUtil.same(fs, subdir2, fs, subdir1));
- assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir3));
- assertTrue(VolumeUtil.same(fs, subdir3, fs, subdir1));
-
- writeFile(fs, subdir1, "def", "123456");
- writeFile(fs, subdir2, "def", "123456");
- writeFile(fs, subdir3, "def", "123456");
-
- assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
- assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir2));
- assertFalse(VolumeUtil.same(fs, subdir2, fs, subdir1));
- assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir3));
- assertTrue(VolumeUtil.same(fs, subdir3, fs, subdir1));
-
- writeFile(fs, subdir3, "ghi", "09876");
-
- assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir3));
- assertFalse(VolumeUtil.same(fs, subdir3, fs, subdir1));
-
- fs.mkdirs(new Path(subdir2, "dir1"));
-
- try {
- VolumeUtil.same(fs, subdir1, fs, subdir2);
- fail();
- } catch (IllegalArgumentException e) {}
-
- try {
- VolumeUtil.same(fs, subdir2, fs, subdir1);
- fail();
- } catch (IllegalArgumentException e) {}
-
- try {
- VolumeUtil.same(fs, subdir1, fs, new Path(subdir2, "def"));
- fail();
- } catch (IllegalArgumentException e) {}
-
- try {
- VolumeUtil.same(fs, new Path(subdir2, "def"), fs, subdir3);
- fail();
- } catch (IllegalArgumentException e) {}
-
- }
-
- @Test
public void testRootTableReplacement() {
List<Pair<Path,Path>> replacements = new ArrayList<>();
replacements.add(new Pair<>(new Path("file:/foo/v1"), new Path("file:/foo/v8")));
@@ -239,10 +168,4 @@ public class VolumeUtilTest {
assertEquals("file:/foo/v8/tables/+r/root_tablet",
VolumeUtil.switchVolume("file:/foo/v1/tables/+r/root_tablet", ft, replacements));
}
-
- private void writeFile(FileSystem fs, Path dir, String filename, String data) throws IOException {
- try (FSDataOutputStream out = fs.create(new Path(dir, filename))) {
- out.writeUTF(data);
- }
- }
}
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 83e7ce3..3acb91e 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -31,6 +31,7 @@ import java.util.TreeMap;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.gc.GarbageCollectionEnvironment.Reference;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.replication.StatusUtil;
@@ -187,12 +188,10 @@ public class GarbageCollectionAlgorithm {
} else {
String tableID = ref.id.toString();
- String dir = ref.ref;
- if (!dir.contains(":")) {
- if (!dir.startsWith("/"))
- throw new RuntimeException("Bad directory " + dir);
- dir = "/" + tableID + dir;
- }
+ String dirName = ref.ref;
+ ServerColumnFamily.validateDirCol(dirName);
+
+ String dir = "/" + tableID + "/" + dirName;
dir = makeRelative(dir, 2);
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 16d4a78..de07a02 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -24,7 +24,9 @@ import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
@@ -68,6 +70,7 @@ import org.apache.accumulo.core.util.NamingThreadFactory;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.ServerServices;
import org.apache.accumulo.core.util.ServerServices.Service;
+import org.apache.accumulo.core.volume.Volume;
import org.apache.accumulo.fate.zookeeper.ZooLock;
import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
@@ -80,6 +83,7 @@ import org.apache.accumulo.server.ServerOpts;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManager.FileType;
import org.apache.accumulo.server.fs.VolumeUtil;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.server.master.LiveTServerSet;
import org.apache.accumulo.server.replication.proto.Replication.Status;
import org.apache.accumulo.server.rpc.ServerAddress;
@@ -96,6 +100,7 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -251,8 +256,9 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
Stream<Reference> refStream = tabletStream.flatMap(tm -> {
Stream<Reference> refs = Stream.concat(tm.getFiles().stream(), tm.getScans().stream())
.map(f -> new Reference(tm.getTableId(), f, false));
- if (tm.getDir() != null) {
- refs = Stream.concat(refs, Stream.of(new Reference(tm.getTableId(), tm.getDir(), true)));
+ if (tm.getDirName() != null) {
+ refs =
+ Stream.concat(refs, Stream.of(new Reference(tm.getTableId(), tm.getDirName(), true)));
}
return refs;
});
@@ -283,31 +289,9 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
return;
}
- // when deleting a dir and all files in that dir, only need to delete the dir
- // the dir will sort right before the files... so remove the files in this case
- // to minimize namenode ops
- Iterator<Entry<String,String>> cdIter = confirmedDeletes.entrySet().iterator();
-
List<String> processedDeletes = Collections.synchronizedList(new ArrayList<String>());
- String lastDir = null;
- while (cdIter.hasNext()) {
- Entry<String,String> entry = cdIter.next();
- String relPath = entry.getKey();
- String absPath = fs.getFullPath(FileType.TABLE, entry.getValue()).toString();
-
- if (isDir(relPath)) {
- lastDir = absPath;
- } else if (lastDir != null) {
- if (absPath.startsWith(lastDir)) {
- log.debug("Ignoring {} because {} exist", entry.getValue(), lastDir);
- processedDeletes.add(entry.getValue());
- cdIter.remove();
- } else {
- lastDir = null;
- }
- }
- }
+ minimizeDeletes(confirmedDeletes, processedDeletes, fs);
ExecutorService deleteThreadPool =
Executors.newFixedThreadPool(getNumDeleteThreads(), new NamingThreadFactory("deleting"));
@@ -318,7 +302,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
for (final String delete : confirmedDeletes.values()) {
Runnable deleteTask = () -> {
- boolean removeFlag;
+ boolean removeFlag = false;
try {
Path fullPath;
@@ -338,41 +322,44 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
fullPath = fs.getFullPath(FileType.TABLE, delete);
}
- log.debug("Deleting {}", fullPath);
+ for (Path pathToDel : GcVolumeUtil.expandAllVolumesUri(fs, fullPath)) {
+ log.debug("Deleting {}", pathToDel);
- if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
- // delete succeeded, still want to delete
- removeFlag = true;
- synchronized (SimpleGarbageCollector.this) {
- ++status.current.deleted;
- }
- } else if (fs.exists(fullPath)) {
- // leave the entry in the metadata; we'll try again later
- removeFlag = false;
- synchronized (SimpleGarbageCollector.this) {
- ++status.current.errors;
- }
- log.warn("File exists, but was not deleted for an unknown reason: {}", fullPath);
- } else {
- // this failure, we still want to remove the metadata entry
- removeFlag = true;
- synchronized (SimpleGarbageCollector.this) {
- ++status.current.errors;
- }
- String[] parts = fullPath.toString().split(Constants.ZTABLES)[1].split("/");
- if (parts.length > 2) {
- TableId tableId = TableId.of(parts[1]);
- String tabletDir = parts[2];
- getContext().getTableManager().updateTableStateCache(tableId);
- TableState tableState = getContext().getTableManager().getTableState(tableId);
- if (tableState != null && tableState != TableState.DELETING) {
- // clone directories don't always exist
- if (!tabletDir.startsWith(Constants.CLONE_PREFIX)) {
- log.debug("File doesn't exist: {}", fullPath);
- }
+ if (moveToTrash(pathToDel) || fs.deleteRecursively(pathToDel)) {
+ // delete succeeded, still want to delete
+ removeFlag = true;
+ synchronized (SimpleGarbageCollector.this) {
+ ++status.current.deleted;
}
+ } else if (fs.exists(pathToDel)) {
+ // leave the entry in the metadata; we'll try again later
+ removeFlag = false;
+ synchronized (SimpleGarbageCollector.this) {
+ ++status.current.errors;
+ }
+ log.warn("File exists, but was not deleted for an unknown reason: {}", pathToDel);
+ break;
} else {
- log.warn("Very strange path name: {}", delete);
+ // this failure, we still want to remove the metadata entry
+ removeFlag = true;
+ synchronized (SimpleGarbageCollector.this) {
+ ++status.current.errors;
+ }
+ String[] parts = pathToDel.toString().split(Constants.ZTABLES)[1].split("/");
+ if (parts.length > 2) {
+ TableId tableId = TableId.of(parts[1]);
+ String tabletDir = parts[2];
+ getContext().getTableManager().updateTableStateCache(tableId);
+ TableState tableState = getContext().getTableManager().getTableState(tableId);
+ if (tableState != null && tableState != TableState.DELETING) {
+ // clone directories don't always exist
+ if (!tabletDir.startsWith(Constants.CLONE_PREFIX)) {
+ log.debug("File doesn't exist: {}", pathToDel);
+ }
+ }
+ } else {
+ log.warn("Very strange path name: {}", delete);
+ }
}
}
@@ -700,6 +687,7 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
if (delete == null) {
return false;
}
+
int slashCount = 0;
for (int i = 0; i < delete.length(); i++) {
if (delete.charAt(i) == '/') {
@@ -709,6 +697,61 @@ public class SimpleGarbageCollector extends AbstractServer implements Iface {
return slashCount == 1;
}
+ @VisibleForTesting
+ static void minimizeDeletes(SortedMap<String,String> confirmedDeletes,
+ List<String> processedDeletes, VolumeManager fs) {
+ Set<Path> seenVolumes = new HashSet<Path>();
+ Collection<Volume> volumes = fs.getVolumes();
+
+ // when deleting a dir and all files in that dir, only need to delete the dir
+ // the dir will sort right before the files... so remove the files in this case
+ // to minimize namenode ops
+ Iterator<Entry<String,String>> cdIter = confirmedDeletes.entrySet().iterator();
+
+ String lastDirRel = null;
+ Path lastDirAbs = null;
+ while (cdIter.hasNext()) {
+ Entry<String,String> entry = cdIter.next();
+ String relPath = entry.getKey();
+ Path absPath = fs.getFullPath(FileType.TABLE, entry.getValue());
+
+ if (isDir(relPath)) {
+ lastDirRel = relPath;
+ lastDirAbs = absPath;
+ } else if (lastDirRel != null) {
+ if (relPath.startsWith(lastDirRel)) {
+ Path vol = FileType.TABLE.getVolume(absPath);
+
+ boolean sameVol = false;
+
+ if (GcVolumeUtil.isAllVolumesUri(lastDirAbs)) {
+ if (seenVolumes.contains(vol)) {
+ sameVol = true;
+ } else {
+ for (Volume cvol : volumes) {
+ if (cvol.isValidPath(vol)) {
+ seenVolumes.add(vol);
+ sameVol = true;
+ }
+ }
+ }
+ } else {
+ sameVol = FileType.TABLE.getVolume(lastDirAbs).equals(vol);
+ }
+
+ if (sameVol) {
+ log.info("Ignoring {} because {} exist", entry.getValue(), lastDirAbs);
+ processedDeletes.add(entry.getValue());
+ cdIter.remove();
+ }
+ } else {
+ lastDirRel = null;
+ lastDirAbs = null;
+ }
+ }
+ }
+ }
+
@Override
public GCStatus getStatus(TInfo info, TCredentials credentials) {
return status;
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
index bb39719..0b60ec6 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
@@ -275,10 +275,10 @@ public class GarbageCollectionTest {
gce.candidates.add("/c/t-0");
gce.candidates.add("hdfs://foo:6000/accumulo/tables/d/t-0");
- gce.addDirReference("4", null, "/t-0");
- gce.addDirReference("5", null, "/t-0");
- gce.addDirReference("6", null, "hdfs://foo.com:6000/accumulo/tables/6/t-0");
- gce.addDirReference("7", null, "hdfs://foo.com:6000/accumulo/tables/7/t-0");
+ gce.addDirReference("4", null, "t-0");
+ gce.addDirReference("5", null, "t-0");
+ gce.addDirReference("6", null, "t-0");
+ gce.addDirReference("7", null, "t-0");
gce.addFileReference("8", "m", "/t-0/F00.rf");
gce.addFileReference("9", "m", "/t-0/F00.rf");
@@ -339,10 +339,10 @@ public class GarbageCollectionTest {
gce.candidates.add("/c/t-0");
gce.candidates.add("hdfs://foo:6000/user/foo/tables/d/t-0");
- gce.addDirReference("4", null, "/t-0");
- gce.addDirReference("5", null, "/t-0");
- gce.addDirReference("6", null, "hdfs://foo.com:6000/user/foo/tables/6/t-0");
- gce.addDirReference("7", null, "hdfs://foo.com:6000/user/foo/tables/7/t-0");
+ gce.addDirReference("4", null, "t-0");
+ gce.addDirReference("5", null, "t-0");
+ gce.addDirReference("6", null, "t-0");
+ gce.addDirReference("7", null, "t-0");
gce.addFileReference("8", "m", "/t-0/F00.rf");
gce.addFileReference("9", "m", "/t-0/F00.rf");
@@ -464,7 +464,7 @@ public class GarbageCollectionTest {
TestGCE gce = new TestGCE();
gce.candidates.add("/1636/default_tablet");
- gce.addDirReference("1636", null, "/default_tablet");
+ gce.addDirReference("1636", null, "default_tablet");
gca.collect(gce);
assertRemoved(gce);
@@ -481,7 +481,7 @@ public class GarbageCollectionTest {
// have an indirect file reference
gce = new TestGCE();
gce.addFileReference("1636", null, "../9/default_tablet/someFile");
- gce.addDirReference("1636", null, "/default_tablet");
+ gce.addDirReference("1636", null, "default_tablet");
gce.candidates.add("/9/default_tablet/someFile");
gca.collect(gce);
assertRemoved(gce);
@@ -520,7 +520,7 @@ public class GarbageCollectionTest {
gce.candidates.add("/6/t-0");
gce.candidates.add("hdfs://foo:6000/accumulo/tables/7/t-0/");
- gce.addDirReference("7", null, "hdfs://foo.com:6000/accumulo/tables/7/t-0");
+ gce.addDirReference("7", null, "t-0");
gca.collect(gce);
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
index a9f32b6..e1b856b 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
@@ -17,9 +17,12 @@
package org.apache.accumulo.gc;
import static org.apache.accumulo.gc.SimpleGarbageCollector.CANDIDATE_MEMORY_PERCENTAGE;
+import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
import static org.easymock.EasyMock.partialMockBuilder;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
@@ -29,15 +32,24 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.TreeMap;
import org.apache.accumulo.core.clientImpl.Credentials;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.volume.Volume;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.server.security.SystemCredentials;
import org.apache.hadoop.fs.Path;
import org.junit.Before;
@@ -147,10 +159,67 @@ public class SimpleGarbageCollectorTest {
@Test
public void testIsDir() {
+ assertTrue(SimpleGarbageCollector.isDir("tid1/dir1"));
assertTrue(SimpleGarbageCollector.isDir("/dir1"));
assertFalse(SimpleGarbageCollector.isDir("file1"));
assertFalse(SimpleGarbageCollector.isDir("/dir1/file1"));
assertFalse(SimpleGarbageCollector.isDir(""));
assertFalse(SimpleGarbageCollector.isDir(null));
}
+
+ @Test
+ // Verifies minimizeDeletes drops file deletes that are covered by a pending directory
+ // delete: plain dir URIs cover only same-volume files, while all-volumes (agcav:) dir
+ // URIs cover files on any *configured* volume (nn1/nn2 here, but not nn3).
+ public void testMinimizeDeletes() {
+ // vol1/vol2 are the configured volumes; isValidPath matches by URI prefix
+ Volume vol1 = createMock(Volume.class);
+ expect(vol1.isValidPath(anyObject()))
+ .andAnswer(() -> getCurrentArguments()[0].toString().startsWith("hdfs://nn1/accumulo"))
+ .anyTimes();
+
+ Volume vol2 = createMock(Volume.class);
+ expect(vol2.isValidPath(anyObject()))
+ .andAnswer(() -> getCurrentArguments()[0].toString().startsWith("hdfs://nn2/accumulo"))
+ .anyTimes();
+
+ Collection<Volume> vols = Arrays.asList(vol1, vol2);
+
+ VolumeManager volMgr2 = createMock(VolumeManager.class);
+ expect(volMgr2.getVolumes()).andReturn(vols).anyTimes();
+ // getFullPath just echoes the absolute URI it is given
+ expect(volMgr2.getFullPath(eq(FileType.TABLE), anyObject()))
+ .andAnswer(() -> new Path(getCurrentArguments()[1].toString())).anyTimes();
+
+ replay(vol1, vol2, volMgr2);
+
+ // candidate map: relative key -> absolute delete URI (sorted, dirs before their files)
+ TreeMap<String,String> confirmed = new TreeMap<>();
+ confirmed.put("5a/t-0001", "hdfs://nn1/accumulo/tables/5a/t-0001");
+ confirmed.put("5a/t-0001/F0001.rf", "hdfs://nn1/accumulo/tables/5a/t-0001/F0001.rf");
+ confirmed.put("5a/t-0001/F0002.rf", "hdfs://nn1/accumulo/tables/5a/t-0001/F0002.rf");
+ confirmed.put("5a/t-0002/F0001.rf", "hdfs://nn1/accumulo/tables/5a/t-0002/F0001.rf");
+ confirmed.put("5b/t-0003",
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(TableId.of("5b"), "t-0003"));
+ confirmed.put("5b/t-0003/F0001.rf", "hdfs://nn1/accumulo/tables/5b/t-0003/F0001.rf");
+ confirmed.put("5b/t-0003/F0002.rf", "hdfs://nn2/accumulo/tables/5b/t-0003/F0002.rf");
+ // nn3 is NOT a configured volume, so this file must survive the all-volumes dir delete
+ confirmed.put("5b/t-0003/F0003.rf", "hdfs://nn3/accumulo/tables/5b/t-0003/F0003.rf");
+ confirmed.put("5b/t-0004",
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(TableId.of("5b"), "t-0004"));
+ confirmed.put("5b/t-0004/F0001.rf", "hdfs://nn1/accumulo/tables/5b/t-0004/F0001.rf");
+
+ List<String> processedDeletes = new ArrayList<>();
+
+ SimpleGarbageCollector.minimizeDeletes(confirmed, processedDeletes, volMgr2);
+
+ // expected survivors: the dirs themselves, plus files not covered by any dir delete
+ TreeMap<String,String> expected = new TreeMap<>();
+ expected.put("5a/t-0001", "hdfs://nn1/accumulo/tables/5a/t-0001");
+ expected.put("5a/t-0002/F0001.rf", "hdfs://nn1/accumulo/tables/5a/t-0002/F0001.rf");
+ expected.put("5b/t-0003",
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(TableId.of("5b"), "t-0003"));
+ expected.put("5b/t-0003/F0003.rf", "hdfs://nn3/accumulo/tables/5b/t-0003/F0003.rf");
+ expected.put("5b/t-0004",
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(TableId.of("5b"), "t-0004"));
+
+ assertEquals(expected, confirmed);
+ // pruned file deletes are reported as processed, in map iteration order
+ assertEquals(Arrays.asList("hdfs://nn1/accumulo/tables/5a/t-0001/F0001.rf",
+ "hdfs://nn1/accumulo/tables/5a/t-0001/F0002.rf",
+ "hdfs://nn1/accumulo/tables/5b/t-0003/F0001.rf",
+ "hdfs://nn2/accumulo/tables/5b/t-0003/F0002.rf",
+ "hdfs://nn1/accumulo/tables/5b/t-0004/F0001.rf"), processedDeletes);
+ }
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index 217573a..179b7c6 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -34,7 +34,6 @@ import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
-import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.BatchWriter;
@@ -61,6 +60,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Ch
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
@@ -69,12 +69,9 @@ import org.apache.accumulo.master.Master.TabletGoalState;
import org.apache.accumulo.master.state.MergeStats;
import org.apache.accumulo.master.state.TableCounts;
import org.apache.accumulo.master.state.TableStats;
-import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.conf.TableConfiguration;
import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
-import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.server.log.WalStateManager;
import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
@@ -626,17 +623,9 @@ abstract class TabletGroupWatcher extends Daemon {
throw new IllegalStateException(
"Tablet " + key.getRow() + " is assigned during a merge!");
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
- // ACCUMULO-2974 Need to include the TableID when converting a relative path to an
- // absolute path.
- // The value has the leading path separator already included so it doesn't need it
- // included.
- String path = entry.getValue().toString();
- if (path.contains(":")) {
- datafiles.add(new FileRef(path));
- } else {
- datafiles.add(new FileRef(path, this.master.fs.getFullPath(FileType.TABLE,
- Path.SEPARATOR + extent.getTableId() + path)));
- }
+ String path = GcVolumeUtil.getDeleteTabletOnAllVolumesUri(extent.getTableId(),
+ entry.getValue().toString());
+ datafiles.add(new FileRef(path));
if (datafiles.size() > 1000) {
MetadataTableUtil.addDeleteEntries(extent, datafiles, master.getContext());
datafiles.clear();
@@ -666,16 +655,10 @@ abstract class TabletGroupWatcher extends Daemon {
}
} else {
// Recreate the default tablet to hold the end of the table
- Master.log.debug("Recreating the last tablet to point to {}", extent.getPrevEndRow());
- VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(extent.getTableId(),
- extent.getEndRow(), master.getContext());
-
- String tdir = master.getFileSystem().choose(chooserEnv,
- ServerConstants.getBaseUris(master.getContext())) + Constants.HDFS_TABLES_DIR
- + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
MetadataTableUtil.addTablet(
- new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir,
- master.getContext(), metadataTime.getType(), this.master.masterLock);
+ new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()),
+ ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, master.getContext(), metadataTime.getType(),
+ this.master.masterLock);
}
} catch (RuntimeException | TableNotFoundException ex) {
throw new AccumuloException(ex);
@@ -727,8 +710,10 @@ abstract class TabletGroupWatcher extends Daemon {
maxLogicalTime =
TabletTime.maxMetadataTime(maxLogicalTime, MetadataTime.parse(value.toString()));
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
- bw.addMutation(ServerAmpleImpl.createDeleteMutation(master.getContext(),
- range.getTableId(), entry.getValue().toString()));
+ String uri =
+ GcVolumeUtil.getDeleteTabletOnAllVolumesUri(range.getTableId(), value.toString());
+ bw.addMutation(
+ ServerAmpleImpl.createDeleteMutation(master.getContext(), range.getTableId(), uri));
}
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
index cf11383..210c876 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
@@ -44,7 +44,6 @@ public class TableInfo implements Serializable {
private String splitDirsFile;
public Map<String,String> props;
- public String defaultTabletDir = null;
public String getTableName() {
return tableName;
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/clone/CloneMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/clone/CloneMetadata.java
index a71f9cf..02e2f77 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/clone/CloneMetadata.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/clone/CloneMetadata.java
@@ -45,8 +45,7 @@ class CloneMetadata extends MasterRepo {
// died before and is executing again
MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment.getContext(),
environment.getMasterLock());
- MetadataTableUtil.cloneTable(environment.getContext(), cloneInfo.srcTableId, cloneInfo.tableId,
- environment.getFileSystem());
+ MetadataTableUtil.cloneTable(environment.getContext(), cloneInfo.srcTableId, cloneInfo.tableId);
return new FinishCloneTable(cloneInfo);
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
index f9ede43..fcc15c5 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
@@ -26,9 +26,6 @@ import org.apache.accumulo.master.Master;
import org.apache.accumulo.master.tableOps.MasterRepo;
import org.apache.accumulo.master.tableOps.TableInfo;
import org.apache.accumulo.master.tableOps.Utils;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.tablets.UniqueNameAllocator;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -52,21 +49,10 @@ class ChooseDir extends MasterRepo {
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
- // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add
- // one here
-
- VolumeChooserEnvironment chooserEnv =
- new VolumeChooserEnvironmentImpl(tableInfo.getTableId(), null, master.getContext());
-
- String baseDir =
- master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris(master.getContext()))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableInfo.getTableId();
- tableInfo.defaultTabletDir = baseDir + Constants.DEFAULT_TABLET_LOCATION;
-
if (tableInfo.getInitialSplitSize() > 0) {
- createTableDirectoriesInfo(master, baseDir);
+ createTableDirectoriesInfo(master);
}
- return new CreateDir(tableInfo);
+ return new PopulateMetadata(tableInfo);
}
@Override
@@ -79,11 +65,10 @@ class ChooseDir extends MasterRepo {
* Create unique table directory names that will be associated with split values. Then write these
* to the file system for later use during this FATE operation.
*/
- private void createTableDirectoriesInfo(Master master, String baseDir) throws IOException {
+ private void createTableDirectoriesInfo(Master master) throws IOException {
SortedSet<Text> splits =
Utils.getSortedSetFromFile(master.getInputStream(tableInfo.getSplitFile()), true);
- SortedSet<Text> tabletDirectoryInfo =
- createTabletDirectoriesSet(master, splits.size(), baseDir);
+ SortedSet<Text> tabletDirectoryInfo = createTabletDirectoriesSet(master, splits.size());
writeTabletDirectoriesToFileSystem(master, tabletDirectoryInfo);
}
@@ -91,13 +76,13 @@ class ChooseDir extends MasterRepo {
* Create a set of unique table directories. These will be associated with splits in a follow-on
* FATE step.
*/
- private SortedSet<Text> createTabletDirectoriesSet(Master master, int num, String baseDir) {
+ private SortedSet<Text> createTabletDirectoriesSet(Master master, int num) {
String tabletDir;
UniqueNameAllocator namer = master.getContext().getUniqueNameAllocator();
SortedSet<Text> splitDirs = new TreeSet<>();
for (int i = 0; i < num; i++) {
- tabletDir = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
- splitDirs.add(new Text(baseDir + "/" + new Path(tabletDir).getName()));
+ tabletDir = Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
+ splitDirs.add(new Text(tabletDir));
}
return splitDirs;
}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/CreateDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/CreateDir.java
deleted file mode 100644
index 8ef9747..0000000
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/CreateDir.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.master.tableOps.create;
-
-import java.io.IOException;
-import java.util.SortedSet;
-
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.tableOps.MasterRepo;
-import org.apache.accumulo.master.tableOps.TableInfo;
-import org.apache.accumulo.master.tableOps.Utils;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-class CreateDir extends MasterRepo {
- private static final long serialVersionUID = 1L;
-
- private final TableInfo tableInfo;
-
- CreateDir(TableInfo ti) {
- this.tableInfo = ti;
- }
-
- @Override
- public long isReady(long tid, Master environment) {
- return 0;
- }
-
- @Override
- public Repo<Master> call(long tid, Master master) throws Exception {
- VolumeManager fs = master.getFileSystem();
- fs.mkdirs(new Path(tableInfo.defaultTabletDir));
-
- // read in the splitDir info file and create a directory for each item
- if (tableInfo.getInitialSplitSize() > 0) {
- SortedSet<Text> dirInfo =
- Utils.getSortedSetFromFile(master.getInputStream(tableInfo.getSplitDirsFile()), false);
- createTabletDirectories(master.getFileSystem(), dirInfo);
- }
- return new PopulateMetadata(tableInfo);
- }
-
- @Override
- public void undo(long tid, Master master) throws Exception {
- VolumeManager fs = master.getFileSystem();
- fs.deleteRecursively(new Path(tableInfo.defaultTabletDir));
-
- if (tableInfo.getInitialSplitSize() > 0) {
- SortedSet<Text> dirInfo =
- Utils.getSortedSetFromFile(master.getInputStream(tableInfo.getSplitDirsFile()), false);
- for (Text dirname : dirInfo) {
- fs.deleteRecursively(new Path(dirname.toString()));
- }
- }
- }
-
- private void createTabletDirectories(VolumeManager fs, SortedSet<Text> dirInfo)
- throws IOException {
-
- for (Text dir : dirInfo) {
- if (!fs.mkdirs(new Path(dir.toString())))
- throw new IOException("Failed to create tablet directory: " + dir);
- }
- }
-}
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/PopulateMetadata.java
index a5ff669..043a310 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/PopulateMetadata.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/create/PopulateMetadata.java
@@ -30,6 +30,7 @@ import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
import org.apache.accumulo.fate.Repo;
import org.apache.accumulo.fate.zookeeper.ZooLock;
@@ -62,8 +63,8 @@ class PopulateMetadata extends MasterRepo {
@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
KeyExtent extent = new KeyExtent(tableInfo.getTableId(), null, null);
- MetadataTableUtil.addTablet(extent, tableInfo.defaultTabletDir, environment.getContext(),
- tableInfo.getTimeType(), environment.getMasterLock());
+ MetadataTableUtil.addTablet(extent, ServerColumnFamily.DEFAULT_TABLET_DIR_NAME,
+ environment.getContext(), tableInfo.getTimeType(), environment.getMasterLock());
if (tableInfo.getInitialSplitSize() > 0) {
SortedSet<Text> splits =
@@ -86,8 +87,8 @@ class PopulateMetadata extends MasterRepo {
Value dirValue;
for (Text split : Iterables.concat(splits, Collections.singleton(null))) {
Mutation mut = new KeyExtent(tableId, split, prevSplit).getPrevRowUpdateMutation();
- dirValue =
- (split == null) ? new Value(tableInfo.defaultTabletDir) : new Value(data.get(split));
+ dirValue = (split == null) ? new Value(ServerColumnFamily.DEFAULT_TABLET_DIR_NAME)
+ : new Value(data.get(split));
MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, dirValue);
MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut,
new Value(new MetadataTime(0, timeType).encode()));
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
index 3f4767c..859960e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/PopulateMetadataTable.java
@@ -45,9 +45,6 @@ import org.apache.accumulo.core.util.FastFormat;
import org.apache.accumulo.fate.Repo;
import org.apache.accumulo.master.Master;
import org.apache.accumulo.master.tableOps.MasterRepo;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.util.MetadataTableUtil;
import org.apache.hadoop.fs.Path;
@@ -107,8 +104,6 @@ class PopulateMetadataTable extends MasterRepo {
// hdfs://localhost:8020/path/to/accumulo/tables/...
final String bulkDir = tableInfo.importDir;
- final String[] volumes = ServerConstants.getBaseUris(master.getContext());
-
ZipEntry zipEntry;
while ((zipEntry = zis.getNextEntry()) != null) {
if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
@@ -152,12 +147,8 @@ class PopulateMetadataTable extends MasterRepo {
FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES),
UTF_8);
- // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
- String absolutePath = getClonedTabletDir(master, endRow, volumes, tabletDir);
-
m = new Mutation(metadataRow);
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
- new Value(absolutePath.getBytes(UTF_8)));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(tabletDir));
currentRow = metadataRow;
}
@@ -170,12 +161,8 @@ class PopulateMetadataTable extends MasterRepo {
FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES),
UTF_8);
- // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
- String absolutePath = getClonedTabletDir(master, endRow, volumes, tabletDir);
-
m = new Mutation(metadataRow);
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
- new Value(absolutePath.getBytes(UTF_8)));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(tabletDir));
}
m.put(key.getColumnFamily(), cq, val);
@@ -212,23 +199,6 @@ class PopulateMetadataTable extends MasterRepo {
}
}
- /**
- * Given options for tables (across multiple volumes), construct an absolute path using the unique
- * name within the chosen volume
- *
- * @return An absolute, unique path for the imported table
- */
- protected String getClonedTabletDir(Master master, Text endRow, String[] volumes,
- String tabletDir) {
- // We can try to spread out the tablet dirs across all volumes
- VolumeChooserEnvironment chooserEnv =
- new VolumeChooserEnvironmentImpl(tableInfo.tableId, endRow, master.getContext());
- String volume = master.getFileSystem().choose(chooserEnv, volumes);
-
- // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
- return volume + "/" + ServerConstants.TABLE_DIR + "/" + tableInfo.tableId + "/" + tabletDir;
- }
-
@Override
public void undo(long tid, Master environment) throws Exception {
MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment.getContext(),
diff --git a/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java b/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
index c349f8d..99d0ebe 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/upgrade/Upgrader9to10.java
@@ -20,6 +20,7 @@ package org.apache.accumulo.master.upgrade;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET;
import static org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET_GC_CANDIDATES;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
import static org.apache.accumulo.server.util.MetadataTableUtil.EMPTY_TEXT;
@@ -32,19 +33,23 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import java.util.stream.StreamSupport;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
@@ -65,6 +70,7 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.server.master.state.TServerInstance;
import org.apache.accumulo.server.metadata.RootGcCandidates;
import org.apache.accumulo.server.metadata.ServerAmpleImpl;
@@ -73,9 +79,11 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
@@ -107,15 +115,17 @@ public class Upgrader9to10 implements Upgrader {
@Override
public void upgradeMetadata(ServerContext ctx) {
+ upgradeDirColumns(ctx, Ample.DataLevel.METADATA);
upgradeFileDeletes(ctx, Ample.DataLevel.METADATA);
- upgradeFileDeletes(ctx, Ample.DataLevel.USER);
+ upgradeDirColumns(ctx, Ample.DataLevel.USER);
+ upgradeFileDeletes(ctx, Ample.DataLevel.USER);
}
private void upgradeRootTabletMetadata(ServerContext ctx) {
String rootMetaSer = getFromZK(ctx, ZROOT_TABLET);
- if (rootMetaSer.isEmpty()) {
+ if (rootMetaSer == null || rootMetaSer.isEmpty()) {
String dir = getFromZK(ctx, ZROOT_TABLET_PATH);
List<LogEntry> logs = getRootLogEntries(ctx);
@@ -127,7 +137,7 @@ public class Upgrader9to10 implements Upgrader {
tabletMutator.putPrevEndRow(RootTable.EXTENT.getPrevEndRow());
- tabletMutator.putDir(dir);
+ tabletMutator.putDirName(upgradeDirColumn(dir));
if (last != null)
tabletMutator.putLocation(last, LocationType.LAST);
@@ -264,6 +274,8 @@ public class Upgrader9to10 implements Upgrader {
return null;
return new String(data, StandardCharsets.UTF_8);
+ } catch (NoNodeException e) {
+ return null;
} catch (KeeperException | InterruptedException e) {
throw new RuntimeException(e);
}
@@ -281,6 +293,8 @@ public class Upgrader9to10 implements Upgrader {
MetadataTime computeRootTabletTime(ServerContext context, Collection<String> goodPaths) {
try {
+ context.setupCrypto();
+
long rtime = Long.MIN_VALUE;
for (String good : goodPaths) {
Path path = new Path(good);
@@ -397,7 +411,11 @@ public class Upgrader9to10 implements Upgrader {
for (String olddelete : deletes) {
// create new formatted delete
log.trace("upgrading delete entry for {}", olddelete);
- writer.addMutation(ServerAmpleImpl.createDeleteMutation(ctx, level.tableId(), olddelete));
+
+ String updatedDel = switchToAllVolumes(olddelete);
+
+ writer
+ .addMutation(ServerAmpleImpl.createDeleteMutation(ctx, level.tableId(), updatedDel));
}
writer.flush();
// if nothing thrown then we're good so mark all deleted
@@ -414,6 +432,26 @@ public class Upgrader9to10 implements Upgrader {
}
}
+ @VisibleForTesting
+ // Converts a pre-upgrade delete marker to the new form: tablet-directory markers become
+ // all-volumes (agcav:) URIs so the GC removes the dir on every configured volume; file
+ // and bulk-import markers are returned unchanged.
+ static String switchToAllVolumes(String olddelete) {
+ Path relPath = VolumeManager.FileType.TABLE.removeVolume(new Path(olddelete));
+
+ if (relPath == null) {
+ // An old style relative delete marker of the form /<table id>/<tablet dir>[/<file>]
+ relPath = new Path("/" + VolumeManager.FileType.TABLE.getDirectory() + olddelete);
+ // must be /tables/<tid>/<dir> (depth 3) or /tables/<tid>/<dir>/<file> (depth 4);
+ // parentheses required — "a && b || c" would accept a depth-4 marker lacking the
+ // leading slash. Guava Preconditions uses %s placeholders, not SLF4J-style {}.
+ Preconditions.checkState(
+ olddelete.startsWith("/") && (relPath.depth() == 3 || relPath.depth() == 4),
+ "Unrecognized relative delete marker %s", olddelete);
+ }
+
+ // depth 3 == a tablet dir (unless it is a bulk import dir, which stays volume-specific)
+ if (relPath.depth() == 3 && !relPath.getName().startsWith(Constants.BULK_PREFIX)) {
+ return GcVolumeUtil.getDeleteTabletOnAllVolumesUri(TableId.of(relPath.getParent().getName()),
+ relPath.getName());
+ } else {
+ return olddelete;
+ }
+ }
+
private Iterator<String> getOldCandidates(ServerContext ctx, String tableName)
throws TableNotFoundException {
Range range = MetadataSchema.DeletesSection.getRange();
@@ -453,4 +491,25 @@ public class Upgrader9to10 implements Upgrader {
return false;
}
+ public void upgradeDirColumns(ServerContext ctx, Ample.DataLevel level) {
+ String tableName = level.metaTable();
+ AccumuloClient c = ctx;
+
+ try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig())) {
+ DIRECTORY_COLUMN.fetch(scanner);
+
+ for (Entry<Key,Value> entry : scanner) {
+ Mutation m = new Mutation(entry.getKey().getRow());
+ DIRECTORY_COLUMN.put(m, new Value(upgradeDirColumn(entry.getValue().toString())));
+ writer.addMutation(m);
+ }
+ } catch (TableNotFoundException | AccumuloException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static String upgradeDirColumn(String dir) {
+ return new Path(dir).getName();
+ }
}
diff --git a/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java b/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
index 5743676..2feb8c7 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
@@ -48,9 +48,8 @@ public class RootTabletStateStoreTest {
private static class TestAmple implements Ample {
- private String json =
- new String(RootTabletMetadata.getInitialJson("/some/dir", "/some/dir/0000.rf"),
- StandardCharsets.UTF_8);
+ private String json = new String(RootTabletMetadata.getInitialJson("dir", "/some/dir/0000.rf"),
+ StandardCharsets.UTF_8);
@Override
public TabletMetadata readTablet(KeyExtent extent, ColumnType... colsToFetch) {
diff --git a/server/master/src/test/java/org/apache/accumulo/master/tableOps/tableImport/ImportTableTest.java b/server/master/src/test/java/org/apache/accumulo/master/tableOps/tableImport/ImportTableTest.java
deleted file mode 100644
index 5bd1d07..0000000
--- a/server/master/src/test/java/org/apache/accumulo/master/tableOps/tableImport/ImportTableTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.master.tableOps.tableImport;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
-import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.easymock.EasyMock;
-import org.junit.Test;
-
-public class ImportTableTest {
-
- @Test
- public void testTabletDir() {
- Master master = EasyMock.createMock(Master.class);
- VolumeManager volumeManager = EasyMock.createMock(VolumeManager.class);
- ImportedTableInfo iti = new ImportedTableInfo();
- iti.tableId = TableId.of("5");
-
- // Different volumes with different paths
- String[] volumes = {"hdfs://nn1:8020/apps/accumulo1", "hdfs://nn2:8020/applications/accumulo"};
- // This needs to be unique WRT the importtable command
- String tabletDir = "/c-00000001";
-
- EasyMock.expect(master.getContext()).andReturn(null);
- EasyMock.expect(master.getFileSystem()).andReturn(volumeManager);
- // Choose the 2nd element
- VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(iti.tableId, null, null);
- EasyMock.expect(volumeManager.choose(EasyMock.eq(chooserEnv), EasyMock.eq(volumes)))
- .andReturn(volumes[1]);
-
- EasyMock.replay(master, volumeManager);
-
- PopulateMetadataTable pmt = new PopulateMetadataTable(iti);
- assertEquals(volumes[1] + "/" + ServerConstants.TABLE_DIR + "/" + iti.tableId + "/" + tabletDir,
- pmt.getClonedTabletDir(master, null, volumes, tabletDir));
-
- EasyMock.verify(master, volumeManager);
- }
-
-}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 0d49dd4..ec8c636 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -2257,8 +2257,8 @@ public class TabletServer extends AbstractServer {
// tell the master
enqueueMasterMessage(new SplitReportMessage(tablet.getExtent(), newTablets[0].getExtent(),
- new Text("/" + newTablets[0].getLocation().getName()), newTablets[1].getExtent(),
- new Text("/" + newTablets[1].getLocation().getName())));
+ new Text("/" + newTablets[0].getDirName()), newTablets[1].getExtent(),
+ new Text("/" + newTablets[1].getDirName())));
statsKeeper.updateTime(Operation.SPLIT, start, false);
long t2 = System.currentTimeMillis();
@@ -2961,7 +2961,7 @@ public class TabletServer extends AbstractServer {
return false;
}
- if (meta.getDir() == null) {
+ if (meta.getDirName() == null) {
throw new AccumuloException(
"Metadata entry does not have directory (" + meta.getExtent() + ")");
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 1e89624..2eebd74 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -39,6 +39,7 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
@@ -62,7 +63,6 @@ import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
@@ -78,6 +78,7 @@ import org.apache.accumulo.core.master.thrift.BulkImportState;
import org.apache.accumulo.core.master.thrift.TabletLoadState;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
import org.apache.accumulo.core.protobuf.ProtobufUtil;
import org.apache.accumulo.core.replication.ReplicationConfigurationUtil;
@@ -91,6 +92,7 @@ import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.ShutdownUtil;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
+import org.apache.accumulo.core.volume.Volume;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.conf.TableConfiguration;
@@ -98,7 +100,6 @@ import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManager.FileType;
import org.apache.accumulo.server.fs.VolumeUtil;
import org.apache.accumulo.server.fs.VolumeUtil.TabletFiles;
import org.apache.accumulo.server.master.state.TServerInstance;
@@ -170,8 +171,7 @@ public class Tablet {
private final TabletResourceManager tabletResources;
private final DatafileManager datafileManager;
private final TableConfiguration tableConfiguration;
- private final String tabletDirectory;
- private final Path location; // absolute path of this tablets dir
+ private final String dirName;
private final TabletMemory tabletMemory;
@@ -180,7 +180,7 @@ public class Tablet {
private long persistedTime;
private TServerInstance lastLocation = null;
- private volatile boolean tableDirChecked = false;
+ private volatile Set<Path> checkedTabletDirs = new ConcurrentSkipListSet<>();
private final AtomicLong dataSourceDeletions = new AtomicLong(0);
@@ -274,32 +274,38 @@ public class Tablet {
public boolean closed = false;
}
+ private String chooseTabletDir() throws IOException {
+ VolumeChooserEnvironment chooserEnv =
+ new VolumeChooserEnvironmentImpl(extent.getTableId(), extent.getEndRow(), context);
+ String dirUri =
+ tabletServer.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris(context))
+ + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Path.SEPARATOR
+ + dirName;
+ checkTabletDir(new Path(dirUri));
+ return dirUri;
+ }
+
FileRef getNextMapFilename(String prefix) throws IOException {
String extension = FileOperations.getNewFileExtension(tableConfiguration);
- checkTabletDir();
- return new FileRef(
- location + "/" + prefix + context.getUniqueNameAllocator().getNextName() + "." + extension);
+ return new FileRef(chooseTabletDir() + "/" + prefix
+ + context.getUniqueNameAllocator().getNextName() + "." + extension);
}
- private void checkTabletDir() throws IOException {
- if (!tableDirChecked) {
+ private void checkTabletDir(Path path) throws IOException {
+ if (!checkedTabletDirs.contains(path)) {
FileStatus[] files = null;
try {
- files = getTabletServer().getFileSystem().listStatus(location);
+ files = getTabletServer().getFileSystem().listStatus(path);
} catch (FileNotFoundException ex) {
// ignored
}
if (files == null) {
- if (location.getName().startsWith(Constants.CLONE_PREFIX)) {
- log.debug("Tablet {} had no dir, creating {}", extent, location); // its a clone dir...
- } else {
- log.warn("Tablet {} had no dir, creating {}", extent, location);
- }
+ log.debug("Tablet {} had no dir, creating {}", extent, path);
- getTabletServer().getFileSystem().mkdirs(location);
+ getTabletServer().getFileSystem().mkdirs(path);
}
- tableDirChecked = true;
+ checkedTabletDirs.add(path);
}
}
@@ -333,20 +339,12 @@ public class Tablet {
boolean replicationEnabled =
ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration);
TabletFiles tabletPaths =
- new TabletFiles(data.getDirectory(), data.getLogEntries(), data.getDataFiles());
+ new TabletFiles(data.getDirectoryName(), data.getLogEntries(), data.getDataFiles());
tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer.getContext(), tabletServer.getLock(),
fs, extent, tabletPaths, replicationEnabled);
- // deal with relative path for the directory
- Path locationPath;
- if (tabletPaths.dir.contains(":")) {
- locationPath = new Path(tabletPaths.dir);
- } else {
- locationPath = tabletServer.getFileSystem().getFullPath(FileType.TABLE,
- extent.getTableId() + tabletPaths.dir);
- }
- this.location = locationPath;
- this.tabletDirectory = tabletPaths.dir;
+ this.dirName = data.getDirectoryName();
+
for (Entry<Long,List<FileRef>> entry : data.getBulkImported().entrySet()) {
this.bulkImported.put(entry.getKey(), new ArrayList<>(entry.getValue()));
}
@@ -487,17 +485,22 @@ public class Tablet {
private void removeOldTemporaryFiles() {
// remove any temporary files created by a previous tablet server
try {
- for (FileStatus tmp : getTabletServer().getFileSystem()
- .globStatus(new Path(location, "*_tmp"))) {
- try {
- log.debug("Removing old temp file {}", tmp.getPath());
- getTabletServer().getFileSystem().delete(tmp.getPath());
- } catch (IOException ex) {
- log.error("Unable to remove old temp file " + tmp.getPath() + ": " + ex);
+ Collection<Volume> volumes = getTabletServer().getFileSystem().getVolumes();
+ for (Volume volume : volumes) {
+ String dirUri = volume.getBasePath() + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+ + extent.getTableId() + Path.SEPARATOR + dirName;
+
+ for (FileStatus tmp : getTabletServer().getFileSystem()
+ .globStatus(new Path(dirUri, "*_tmp"))) {
+ try {
+ log.debug("Removing old temp file {}", tmp.getPath());
+ getTabletServer().getFileSystem().delete(tmp.getPath());
+ } catch (IOException ex) {
+ log.error("Unable to remove old temp file " + tmp.getPath() + ": " + ex);
+ }
}
}
} catch (IOException ex) {
- log.error("Error scanning for old temp files in {}", location);
+ log.error("Error scanning for old temp files", ex);
}
}
@@ -1453,15 +1456,6 @@ public class Tablet {
// TODO check lastFlushID and lostCompactID - ACCUMULO-1290
}
- /**
- * Returns a Path object representing the tablet's location on the DFS.
- *
- * @return location
- */
- public Path getLocation() {
- return location;
- }
-
public synchronized void initiateMajorCompaction(MajorCompactionReason reason) {
if (isClosing() || isClosed() || !needsMajorCompaction(reason) || isMajorCompactionRunning()
@@ -1532,7 +1526,7 @@ public class Tablet {
try {
// we should make .25 below configurable
- keys = FileUtil.findMidPoint(context, tabletDirectory, extent.getPrevEndRow(),
+ keys = FileUtil.findMidPoint(context, chooseTabletDir(), extent.getPrevEndRow(),
extent.getEndRow(), FileUtil.toPathStrings(files), .25);
} catch (IOException e) {
log.error("Failed to find midpoint {}", e.getMessage());
@@ -2210,7 +2204,7 @@ public class Tablet {
} else {
Text tsp = new Text(sp);
splitPoint = new SplitRowSpec(
- FileUtil.estimatePercentageLTE(context, tabletDirectory, extent.getPrevEndRow(),
+ FileUtil.estimatePercentageLTE(context, chooseTabletDir(), extent.getPrevEndRow(),
extent.getEndRow(), FileUtil.toPathStrings(getDatafileManager().getFiles()), tsp),
tsp);
}
@@ -2230,8 +2224,7 @@ public class Tablet {
KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
- String lowDirectory = createTabletDirectory(context, getTabletServer().getFileSystem(),
- extent.getTableId(), midRow);
+ String lowDirectoryName = createTabletDirectoryName(context, midRow);
// write new tablet information to MetadataTable
SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<>();
@@ -2249,7 +2242,7 @@ public class Tablet {
MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio,
getTabletServer().getContext(), getTabletServer().getLock());
- MasterMetadataUtil.addNewTablet(getTabletServer().getContext(), low, lowDirectory,
+ MasterMetadataUtil.addNewTablet(getTabletServer().getContext(), low, lowDirectoryName,
getTabletServer().getTabletSession(), lowDatafileSizes, bulkImported, time, lastFlushID,
lastCompactID, getTabletServer().getLock());
MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove,
@@ -2257,9 +2250,9 @@ public class Tablet {
log.debug("TABLET_HIST {} split {} {}", extent, low, high);
- newTablets.put(high, new TabletData(tabletDirectory, highDatafileSizes, time, lastFlushID,
+ newTablets.put(high, new TabletData(dirName, highDatafileSizes, time, lastFlushID,
lastCompactID, lastLocation, bulkImported));
- newTablets.put(low, new TabletData(lowDirectory, lowDatafileSizes, time, lastFlushID,
+ newTablets.put(low, new TabletData(lowDirectoryName, lowDatafileSizes, time, lastFlushID,
lastCompactID, lastLocation, bulkImported));
long t2 = System.currentTimeMillis();
@@ -2806,49 +2799,12 @@ public class Tablet {
return scannedCount;
}
- private static String createTabletDirectory(ServerContext context, VolumeManager fs,
- TableId tableId, Text endRow) {
- String lowDirectory;
-
- UniqueNameAllocator namer = context.getUniqueNameAllocator();
- VolumeChooserEnvironment chooserEnv =
- new VolumeChooserEnvironmentImpl(tableId, endRow, context);
- String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris(context))
- + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
-
- while (true) {
- try {
- if (endRow == null) {
- lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
- Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
- if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
- FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
- return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory())
- .toString();
- }
- log.warn("Failed to create {} for unknown reason", lowDirectoryPath);
- } else {
- lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
- Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
- if (fs.exists(lowDirectoryPath)) {
- throw new IllegalStateException("Attempting to create tablet dir for tableID " + tableId
- + " and dir exists when it should not: " + lowDirectoryPath);
- }
- if (fs.mkdirs(lowDirectoryPath)) {
- FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
- return lowDirectoryPath
- .makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory())
- .toString();
- }
- }
- } catch (IOException e) {
- log.warn("{}", e.getMessage(), e);
- }
-
- log.warn("Failed to create dir for tablet in table {} in volume {} will retry ...", tableId,
- volume);
- sleepUninterruptibly(3, TimeUnit.SECONDS);
-
+ private static String createTabletDirectoryName(ServerContext context, Text endRow) {
+ if (endRow == null) {
+ return ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
+ } else {
+ UniqueNameAllocator namer = context.getUniqueNameAllocator();
+ return Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
}
}
@@ -2860,4 +2816,7 @@ public class Tablet {
bulkImported.keySet().removeAll(tids);
}
+ public String getDirName() {
+ return dirName;
+ }
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
index 4f501a7..67acdac 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
@@ -46,7 +46,7 @@ public class TabletData {
private TServerInstance lastLocation = null;
private Map<Long,List<FileRef>> bulkImported = new HashMap<>();
private long splitTime = 0;
- private String directory = null;
+ private String directoryName = null;
// Read tablet data from metadata tables
public TabletData(KeyExtent extent, VolumeManager fs, TabletMetadata meta) {
@@ -54,7 +54,7 @@ public class TabletData {
this.time = meta.getTime();
this.compactID = meta.getCompactId().orElse(-1);
this.flushID = meta.getFlushId().orElse(-1);
- this.directory = meta.getDir();
+ this.directoryName = meta.getDirName();
this.logEntries.addAll(meta.getLogs());
meta.getScans().forEach(path -> scanFiles.add(new FileRef(fs, path, meta.getTableId())));
@@ -72,10 +72,10 @@ public class TabletData {
}
// Data pulled from an existing tablet to make a split
- public TabletData(String tabletDirectory, SortedMap<FileRef,DataFileValue> highDatafileSizes,
+ public TabletData(String dirName, SortedMap<FileRef,DataFileValue> highDatafileSizes,
MetadataTime time, long lastFlushID, long lastCompactID, TServerInstance lastLocation,
Map<Long,List<FileRef>> bulkIngestedFiles) {
- this.directory = tabletDirectory;
+ this.directoryName = dirName;
this.dataFiles = highDatafileSizes;
this.time = time;
this.flushID = lastFlushID;
@@ -117,8 +117,8 @@ public class TabletData {
return bulkImported;
}
- public String getDirectory() {
- return directory;
+ public String getDirectoryName() {
+ return directoryName;
}
public long getSplitTime() {
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/CheckTabletMetadataTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/CheckTabletMetadataTest.java
index bdfa737..26236ab 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/CheckTabletMetadataTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/CheckTabletMetadataTest.java
@@ -89,7 +89,7 @@ public class CheckTabletMetadataTest {
put(tabletMeta, "1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
KeyExtent.encodePrevEndRow(null).get());
- put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1".getBytes());
+ put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "t1".getBytes());
put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.TIME_COLUMN, "M0".getBytes());
put(tabletMeta, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
diff --git a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
deleted file mode 100644
index ebf8c03..0000000
--- a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.init.Initialize;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.server.util.RandomizeVolumes;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.commons.configuration2.PropertiesConfiguration;
-import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder;
-import org.apache.commons.configuration2.builder.fluent.Parameters;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-3263
-public class RewriteTabletDirectoriesIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private Path v1, v2;
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- File baseDir = cfg.getDir();
- File volDirBase = new File(baseDir, "volumes");
- File v1f = new File(volDirBase, "v1");
- File v2f = new File(volDirBase, "v2");
- v1 = new Path("file://" + v1f.getAbsolutePath());
- v2 = new Path("file://" + v2f.getAbsolutePath());
-
- // Use a VolumeChooser which should be more fair
- cfg.setProperty(Property.GENERAL_VOLUME_CHOOSER, FairVolumeChooser.class.getName());
- // Run MAC on two locations in the local file system
- cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString());
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- super.configure(cfg, hadoopCoreSite);
- }
-
- @Test
- public void test() throws Exception {
- try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
- c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME,
- TablePermission.WRITE);
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- // Write some data to a table and add some splits
- final SortedSet<Text> splits = new TreeSet<>();
- try (BatchWriter bw = c.createBatchWriter(tableName)) {
- for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
- splits.add(new Text(split));
- Mutation m = new Mutation(new Text(split));
- m.put(new byte[] {}, new byte[] {}, new byte[] {});
- bw.addMutation(m);
- }
- }
- c.tableOperations().addSplits(tableName, splits);
-
- try (BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME)) {
- DIRECTORY_COLUMN.fetch(scanner);
- TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
- assertNotNull("TableID for " + tableName + " was null", tableId);
- scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
- // verify the directory entries are all on v1, make a few entries relative
- int count = 0;
- try (BatchWriter bw = c.createBatchWriter(MetadataTable.NAME)) {
- for (Entry<Key,Value> entry : scanner) {
- assertTrue("Expected " + entry.getValue() + " to contain " + v1,
- entry.getValue().toString().contains(v1.toString()));
- count++;
- if (count % 2 == 0) {
- String[] parts = entry.getValue().toString().split("/");
- Key key = entry.getKey();
- Mutation m = new Mutation(key.getRow());
- m.put(key.getColumnFamily(), key.getColumnQualifier(),
- new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
- bw.addMutation(m);
- }
- }
- }
- assertEquals(splits.size() + 1, count);
-
- // This should fail: only one volume
- assertEquals(1,
- cluster
- .exec(RandomizeVolumes.class, "-c", cluster.getClientPropsPath(), "-t", tableName)
- .getProcess().waitFor());
-
- cluster.stop();
-
- // add the 2nd volume
- FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
- new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class).configure(
- new Parameters().properties().setFileName(cluster.getAccumuloPropertiesPath()));
- propsBuilder.getConfiguration().setProperty(Property.INSTANCE_VOLUMES.getKey(),
- v1 + "," + v2);
- propsBuilder.save();
-
- // initialize volume
- assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());
- cluster.start();
-
- // change the directory entries
- assertEquals(0,
- cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).getProcess().waitFor());
-
- // verify a more equal sharing
- int v1Count = 0, v2Count = 0;
- for (Entry<Key,Value> entry : scanner) {
- if (entry.getValue().toString().contains(v1.toString())) {
- v1Count++;
- }
- if (entry.getValue().toString().contains(v2.toString())) {
- v2Count++;
- }
- }
-
- log.info("Count for volume1: {}", v1Count);
- log.info("Count for volume2: {}", v2Count);
-
- assertEquals(splits.size() + 1, v1Count + v2Count);
- // a fair chooser will differ by less than count(volumes)
- assertTrue("Expected the number of files to differ between volumes by less than 10. "
- + v1Count + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
- // verify we can read the old data
- count = 0;
- for (Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
- assertTrue("Found unexpected entry in table: " + entry,
- splits.contains(entry.getKey().getRow()));
- count++;
- }
- assertEquals(splits.size(), count);
- }
- }
- }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 1e9e01e..2947dae 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -41,6 +41,7 @@ import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.DiskUsage;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
@@ -314,6 +315,7 @@ public class VolumeIT extends ConfigurableMacBase {
String[] tableNames = getUniqueNames(2);
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
+
String uuid = verifyAndShutdownCluster(client, tableNames[0]);
FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
@@ -387,21 +389,13 @@ public class VolumeIT extends ConfigurableMacBase {
TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
try (Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
- MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
int[] counts = new int[paths.length];
outer: for (Entry<Key,Value> entry : metaScanner) {
- String cf = entry.getKey().getColumnFamily().toString();
- String cq = entry.getKey().getColumnQualifier().toString();
-
- String path;
- if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString()))
- path = cq;
- else
- path = entry.getValue().toString();
+ String path = entry.getKey().getColumnQualifier().toString();
for (int i = 0; i < paths.length; i++) {
if (path.startsWith(paths[i].toString())) {
@@ -448,7 +442,7 @@ public class VolumeIT extends ConfigurableMacBase {
sum += count;
}
- assertEquals(200, sum);
+ assertEquals(100, sum);
}
}
@@ -476,11 +470,17 @@ public class VolumeIT extends ConfigurableMacBase {
verifyVolumesUsed(client, tableNames[0], true, v2);
+ client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
+
// check that root tablet is not on volume 1
- String rootTabletDir =
- ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT).getDir();
+ int count = 0;
+ for (String file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT)
+ .getFiles()) {
+ assertTrue(file.startsWith(v2.toString()));
+ count++;
+ }
- assertTrue(rootTabletDir.startsWith(v2.toString()));
+ assertTrue(count > 0);
client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(),
new HashSet<>());
@@ -540,10 +540,17 @@ public class VolumeIT extends ConfigurableMacBase {
verifyVolumesUsed(client, tableNames[0], true, v8, v9);
verifyVolumesUsed(client, tableNames[1], true, v8, v9);
+ client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
+
// check that root tablet is not on volume 1 or 2
- String rootTabletDir =
- ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT).getDir();
- assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));
+ int count = 0;
+ for (String file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT)
+ .getFiles()) {
+ assertTrue(file.startsWith(v8.toString()) || file.startsWith(v9.toString()));
+ count++;
+ }
+
+ assertTrue(count > 0);
client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(),
new HashSet<>());
diff --git a/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java b/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java
index b4b231a..8cb4f0c 100644
--- a/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.schema.Ample;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;
@@ -45,6 +46,7 @@ import org.apache.accumulo.minicluster.MemoryUnit;
import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
import org.apache.accumulo.miniclusterImpl.ProcessNotFoundException;
+import org.apache.accumulo.server.gc.GcVolumeUtil;
import org.apache.accumulo.test.functional.ConfigurableMacBase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RawLocalFileSystem;
@@ -145,7 +147,7 @@ public class GCUpgrade9to10TestIT extends ConfigurableMacBase {
throw new RuntimeException(e);
}
scanner.setRange(range);
- assertEquals(somebignumber, Iterators.size(scanner.iterator()));
+ assertEquals(somebignumber + somebignumber / 10, Iterators.size(scanner.iterator()));
}
}
@@ -189,6 +191,7 @@ public class GCUpgrade9to10TestIT extends ConfigurableMacBase {
scanner.iterator().forEachRemaining(entry -> {
actual.put(entry.getKey().getRow().toString(), entry.getValue().toString());
});
+
assertEquals(expected, actual);
}
}
@@ -206,12 +209,30 @@ public class GCUpgrade9to10TestIT extends ConfigurableMacBase {
Map<String,String> expected = new TreeMap<>();
try (BatchWriter bw = client.createBatchWriter(table)) {
for (int i = 0; i < count; ++i) {
- String longpath = String.format("hdfs://localhost:8020/%020d/%s", i, filename);
+ String longpath =
+ String.format("hdfs://localhost:8020/accumulo/tables/5a/t-%08x/%s", i, filename);
Mutation delFlag = createOldDelMutation(longpath, "", "", "");
bw.addMutation(delFlag);
expected.put(MetadataSchema.DeletesSection.encodeRow(longpath),
Upgrader9to10.UPGRADED.toString());
}
+
+ // create directory delete entries
+
+ TableId tableId = TableId.of("5a");
+
+ for (int i = 0; i < count; i += 10) {
+ String dirName = String.format("t-%08x", i);
+ String longpath =
+ String.format("hdfs://localhost:8020/accumulo/tables/%s/%s", tableId, dirName);
+ Mutation delFlag = createOldDelMutation(longpath, "", "", "");
+ bw.addMutation(delFlag);
+ expected.put(
+ MetadataSchema.DeletesSection
+ .encodeRow(GcVolumeUtil.getDeleteTabletOnAllVolumesUri(tableId, dirName)),
+ Upgrader9to10.UPGRADED.toString());
+ }
+
return expected;
}
}