Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/01/09 03:45:06 UTC
[62/66] [abbrv] accumulo git commit: Merge branch '1.5' into 1.6
Merge branch '1.5' into 1.6
Conflicts:
core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/TFile.java
core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java
core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java
core/src/main/java/org/apache/accumulo/core/util/format/BinaryFormatter.java
core/src/main/java/org/apache/accumulo/core/util/shell/commands/DUCommand.java
core/src/main/java/org/apache/accumulo/core/util/shell/commands/HistoryCommand.java
server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
start/src/main/java/org/apache/accumulo/start/Main.java
Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1368d09c
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1368d09c
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1368d09c
Branch: refs/heads/1.6
Commit: 1368d09c198c898a856e9fe3e25150ade33fdb97
Parents: 627e525 901d60e
Author: Christopher Tubbs <ct...@apache.org>
Authored: Thu Jan 8 21:29:28 2015 -0500
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Thu Jan 8 21:29:28 2015 -0500
----------------------------------------------------------------------
.../org/apache/accumulo/core/cli/ClientOpts.java | 6 +++---
.../core/client/admin/InstanceOperations.java | 6 +++---
.../apache/accumulo/core/file/rfile/CreateEmpty.java | 4 ++--
.../accumulo/core/file/rfile/bcfile/Utils.java | 1 +
.../org/apache/accumulo/core/iterators/Combiner.java | 2 +-
.../apache/accumulo/core/iterators/LongCombiner.java | 4 ++--
.../accumulo/core/iterators/OptionDescriber.java | 4 ++--
.../accumulo/core/iterators/TypedValueCombiner.java | 10 +++++-----
.../core/iterators/user/SummingArrayCombiner.java | 5 +++--
.../accumulo/core/util/shell/commands/DUCommand.java | 3 ++-
.../core/util/shell/commands/EGrepCommand.java | 3 ++-
.../util/shell/commands/QuotedStringTokenizer.java | 15 ++++++---------
.../examples/simple/mapreduce/TableToFile.java | 4 ++--
.../java/org/apache/accumulo/server/Accumulo.java | 4 ++--
.../java/org/apache/accumulo/tserver/Compactor.java | 5 +++--
.../main/java/org/apache/accumulo/start/Main.java | 4 ++--
.../start/classloader/AccumuloClassLoader.java | 4 ++--
.../test/randomwalk/security/CreateTable.java | 5 ++---
.../test/randomwalk/security/CreateUser.java | 5 ++---
19 files changed, 47 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index f1465bc,82ac73d..8bb8b3f
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@@ -166,14 -163,6 +166,14 @@@ public class ClientOpts extends Help
@Parameter(names = "--site-file", description = "Read the given accumulo site file to find the accumulo instance")
public String siteFile = null;
+ @Parameter(names = "--ssl", description = "Connect to accumulo over SSL")
+ public boolean sslEnabled = false;
+
- @Parameter(
- names = "--config-file",
- description = "Read the given client config file. If omitted, the path searched can be specified with $ACCUMULO_CLIENT_CONF_PATH, which defaults to ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
++ @Parameter(names = "--config-file", description = "Read the given client config file. "
++ + "If omitted, the path searched can be specified with $ACCUMULO_CLIENT_CONF_PATH, "
++ + "which defaults to ~/.accumulo/config:$ACCUMULO_CONF_DIR/client.conf:/etc/accumulo/client.conf")
+ public String clientConfigFile = null;
+
public void startDebugLogging() {
if (debug)
Logger.getLogger(Constants.CORE_PACKAGE_NAME).setLevel(Level.TRACE);
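The search order described in that --config-file help text can be read as: use $ACCUMULO_CLIENT_CONF_PATH if set, otherwise try ~/.accumulo/config, then $ACCUMULO_CONF_DIR/client.conf, then /etc/accumulo/client.conf, taking the first file that exists. A minimal sketch of that resolution follows; the class and method names are hypothetical, not code from this patch:

    import java.io.File;

    public class ClientConfigPathSketch {
      // Hypothetical helper: returns the first existing file on the search path,
      // or null if none is found. Mirrors the --config-file help text above.
      static File resolveClientConfig() {
        String search = System.getenv("ACCUMULO_CLIENT_CONF_PATH");
        if (search == null) {
          // Default search path from the help text; ACCUMULO_CONF_DIR may be
          // unset, in which case that candidate simply will not exist.
          search = System.getProperty("user.home") + "/.accumulo/config:"
              + System.getenv("ACCUMULO_CONF_DIR") + "/client.conf:"
              + "/etc/accumulo/client.conf";
        }
        for (String candidate : search.split(":")) {
          File f = new File(candidate);
          if (f.isFile())
            return f; // first existing file wins
        }
        return null;
      }
    }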
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/client/admin/InstanceOperations.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/admin/InstanceOperations.java
index 49dd1d5,8cb656c..cc9d282
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/InstanceOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/InstanceOperations.java
@@@ -103,10 -103,10 +103,10 @@@ public interface InstanceOperations
* Throws an exception if a tablet server can not be contacted.
*
* @param tserver
- * The tablet server address should be of the form <ip address>:<port>
+ * The tablet server address should be of the form {@code <ip address>:<port>}
* @since 1.5.0
*/
- public void ping(String tserver) throws AccumuloException;
+ void ping(String tserver) throws AccumuloException;
/**
* Test to see if the instance can load the given class as the given type. This check does not consider per table classpaths, see
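As a usage note for the ping declaration above, a minimal client-side example looks like the following; the instance name, zookeepers, credentials, and tablet server address are all placeholders:

    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;

    public class PingExample {
      public static void main(String[] args) throws Exception {
        Connector conn = new ZooKeeperInstance("myInstance", "zk1:2181")
            .getConnector("root", new PasswordToken("secret"));
        try {
          // The address uses the <ip address>:<port> form documented above.
          conn.instanceOperations().ping("10.0.0.1:9997");
          System.out.println("tablet server responded");
        } catch (AccumuloException e) {
          System.err.println("tablet server unreachable: " + e.getMessage());
        }
      }
    }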
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
index cd6bff8,8d54088..dcbaf9e
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
@@@ -60,9 -58,9 +60,9 @@@ public class CreateEmpty
static class Opts extends Help {
@Parameter(names = {"-c", "--codec"}, description = "the compression codec to use.", validateWith = IsSupportedCompressionAlgorithm.class)
- String codec = TFile.COMPRESSION_NONE;
+ String codec = Compression.COMPRESSION_NONE;
- @Parameter(
- description = " <path> { <path> ... } Each path given is a URL. Relative paths are resolved according to the default filesystem defined in your Hadoop configuration, which is usually an HDFS instance.",
+ @Parameter(description = " <path> { <path> ... } Each path given is a URL. "
+ + "Relative paths are resolved according to the default filesystem defined in your Hadoop configuration, which is usually an HDFS instance.",
required = true, validateWith = NamedLikeRFile.class)
List<String> files = new ArrayList<String>();
}
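For reference, the reworked options can be exercised by running the class through the launcher. The output path below is a placeholder; the NamedLikeRFile validator requires each path to be named like an RFile (for example, ending in .rf):

    accumulo org.apache.accumulo.core.file.rfile.CreateEmpty --codec none /tmp/empty.rf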
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/iterators/OptionDescriber.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DUCommand.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index 1b93c05,0000000..147a7c2
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@@ -1,336 -1,0 +1,336 @@@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server;
+
+import static com.google.common.base.Charsets.UTF_8;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.trace.DistributedTrace;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.util.Version;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.ReadOnlyStore;
+import org.apache.accumulo.fate.ReadOnlyTStore;
+import org.apache.accumulo.fate.ZooStore;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.util.time.SimpleTimer;
+import org.apache.accumulo.server.watcher.Log4jConfiguration;
+import org.apache.accumulo.server.watcher.MonitorLog4jWatcher;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Logger;
+import org.apache.log4j.helpers.LogLog;
+import org.apache.zookeeper.KeeperException;
+
+public class Accumulo {
+
+ private static final Logger log = Logger.getLogger(Accumulo.class);
+
+ public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) {
+ for (Volume volume : fs.getVolumes()) {
+ try {
+ if (getAccumuloPersistentVersion(fs) == oldVersion) {
+ log.debug("Attempting to upgrade " + volume);
+ Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume);
+ fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))).close();
+ // TODO document failure mode & recovery if FS permissions cause above to work and below to fail ACCUMULO-2596
+ Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion));
+ if (!fs.delete(prevDataVersionLoc)) {
+ throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume);
+ }
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Unable to set accumulo version: an error occurred.", e);
+ }
+ }
+ }
+
+ public static synchronized int getAccumuloPersistentVersion(FileSystem fs, Path path) {
+ int dataVersion;
+ try {
+ FileStatus[] files = fs.listStatus(path);
+ if (files == null || files.length == 0) {
+ dataVersion = -1; // assume it is 0.5 or earlier
+ } else {
+ dataVersion = Integer.parseInt(files[0].getPath().getName());
+ }
+ return dataVersion;
+ } catch (IOException e) {
+ throw new RuntimeException("Unable to read accumulo version: an error occurred.", e);
+ }
+ }
+
+ public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) {
+ // It doesn't matter which Volume is used as they should all have the data version stored
+ Volume v = fs.getVolumes().iterator().next();
+ Path path = ServerConstants.getDataVersionLocation(v);
+ return getAccumuloPersistentVersion(v.getFileSystem(), path);
+ }
+
+ public static synchronized Path getAccumuloInstanceIdPath(VolumeManager fs) {
+ // It doesn't matter which Volume is used as they should all have the instance ID stored
+ Volume v = fs.getVolumes().iterator().next();
+ return ServerConstants.getInstanceIdLocation(v);
+ }
+
+ public static void enableTracing(String address, String application) {
+ try {
+ DistributedTrace.enable(HdfsZooInstance.getInstance(), ZooReaderWriter.getInstance(), application, address);
+ } catch (Exception ex) {
+ log.error("creating remote sink for trace spans", ex);
+ }
+ }
+
+ /**
+ * Finds the best log4j configuration file. A generic file is used only if an application-specific file is not available. An XML file is preferred over a
+ * properties file, if possible.
+ *
+ * @param confDir
+ * directory where configuration files should reside
+ * @param application
+ * application name for configuration file name
+ * @return configuration file name
+ */
+ static String locateLogConfig(String confDir, String application) {
+ String explicitConfigFile = System.getProperty("log4j.configuration");
+ if (explicitConfigFile != null) {
+ return explicitConfigFile;
+ }
+ String[] configFiles = {String.format("%s/%s_logger.xml", confDir, application), String.format("%s/%s_logger.properties", confDir, application),
+ String.format("%s/generic_logger.xml", confDir), String.format("%s/generic_logger.properties", confDir)};
+ String defaultConfigFile = configFiles[2]; // generic_logger.xml
+ for (String f : configFiles) {
+ if (new File(f).exists()) {
+ return f;
+ }
+ }
+ return defaultConfigFile;
+ }
+
+ public static void setupLogging(String application) throws UnknownHostException {
+ System.setProperty("org.apache.accumulo.core.application", application);
+
+ if (System.getenv("ACCUMULO_LOG_DIR") != null)
+ System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_LOG_DIR"));
+ else
+ System.setProperty("org.apache.accumulo.core.dir.log", System.getenv("ACCUMULO_HOME") + "/logs/");
+
+ String localhost = InetAddress.getLocalHost().getHostName();
+ System.setProperty("org.apache.accumulo.core.ip.localhost.hostname", localhost);
+
+ // Use a specific log config, if it exists
+ String logConfigFile = locateLogConfig(System.getenv("ACCUMULO_CONF_DIR"), application);
+ // Turn off messages about not being able to reach the remote logger... we protect against that.
+ LogLog.setQuietMode(true);
+
+ // Set up local file-based logging right away
+ Log4jConfiguration logConf = new Log4jConfiguration(logConfigFile);
+ logConf.resetLogger();
+ }
+
+ public static void init(VolumeManager fs, ServerConfiguration serverConfig, String application) throws IOException {
+ final AccumuloConfiguration conf = serverConfig.getConfiguration();
+ final Instance instance = serverConfig.getInstance();
+
+ // Use a specific log config, if it exists
+ final String logConfigFile = locateLogConfig(System.getenv("ACCUMULO_CONF_DIR"), application);
+
+ // Set up polling log4j updates and log-forwarding using information advertised in zookeeper by the monitor
+ MonitorLog4jWatcher logConfigWatcher = new MonitorLog4jWatcher(instance.getInstanceID(), logConfigFile);
+ logConfigWatcher.setDelay(5000L);
+ logConfigWatcher.start();
+
+ // Makes sure the log-forwarding to the monitor is configured
+ int logPort = conf.getPort(Property.MONITOR_LOG4J_PORT);
+ System.setProperty("org.apache.accumulo.core.host.log.port", Integer.toString(logPort));
+
+ log.info(application + " starting");
+ log.info("Instance " + serverConfig.getInstance().getInstanceID());
+ int dataVersion = Accumulo.getAccumuloPersistentVersion(fs);
+ log.info("Data Version " + dataVersion);
+ Accumulo.waitForZookeeperAndHdfs(fs);
+
+ Version codeVersion = new Version(Constants.VERSION);
+ if (!(canUpgradeFromDataVersion(dataVersion))) {
+ throw new RuntimeException("This version of accumulo (" + codeVersion + ") is not compatible with files stored using data version " + dataVersion);
+ }
+
+ TreeMap<String,String> sortedProps = new TreeMap<String,String>();
+ for (Entry<String,String> entry : conf)
+ sortedProps.put(entry.getKey(), entry.getValue());
+
+ for (Entry<String,String> entry : sortedProps.entrySet()) {
+ String key = entry.getKey();
+ log.info(key + " = " + (Property.isSensitive(key) ? "<hidden>" : entry.getValue()));
+ }
+
+ monitorSwappiness();
+
+ // Encourage users to configure TLS
+ final String SSL = "SSL";
+ for (Property sslProtocolProperty : Arrays.asList(Property.RPC_SSL_CLIENT_PROTOCOL, Property.RPC_SSL_ENABLED_PROTOCOLS,
+ Property.MONITOR_SSL_INCLUDE_PROTOCOLS)) {
+ String value = conf.get(sslProtocolProperty);
+ if (value.contains(SSL)) {
+ log.warn("It is recommended that " + sslProtocolProperty + " only allow TLS");
+ }
+ }
+
+ }
+
+ /**
+ * Sanity check that the current persistent version is allowed to upgrade to the version of Accumulo running.
+ *
+ * @param dataVersion
+ * the version that is persisted in the backing Volumes
+ */
+ public static boolean canUpgradeFromDataVersion(final int dataVersion) {
+ return dataVersion == ServerConstants.DATA_VERSION || dataVersion == ServerConstants.PREV_DATA_VERSION
+ || dataVersion == ServerConstants.TWO_DATA_VERSIONS_AGO;
+ }
+
+ /**
+ * Does the data version number stored in the backing Volumes indicate we need to upgrade something?
+ */
+ public static boolean persistentVersionNeedsUpgrade(final int accumuloPersistentVersion) {
+ return accumuloPersistentVersion == ServerConstants.TWO_DATA_VERSIONS_AGO || accumuloPersistentVersion == ServerConstants.PREV_DATA_VERSION;
+ }
+
+ /**
+ * Schedules a background check that warns when the system swappiness setting is high enough to delay time-sensitive operations.
+ */
+ public static void monitorSwappiness() {
+ SimpleTimer.getInstance().schedule(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ String procFile = "/proc/sys/vm/swappiness";
+ File swappiness = new File(procFile);
+ if (swappiness.exists() && swappiness.canRead()) {
+ InputStream is = new FileInputStream(procFile);
+ try {
+ byte[] buffer = new byte[10];
+ int bytes = is.read(buffer);
+ String setting = new String(buffer, 0, bytes, UTF_8);
+ setting = setting.trim();
+ if (bytes > 0 && Integer.parseInt(setting) > 10) {
+ log.warn("System swappiness setting is greater than ten (" + setting + ") which can cause time-sensitive operations to be delayed. "
+ + " Accumulo is time sensitive because it needs to maintain distributed lock agreement.");
+ }
+ } finally {
+ is.close();
+ }
+ }
+ } catch (Throwable t) {
+ log.error(t, t);
+ }
+ }
+ }, 1000, 10 * 60 * 1000);
+ }
+
+ public static void waitForZookeeperAndHdfs(VolumeManager fs) {
+ log.info("Attempting to talk to zookeeper");
+ while (true) {
+ try {
+ ZooReaderWriter.getInstance().getChildren(Constants.ZROOT);
+ break;
+ } catch (InterruptedException e) {
+ // ignored
+ } catch (KeeperException ex) {
+ log.info("Waiting for accumulo to be initialized");
+ UtilWaitThread.sleep(1000);
+ }
+ }
+ log.info("Zookeeper connected and initialized, attemping to talk to HDFS");
+ long sleep = 1000;
+ int unknownHostTries = 3;
+ while (true) {
+ try {
+ if (fs.isReady())
+ break;
+ log.warn("Waiting for the NameNode to leave safemode");
+ } catch (IOException ex) {
+ log.warn("Unable to connect to HDFS", ex);
+ } catch (IllegalArgumentException exception) {
+ /* Unwrap the UnknownHostException so we can deal with it directly */
+ if (exception.getCause() instanceof UnknownHostException) {
+ if (unknownHostTries > 0) {
+ log.warn("Unable to connect to HDFS, will retry. cause: " + exception.getCause());
+ /* We need to make sure our sleep period is long enough to avoid getting a cached failure of the host lookup. */
+ sleep = Math.max(sleep, (AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) (exception.getCause())) + 1) * 1000);
+ } else {
+ log.error("Unable to connect to HDFS and have exceeded max number of retries.", exception);
+ throw exception;
+ }
+ unknownHostTries--;
+ } else {
+ throw exception;
+ }
+ }
+ log.info("Backing off due to failure; current sleep period is " + sleep / 1000. + " seconds");
+ UtilWaitThread.sleep(sleep);
+ /* Back off to give transient failures more time to clear. */
+ sleep = Math.min(60 * 1000, sleep * 2);
+ }
+ log.info("Connected to HDFS");
+ }
+
+ /**
+ * Exit loudly if there are outstanding Fate operations. Since Fate serializes class names, we need to make sure there are no queued transactions from a
+ * previous version before continuing an upgrade. The status of the operations is irrelevant; those in SUCCESSFUL status cause the same problem as those just
+ * queued.
+ *
+ * Note that the Master should not allow write access to Fate until after all upgrade steps are complete.
+ *
+ * Should be called as a guard before performing any upgrade steps, after determining that an upgrade is needed.
+ *
+ * see ACCUMULO-2519
+ */
+ public static void abortIfFateTransactions() {
+ try {
+ final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<Accumulo>(new ZooStore<Accumulo>(
+ ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE, ZooReaderWriter.getInstance()));
+ if (!(fate.list().isEmpty())) {
- throw new AccumuloException(
- "Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. Please see the README document for instructions on what to do under your previous version.");
++ throw new AccumuloException("Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. "
++ + "Please see the README document for instructions on what to do under your previous version.");
+ }
+ } catch (Exception exception) {
+ log.fatal("Problem verifying Fate readiness", exception);
+ System.exit(1);
+ }
+ }
+}
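The waitForZookeeperAndHdfs loop above pairs a bounded number of unknown-host retries with capped exponential backoff for transient failures: it starts at one second, doubles after each failure, and never sleeps longer than a minute. A sketch of just that schedule (not code from this commit):

    public class BackoffSketch {
      public static void main(String[] args) {
        long sleep = 1000; // initial sleep period, in milliseconds
        for (int attempt = 1; attempt <= 8; attempt++) {
          // Prints 1s, 2s, 4s, 8s, 16s, 32s, 60s, 60s.
          System.out.printf("attempt %d: back off %ds%n", attempt, sleep / 1000);
          sleep = Math.min(60 * 1000, sleep * 2); // double, but cap at one minute
        }
      }
    }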
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/server/tserver/src/main/java/org/apache/accumulo/tserver/Compactor.java
----------------------------------------------------------------------
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/Compactor.java
index 822171c,0000000..381f75c
mode 100644,000000..100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/Compactor.java
@@@ -1,548 -1,0 +1,549 @@@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.thrift.IterInfo;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.WrappingIterator;
+import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.iterators.system.DeletingIterator;
+import org.apache.accumulo.core.iterators.system.MultiIterator;
+import org.apache.accumulo.core.iterators.system.TimeSettingIterator;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction;
+import org.apache.accumulo.core.tabletserver.thrift.CompactionReason;
+import org.apache.accumulo.core.tabletserver.thrift.CompactionType;
+import org.apache.accumulo.core.util.LocalityGroupUtil;
+import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.problems.ProblemReport;
+import org.apache.accumulo.server.problems.ProblemReportingIterator;
+import org.apache.accumulo.server.problems.ProblemReports;
+import org.apache.accumulo.server.problems.ProblemType;
+import org.apache.accumulo.trace.instrument.Span;
+import org.apache.accumulo.trace.instrument.Trace;
+import org.apache.accumulo.tserver.Tablet.MinorCompactionReason;
+import org.apache.accumulo.tserver.compaction.MajorCompactionReason;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.Logger;
+
+public class Compactor implements Callable<CompactionStats> {
+
+ public static class CountingIterator extends WrappingIterator {
+
+ private long count;
+ private ArrayList<CountingIterator> deepCopies;
+ private AtomicLong entriesRead;
+
+ @Override
+ public CountingIterator deepCopy(IteratorEnvironment env) {
+ return new CountingIterator(this, env);
+ }
+
+ private CountingIterator(CountingIterator other, IteratorEnvironment env) {
+ setSource(other.getSource().deepCopy(env));
+ count = 0;
+ this.deepCopies = other.deepCopies;
+ this.entriesRead = other.entriesRead;
+ deepCopies.add(this);
+ }
+
+ public CountingIterator(SortedKeyValueIterator<Key,Value> source, AtomicLong entriesRead) {
+ deepCopies = new ArrayList<Compactor.CountingIterator>();
+ this.setSource(source);
+ count = 0;
+ this.entriesRead = entriesRead;
+ }
+
+ @Override
+ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void next() throws IOException {
+ super.next();
+ count++;
+ if (count % 1024 == 0) {
+ entriesRead.addAndGet(1024);
+ }
+ }
+
+ public long getCount() {
+ long sum = 0;
+ for (CountingIterator dc : deepCopies) {
+ sum += dc.count;
+ }
+
+ return count + sum;
+ }
+ }
+
+ private static final Logger log = Logger.getLogger(Compactor.class);
+
+ static class CompactionCanceledException extends Exception {
+ private static final long serialVersionUID = 1L;
+ }
+
+ interface CompactionEnv {
+ boolean isCompactionEnabled();
+
+ IteratorScope getIteratorScope();
+ }
+
+ private Map<FileRef,DataFileValue> filesToCompact;
+ private InMemoryMap imm;
+ private FileRef outputFile;
+ private boolean propogateDeletes;
+ private AccumuloConfiguration acuTableConf;
+ private CompactionEnv env;
+ private Configuration conf;
+ private VolumeManager fs;
+ protected KeyExtent extent;
+ private List<IteratorSetting> iterators;
+
+ // things to report
+ private String currentLocalityGroup = "";
+ private long startTime;
+
+ private MajorCompactionReason reason;
+ protected MinorCompactionReason mincReason;
+
+ private AtomicLong entriesRead = new AtomicLong(0);
+ private AtomicLong entriesWritten = new AtomicLong(0);
+ private DateFormat dateFormatter = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS");
+
+ private static AtomicLong nextCompactorID = new AtomicLong(0);
+
+ // a unique id to identify a compactor
+ private long compactorID = nextCompactorID.getAndIncrement();
+
+ protected volatile Thread thread;
+
+ private synchronized void setLocalityGroup(String name) {
+ this.currentLocalityGroup = name;
+ }
+
+ private void clearStats() {
+ entriesRead.set(0);
+ entriesWritten.set(0);
+ }
+
+ protected static final Set<Compactor> runningCompactions = Collections.synchronizedSet(new HashSet<Compactor>());
+
+ public static class CompactionInfo {
+
+ private Compactor compactor;
+ private String localityGroup;
+ private long entriesRead;
+ private long entriesWritten;
+
+ CompactionInfo(Compactor compactor) {
+ this.localityGroup = compactor.currentLocalityGroup;
+ this.entriesRead = compactor.entriesRead.get();
+ this.entriesWritten = compactor.entriesWritten.get();
+ this.compactor = compactor;
+ }
+
+ public long getID() {
+ return compactor.compactorID;
+ }
+
+ public KeyExtent getExtent() {
+ return compactor.getExtent();
+ }
+
+ public long getEntriesRead() {
+ return entriesRead;
+ }
+
+ public long getEntriesWritten() {
+ return entriesWritten;
+ }
+
+ public Thread getThread() {
+ return compactor.thread;
+ }
+
+ public String getOutputFile() {
+ return compactor.getOutputFile();
+ }
+
+ public ActiveCompaction toThrift() {
+
+ CompactionType type;
+
+ if (compactor.imm != null)
+ if (compactor.filesToCompact.size() > 0)
+ type = CompactionType.MERGE;
+ else
+ type = CompactionType.MINOR;
+ else if (!compactor.propogateDeletes)
+ type = CompactionType.FULL;
+ else
+ type = CompactionType.MAJOR;
+
+ CompactionReason reason;
+
- if (compactor.imm != null)
++ if (compactor.imm != null) {
+ switch (compactor.mincReason) {
+ case USER:
+ reason = CompactionReason.USER;
+ break;
+ case CLOSE:
+ reason = CompactionReason.CLOSE;
+ break;
+ case SYSTEM:
+ default:
+ reason = CompactionReason.SYSTEM;
+ break;
+ }
- else
++ } else {
+ switch (compactor.reason) {
+ case USER:
+ reason = CompactionReason.USER;
+ break;
+ case CHOP:
+ reason = CompactionReason.CHOP;
+ break;
+ case IDLE:
+ reason = CompactionReason.IDLE;
+ break;
+ case NORMAL:
+ default:
+ reason = CompactionReason.SYSTEM;
+ break;
+ }
++ }
+
+ List<IterInfo> iiList = new ArrayList<IterInfo>();
+ Map<String,Map<String,String>> iterOptions = new HashMap<String,Map<String,String>>();
+
+ for (IteratorSetting iterSetting : compactor.iterators) {
+ iiList.add(new IterInfo(iterSetting.getPriority(), iterSetting.getIteratorClass(), iterSetting.getName()));
+ iterOptions.put(iterSetting.getName(), iterSetting.getOptions());
+ }
+ List<String> filesToCompact = new ArrayList<String>();
+ for (FileRef ref : compactor.filesToCompact.keySet())
+ filesToCompact.add(ref.toString());
+ return new ActiveCompaction(compactor.extent.toThrift(), System.currentTimeMillis() - compactor.startTime, filesToCompact,
+ compactor.outputFile.toString(), type, reason, localityGroup, entriesRead, entriesWritten, iiList, iterOptions);
+ }
+ }
+
+ public static List<CompactionInfo> getRunningCompactions() {
+ ArrayList<CompactionInfo> compactions = new ArrayList<Compactor.CompactionInfo>();
+
+ synchronized (runningCompactions) {
+ for (Compactor compactor : runningCompactions) {
+ compactions.add(new CompactionInfo(compactor));
+ }
+ }
+
+ return compactions;
+ }
+
+ Compactor(Configuration conf, VolumeManager fs, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile, boolean propogateDeletes,
+ AccumuloConfiguration acuTableConf, KeyExtent extent, CompactionEnv env, List<IteratorSetting> iterators, MajorCompactionReason reason) {
+ this.extent = extent;
+ this.conf = conf;
+ this.fs = fs;
+ this.filesToCompact = files;
+ this.imm = imm;
+ this.outputFile = outputFile;
+ this.propogateDeletes = propogateDeletes;
+ this.acuTableConf = acuTableConf;
+ this.env = env;
+ this.iterators = iterators;
+ this.reason = reason;
+
+ startTime = System.currentTimeMillis();
+ }
+
+ Compactor(Configuration conf, VolumeManager fs, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile, boolean propogateDeletes,
+ AccumuloConfiguration acuTableConf, KeyExtent extent, CompactionEnv env) {
+ this(conf, fs, files, imm, outputFile, propogateDeletes, acuTableConf, extent, env, new ArrayList<IteratorSetting>(), null);
+ }
+
+ public VolumeManager getFileSystem() {
+ return fs;
+ }
+
+ KeyExtent getExtent() {
+ return extent;
+ }
+
+ String getOutputFile() {
+ return outputFile.toString();
+ }
+
+ @Override
+ public CompactionStats call() throws IOException, CompactionCanceledException {
+
+ FileSKVWriter mfw = null;
+
+ CompactionStats majCStats = new CompactionStats();
+
+ boolean remove = runningCompactions.add(this);
+
+ clearStats();
+
+ String oldThreadName = Thread.currentThread().getName();
+ String newThreadName = "MajC compacting " + extent.toString() + " started " + dateFormatter.format(new Date()) + " file: " + outputFile;
+ Thread.currentThread().setName(newThreadName);
+ thread = Thread.currentThread();
+ try {
+ FileOperations fileFactory = FileOperations.getInstance();
+ FileSystem ns = this.fs.getVolumeByPath(outputFile.path()).getFileSystem();
+ mfw = fileFactory.openWriter(outputFile.path().toString(), ns, ns.getConf(), acuTableConf);
+
+ Map<String,Set<ByteSequence>> lGroups;
+ try {
+ lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
+ } catch (LocalityGroupConfigurationError e) {
+ throw new IOException(e);
+ }
+
+ long t1 = System.currentTimeMillis();
+
+ HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
+
+ if (mfw.supportsLocalityGroups()) {
+ for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
+ setLocalityGroup(entry.getKey());
+ compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
+ allColumnFamilies.addAll(entry.getValue());
+ }
+ }
+
+ setLocalityGroup("");
+ compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
+
+ long t2 = System.currentTimeMillis();
+
+ FileSKVWriter mfwTmp = mfw;
+ mfw = null; // set this to null so we do not try to close it again in finally if the close fails
+ mfwTmp.close(); // if the close fails it will cause the compaction to fail
+
+ // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
+ try {
+ FileSKVIterator openReader = fileFactory.openReader(outputFile.path().toString(), false, ns, ns.getConf(), acuTableConf);
+ openReader.close();
+ } catch (IOException ex) {
+ log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
+ throw ex;
+ }
+
+ log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
+ majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
+
+ majCStats.setFileSize(fileFactory.getFileSize(outputFile.path().toString(), ns, ns.getConf(), acuTableConf));
+ return majCStats;
+ } catch (IOException e) {
+ log.error(e, e);
+ throw e;
+ } catch (RuntimeException e) {
+ log.error(e, e);
+ throw e;
+ } finally {
+ Thread.currentThread().setName(oldThreadName);
+ if (remove) {
+ thread = null;
+ runningCompactions.remove(this);
+ }
+
+ try {
+ if (mfw != null) {
+ // compaction must not have finished successfully, so close its output file
+ try {
+ mfw.close();
+ } finally {
+ if (!fs.deleteRecursively(outputFile.path()))
+ if (fs.exists(outputFile.path()))
+ log.error("Unable to delete " + outputFile);
+ }
+ }
+ } catch (IOException e) {
+ log.warn(e, e);
+ } catch (RuntimeException exception) {
+ log.warn(exception, exception);
+ }
+ }
+ }
+
+ private List<SortedKeyValueIterator<Key,Value>> openMapDataFiles(String lgName, ArrayList<FileSKVIterator> readers) throws IOException {
+
+ List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(filesToCompact.size());
+
+ for (FileRef mapFile : filesToCompact.keySet()) {
+ try {
+
+ FileOperations fileFactory = FileOperations.getInstance();
+ FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem();
+ FileSKVIterator reader;
+
+ reader = fileFactory.openReader(mapFile.path().toString(), false, fs, conf, acuTableConf);
+
+ readers.add(reader);
+
+ SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile.path().toString(), false, reader);
+
+ if (filesToCompact.get(mapFile).isTimeSet()) {
+ iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
+ }
+
+ iters.add(iter);
+
+ } catch (Throwable e) {
+
+ ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile.path().toString(), e));
+
+ log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
+ // failed to open some map file... close the ones that were opened
+ for (FileSKVIterator reader : readers) {
+ try {
+ reader.close();
+ } catch (Throwable e2) {
+ log.warn("Failed to close map file", e2);
+ }
+ }
+
+ readers.clear();
+
+ if (e instanceof IOException)
+ throw (IOException) e;
+ throw new IOException("Failed to open map data files", e);
+ }
+ }
+
+ return iters;
+ }
+
+ private void compactLocalityGroup(String lgName, Set<ByteSequence> columnFamilies, boolean inclusive, FileSKVWriter mfw, CompactionStats majCStats)
+ throws IOException, CompactionCanceledException {
+ ArrayList<FileSKVIterator> readers = new ArrayList<FileSKVIterator>(filesToCompact.size());
+ Span span = Trace.start("compact");
+ try {
+ long entriesCompacted = 0;
+ List<SortedKeyValueIterator<Key,Value>> iters = openMapDataFiles(lgName, readers);
+
+ if (imm != null) {
+ iters.add(imm.compactionIterator());
+ }
+
+ CountingIterator citr = new CountingIterator(new MultiIterator(iters, extent.toDataRange()), entriesRead);
+ DeletingIterator delIter = new DeletingIterator(citr, propogateDeletes);
+ ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
+
+ // if(env.getIteratorScope() )
+
+ TabletIteratorEnvironment iterEnv;
+ if (env.getIteratorScope() == IteratorScope.majc)
+ iterEnv = new TabletIteratorEnvironment(IteratorScope.majc, !propogateDeletes, acuTableConf);
+ else if (env.getIteratorScope() == IteratorScope.minc)
+ iterEnv = new TabletIteratorEnvironment(IteratorScope.minc, acuTableConf);
+ else
+ throw new IllegalArgumentException();
+
+ SortedKeyValueIterator<Key,Value> itr = iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(env.getIteratorScope(), cfsi, extent, acuTableConf,
+ iterators, iterEnv));
+
+ itr.seek(extent.toDataRange(), columnFamilies, inclusive);
+
+ if (!inclusive) {
+ mfw.startDefaultLocalityGroup();
+ } else {
+ mfw.startNewLocalityGroup(lgName, columnFamilies);
+ }
+
+ Span write = Trace.start("write");
+ try {
+ while (itr.hasTop() && env.isCompactionEnabled()) {
+ mfw.append(itr.getTopKey(), itr.getTopValue());
+ itr.next();
+ entriesCompacted++;
+
+ if (entriesCompacted % 1024 == 0) {
+ // Periodically update stats; we do not want to do this too often since it's volatile
+ entriesWritten.addAndGet(1024);
+ }
+ }
+
+ if (itr.hasTop() && !env.isCompactionEnabled()) {
+ // cancel major compaction operation
+ try {
+ try {
+ mfw.close();
+ } catch (IOException e) {
+ log.error(e, e);
+ }
+ fs.deleteRecursively(outputFile.path());
+ } catch (Exception e) {
+ log.warn("Failed to delete Canceled compaction output file " + outputFile, e);
+ }
+ throw new CompactionCanceledException();
+ }
+
+ } finally {
+ CompactionStats lgMajcStats = new CompactionStats(citr.getCount(), entriesCompacted);
+ majCStats.add(lgMajcStats);
+ write.stop();
+ }
+
+ } finally {
+ // close sequence files opened
+ for (FileSKVIterator reader : readers) {
+ try {
+ reader.close();
+ } catch (Throwable e) {
+ log.warn("Failed to close map file", e);
+ }
+ }
+ span.stop();
+ }
+ }
+
+}
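The CompactionInfo snapshots defined above are what feed the thrift ActiveCompaction report. Consuming them from other tserver code is a one-liner per compaction; this sketch uses only the accessors declared in this class:

    import java.util.List;

    import org.apache.accumulo.tserver.Compactor;
    import org.apache.accumulo.tserver.Compactor.CompactionInfo;

    public class CompactionReport {
      // Sketch: log a one-line summary of each compaction registered with this tserver.
      static void printRunningCompactions() {
        List<CompactionInfo> running = Compactor.getRunningCompactions();
        for (CompactionInfo info : running) {
          System.out.printf("compaction %d on %s: %,d read / %,d written -> %s%n",
              info.getID(), info.getExtent(), info.getEntriesRead(),
              info.getEntriesWritten(), info.getOutputFile());
        }
      }
    }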
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/start/src/main/java/org/apache/accumulo/start/Main.java
----------------------------------------------------------------------
diff --cc start/src/main/java/org/apache/accumulo/start/Main.java
index c4aaea1,a80ebe6..b9753bf
--- a/start/src/main/java/org/apache/accumulo/start/Main.java
+++ b/start/src/main/java/org/apache/accumulo/start/Main.java
@@@ -175,28 -141,7 +175,28 @@@ public class Main
}
private static void printUsage() {
- System.out
- .println("accumulo init | master | tserver | monitor | shell | admin | gc | classpath | rfile-info | login-info | tracer | minicluster | proxy | zookeeper | create-token | info | version | help | jar <jar> [<main class>] args | <accumulo class> args");
+ System.out.println("accumulo init | master | tserver | monitor | shell | admin | gc | classpath | rfile-info | login-info "
- + "| tracer | proxy | zookeeper | info | version | help | <accumulo class> args");
++ + "| tracer | minicluster | proxy | zookeeper | create-token | info | version | help | jar <jar> [<main class>] args | <accumulo class> args");
+ }
+
+ // feature: will work even if main class isn't in the JAR
+ static Class<?> loadClassFromJar(String[] args, JarFile f, ClassLoader cl) throws IOException, ClassNotFoundException {
+ ClassNotFoundException explicitNotFound = null;
+ if (args.length >= 3) {
+ try {
+ return cl.loadClass(args[2]); // jar jar-file main-class
+ } catch (ClassNotFoundException cnfe) {
+ // assume this is the first argument, look for main class in JAR manifest
+ explicitNotFound = cnfe;
+ }
+ }
+ String mainClass = f.getManifest().getMainAttributes().getValue(Attributes.Name.MAIN_CLASS);
+ if (mainClass == null) {
+ if (explicitNotFound != null) {
+ throw explicitNotFound;
+ }
+ throw new ClassNotFoundException("No main class was specified, and the JAR manifest does not specify one");
+ }
+ return cl.loadClass(mainClass);
}
}
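With loadClassFromJar in place, the launcher resolves a main class either from the command line or from the JAR manifest. For example (jar path and class name are placeholders):

    accumulo jar /path/to/myapp.jar arg1 arg2
    accumulo jar /path/to/myapp.jar com.example.MyMain arg1 arg2

The first form uses the Main-Class manifest attribute; the second names the class explicitly, falling back to the manifest if that class is not found. If neither yields a class, the ClassNotFoundException above is thrown.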
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1368d09c/test/src/main/java/org/apache/accumulo/test/randomwalk/security/CreateTable.java
----------------------------------------------------------------------