You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by el...@apache.org on 2015/05/12 19:32:08 UTC
[3/3] accumulo git commit: ACCUMULO-3803 Resurrected mapreduce.lib.util
ACCUMULO-3803 Resurrected mapreduce.lib.util
Ran the following command in the 1.7 branch to do this. The commit below is what was at head of the 1.6 branch at the time I ran this command.
git checkout c4eff0c2eb1320e411ac3e41b6f2db89c2d3ba33 -- core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util
I checked in the 1.6 branch and found that this code has no references (such as tests).
Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/88132508
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/88132508
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/88132508
Branch: refs/heads/branch-1.7.0
Commit: 88132508872d1dd38b11b226259f125fa73ddf1a
Parents: 3a7cb00
Author: Keith Turner <kt...@apache.org>
Authored: Tue May 12 12:51:18 2015 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Tue May 12 13:31:07 2015 -0400
----------------------------------------------------------------------
.../mapreduce/lib/util/ConfiguratorBase.java | 277 +++++++++++
.../lib/util/FileOutputConfigurator.java | 170 +++++++
.../mapreduce/lib/util/InputConfigurator.java | 462 +++++++++++++++++++
.../mapreduce/lib/util/OutputConfigurator.java | 196 ++++++++
.../client/mapreduce/lib/util/package-info.java | 22 +
5 files changed, 1127 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88132508/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
new file mode 100644
index 0000000..20fbbea
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/ConfiguratorBase.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.lib.util;
+
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
+
+/**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+@Deprecated
+public class ConfiguratorBase {
+
+ /**
+ * Configuration keys for {@link Instance#getConnector(String, AuthenticationToken)}.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum ConnectorInfo {
+ IS_CONFIGURED, PRINCIPAL, TOKEN, TOKEN_CLASS
+ }
+
+ /**
+ * Configuration keys for {@link Instance}, {@link ZooKeeperInstance}, and {@link MockInstance}.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ protected static enum InstanceOpts {
+ TYPE, NAME, ZOO_KEEPERS;
+ }
+
+ /**
+ * Configuration keys for general configuration options.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ protected static enum GeneralOpts {
+ LOG_LEVEL
+ }
+
+ /**
+ * Provides a configuration key for a given feature enum, prefixed by the implementingClass
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param e
+ * the enum used to provide the unique part of the configuration key
+ * @return the configuration key
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ protected static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
+ return implementingClass.getSimpleName() + "." + e.getDeclaringClass().getSimpleName() + "." + StringUtils.camelize(e.name().toLowerCase());
+ }
+
+ /**
+ * Sets the connector information needed to communicate with Accumulo in this job.
+ *
+ * <p>
+ * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all MapReduce tasks. It is BASE64 encoded to provide a charset safe
+ * conversion to a string, and is not intended to be secure.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param principal
+ * a valid Accumulo user name
+ * @param token
+ * the user's password
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal, AuthenticationToken token)
+ throws AccumuloSecurityException {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setConnectorInfo(implementingClass, conf, principal, token);
+ }
+
+ /**
+ * Determines if the connector info has already been set for this instance.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the connector info has already been set, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
+ */
+ @Deprecated
+ public static Boolean isConnectorInfoSet(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.isConnectorInfoSet(implementingClass, conf);
+ }
+
+ /**
+ * Gets the user name from the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the principal
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
+ */
+ @Deprecated
+ public static String getPrincipal(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getPrincipal(implementingClass, conf);
+ }
+
+ /**
+ * DON'T USE THIS. No, really, don't use this. You already have an {@link AuthenticationToken} with
+ * {@link org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase#getAuthenticationToken(Class, Configuration)}. You don't need to construct it
+ * yourself.
+ * <p>
+ * Gets the serialized token class from the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the fully qualified class name of the serialized token
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
+ */
+ @Deprecated
+ public static String getTokenClass(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getAuthenticationToken(implementingClass, conf).getClass().getName();
+ }
+
+ /**
+ * DON'T USE THIS. No, really, don't use this. You already have an {@link AuthenticationToken} with
+ * {@link org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase#getAuthenticationToken(Class, Configuration)}. You don't need to construct it
+ * yourself.
+ * <p>
+ * Gets the password from the configuration. WARNING: The password is stored in the Configuration and shared with all MapReduce tasks; It is BASE64 encoded to
+ * provide a charset safe conversion to a string, and is not intended to be secure.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the decoded principal's authentication token
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setConnectorInfo(Class, Configuration, String, AuthenticationToken)
+ */
+ @Deprecated
+ public static byte[] getToken(Class<?> implementingClass, Configuration conf) {
+ return AuthenticationTokenSerializer.serialize(org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getAuthenticationToken(
+ implementingClass, conf));
+ }
+
+ /**
+ * Configures a {@link ZooKeeperInstance} for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param instanceName
+ * the Accumulo instance name
+ * @param zooKeepers
+ * a comma-separated list of zookeeper servers
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setZooKeeperInstance(Class<?> implementingClass, Configuration conf, String instanceName, String zooKeepers) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setZooKeeperInstance(implementingClass, conf,
+ new ClientConfiguration().withInstance(instanceName).withZkHosts(zooKeepers));
+ }
+
+ /**
+ * Configures a {@link MockInstance} for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param instanceName
+ * the Accumulo instance name
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setMockInstance(Class<?> implementingClass, Configuration conf, String instanceName) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setMockInstance(implementingClass, conf, instanceName);
+ }
+
+ /**
+ * Initializes an Accumulo {@link Instance} based on the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return an Accumulo instance
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setZooKeeperInstance(Class, Configuration, String, String)
+ * @see #setMockInstance(Class, Configuration, String)
+ */
+ @Deprecated
+ public static Instance getInstance(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getInstance(implementingClass, conf);
+ }
+
+ /**
+ * Sets the log level for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param level
+ * the logging level
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setLogLevel(Class<?> implementingClass, Configuration conf, Level level) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.setLogLevel(implementingClass, conf, level);
+ }
+
+ /**
+ * Gets the log level from this configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the log level
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setLogLevel(Class, Configuration, Level)
+ */
+ @Deprecated
+ public static Level getLogLevel(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.ConfiguratorBase.getLogLevel(implementingClass, conf);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88132508/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
new file mode 100644
index 0000000..d43ecda
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/FileOutputConfigurator.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.lib.util;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+@Deprecated
+public class FileOutputConfigurator extends ConfiguratorBase {
+
+ /**
+ * Configuration keys for {@link AccumuloConfiguration}.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum Opts {
+ ACCUMULO_PROPERTIES;
+ }
+
+ /**
+ * The supported Accumulo properties we set in this OutputFormat, that change the behavior of the RecordWriter.<br />
+ * These properties correspond to the supported public static setter methods available to this class.
+ *
+ * @param property
+ * the Accumulo property to check
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ protected static Boolean isSupportedAccumuloProperty(Property property) {
+ switch (property) {
+ case TABLE_FILE_COMPRESSION_TYPE:
+ case TABLE_FILE_COMPRESSED_BLOCK_SIZE:
+ case TABLE_FILE_BLOCK_SIZE:
+ case TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX:
+ case TABLE_FILE_REPLICATION:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * This helper method provides an AccumuloConfiguration object constructed from the Accumulo defaults, and overridden with Accumulo properties that have been
+ * stored in the Job's configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.getAccumuloConfiguration(implementingClass, conf);
+ }
+
+ /**
+ * Sets the compression type to use for data blocks. Specifying a compression may require additional libraries to be available to your Job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param compressionType
+ * one of "none", "gz", "lzo", or "snappy"
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setCompressionType(Class<?> implementingClass, Configuration conf, String compressionType) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setCompressionType(implementingClass, conf, compressionType);
+ }
+
+ /**
+ * Sets the size for data blocks within each file.<br />
+ * Data blocks are a span of key/value pairs stored in the file that are compressed and indexed as a group.
+ *
+ * <p>
+ * Making this value smaller may increase seek performance, but at the cost of increasing the size of the indexes (which can also affect seek performance).
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param dataBlockSize
+ * the block size, in bytes
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setDataBlockSize(Class<?> implementingClass, Configuration conf, long dataBlockSize) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setDataBlockSize(implementingClass, conf, dataBlockSize);
+ }
+
+ /**
+ * Sets the size for file blocks in the file system; file blocks are managed, and replicated, by the underlying file system.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param fileBlockSize
+ * the block size, in bytes
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setFileBlockSize(Class<?> implementingClass, Configuration conf, long fileBlockSize) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setFileBlockSize(implementingClass, conf, fileBlockSize);
+ }
+
+ /**
+ * Sets the size for index blocks within each file; smaller blocks means a deeper index hierarchy within the file, while larger blocks mean a more shallow
+ * index hierarchy within the file. This can affect the performance of queries.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param indexBlockSize
+ * the block size, in bytes
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setIndexBlockSize(Class<?> implementingClass, Configuration conf, long indexBlockSize) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setIndexBlockSize(implementingClass, conf, indexBlockSize);
+ }
+
+ /**
+ * Sets the file system replication factor for the resulting file, overriding the file system default.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param replication
+ * the number of replicas for produced files
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setReplication(Class<?> implementingClass, Configuration conf, int replication) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.FileOutputConfigurator.setReplication(implementingClass, conf, replication);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88132508/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
new file mode 100644
index 0000000..8d0c4b1
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
@@ -0,0 +1,462 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.lib.util;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.ClientSideIteratorScanner;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.TabletLocator;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+/**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+@Deprecated
+public class InputConfigurator extends ConfiguratorBase {
+
+ /**
+ * Configuration keys for {@link Scanner}.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum ScanOpts {
+ TABLE_NAME, AUTHORIZATIONS, RANGES, COLUMNS, ITERATORS
+ }
+
+ /**
+ * Configuration keys for various features.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum Features {
+ AUTO_ADJUST_RANGES, SCAN_ISOLATION, USE_LOCAL_ITERATORS, SCAN_OFFLINE
+ }
+
+ /**
+ * Sets the name of the input table, over which this job will scan.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param tableName
+ * the table to use when the tablename is null in the write call
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setInputTableName(Class<?> implementingClass, Configuration conf, String tableName) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setInputTableName(implementingClass, conf, tableName);
+ }
+
+ /**
+ * Gets the table name from the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the table name
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setInputTableName(Class, Configuration, String)
+ */
+ @Deprecated
+ public static String getInputTableName(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getInputTableName(implementingClass, conf);
+ }
+
+ /**
+ * Sets the {@link Authorizations} used to scan. Must be a subset of the user's authorization. Defaults to the empty set.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param auths
+ * the user's authorizations
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setScanAuthorizations(Class<?> implementingClass, Configuration conf, Authorizations auths) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setScanAuthorizations(implementingClass, conf, auths);
+ }
+
+ /**
+ * Gets the authorizations to set for the scans from the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the Accumulo scan authorizations
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setScanAuthorizations(Class, Configuration, Authorizations)
+ */
+ @Deprecated
+ public static Authorizations getScanAuthorizations(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getScanAuthorizations(implementingClass, conf);
+ }
+
+ /**
+ * Sets the input ranges to scan for this job. If not set, the entire table will be scanned.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param ranges
+ * the ranges that will be mapped over
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setRanges(Class<?> implementingClass, Configuration conf, Collection<Range> ranges) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setRanges(implementingClass, conf, ranges);
+ }
+
+ /**
+ * Gets the ranges to scan over from a job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the ranges
+ * @throws IOException
+ * if the ranges have been encoded improperly
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setRanges(Class, Configuration, Collection)
+ */
+ @Deprecated
+ public static List<Range> getRanges(Class<?> implementingClass, Configuration conf) throws IOException {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getRanges(implementingClass, conf);
+ }
+
+ /**
+ * Restricts the columns that will be mapped over for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param columnFamilyColumnQualifierPairs
+ * a pair of {@link Text} objects corresponding to column family and column qualifier. If the column qualifier is null, the entire column family is
+ * selected. An empty set is the default and is equivalent to scanning all of the columns.
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void fetchColumns(Class<?> implementingClass, Configuration conf, Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.fetchColumns(implementingClass, conf, columnFamilyColumnQualifierPairs);
+ }
+
+ /**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ */
+ @Deprecated
+ public static String[] serializeColumns(Collection<Pair<Text,Text>> columnFamilyColumnQualifierPairs) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.serializeColumns(columnFamilyColumnQualifierPairs);
+ }
+
+ /**
+ * Gets the columns to be mapped over from this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return a set of columns
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #fetchColumns(Class, Configuration, Collection)
+ */
+ @Deprecated
+ public static Set<Pair<Text,Text>> getFetchedColumns(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getFetchedColumns(implementingClass, conf);
+ }
+
+ /**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ */
+ @Deprecated
+ public static Set<Pair<Text,Text>> deserializeFetchedColumns(Collection<String> serialized) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.deserializeFetchedColumns(serialized);
+ }
+
+ /**
+ * Encode an iterator on the input for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param cfg
+ * the configuration of the iterator
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void addIterator(Class<?> implementingClass, Configuration conf, IteratorSetting cfg) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.addIterator(implementingClass, conf, cfg);
+ }
+
+ /**
+ * Gets a list of the iterator settings (for iterators to apply to a scanner) from this configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return a list of iterators
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #addIterator(Class, Configuration, IteratorSetting)
+ */
+ @Deprecated
+ public static List<IteratorSetting> getIterators(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getIterators(implementingClass, conf);
+ }
+
+ /**
+ * Controls the automatic adjustment of ranges for this job. This feature merges overlapping ranges, then splits them to align with tablet boundaries.
+ * Disabling this feature will cause exactly one Map task to be created for each specified range. The default setting is enabled.
+ *
+ * <p>
+ * By default, this feature is <b>enabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @see #setRanges(Class, Configuration, Collection)
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setAutoAdjustRanges(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setAutoAdjustRanges(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether a configuration has auto-adjust ranges enabled.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return false if the feature is disabled, true otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setAutoAdjustRanges(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean getAutoAdjustRanges(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getAutoAdjustRanges(implementingClass, conf);
+ }
+
+ /**
+ * Controls the use of the {@link IsolatedScanner} in this job.
+ *
+ * <p>
+ * By default, this feature is <b>disabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setScanIsolation(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setScanIsolation(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether a configuration has isolation enabled.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the feature is enabled, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setScanIsolation(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean isIsolated(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.isIsolated(implementingClass, conf);
+ }
+
+ /**
+ * Controls the use of the {@link ClientSideIteratorScanner} in this job. Enabling this feature will cause the iterator stack to be constructed within the Map
+ * task, rather than within the Accumulo TServer. To use this feature, all classes needed for those iterators must be available on the classpath for the task.
+ *
+ * <p>
+ * By default, this feature is <b>disabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setLocalIterators(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setLocalIterators(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether a configuration uses local iterators.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the feature is enabled, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setLocalIterators(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean usesLocalIterators(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.usesLocalIterators(implementingClass, conf);
+ }
+
+ /**
+ * <p>
+ * Enable reading offline tables. By default, this feature is disabled and only online tables are scanned. This will make the map reduce job directly read the
+ * table's files. If the table is not offline, then the job will fail. If the table comes online during the map reduce job, it is likely that the job will
+ * fail.
+ *
+ * <p>
+ * To use this option, the map reduce user will need access to read the Accumulo directory in HDFS.
+ *
+ * <p>
+ * Reading the offline table will create the scan time iterator stack in the map process. So any iterators that are configured for the table will need to be
+ * on the mapper's classpath. The accumulo-site.xml may need to be on the mapper's classpath if HDFS or the Accumulo directory in HDFS are non-standard.
+ *
+ * <p>
+ * One way to use this feature is to clone a table, take the clone offline, and use the clone as the input table for a map reduce job. If you plan to map
+ * reduce over the data many times, it may be better to compact the table, clone it, take it offline, and use the clone for all map reduce jobs. The
+ * reason to do this is that compaction will reduce each tablet in the table to one file, and it is faster to read from one file.
+ *
+ * <p>
+ * There are two possible advantages to reading a table's files directly out of HDFS. First, you may see better read performance. Second, it will support
+ * speculative execution better. When reading an online table, speculative execution can put more load on an already slow tablet server.
+ *
+ * <p>
+ * By default, this feature is <b>disabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setOfflineTableScan(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.setOfflineTableScan(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether a configuration has the offline table scan feature enabled.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the feature is enabled, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setOfflineTableScan(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean isOfflineScan(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.isOfflineScan(implementingClass, conf);
+ }
+
+ /**
+ * Initializes an Accumulo {@link TabletLocator} based on the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return an Accumulo tablet locator
+ * @throws TableNotFoundException
+ * if the table name set on the configuration doesn't exist
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static TabletLocator getTabletLocator(Class<?> implementingClass, Configuration conf) throws TableNotFoundException {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.getTabletLocator(implementingClass, conf,
+ Tables.getTableId(getInstance(implementingClass, conf), getInputTableName(implementingClass, conf)));
+ }
+
+ // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)
+ /**
+ * Check whether a configuration is fully configured to be used with an Accumulo {@link org.apache.hadoop.mapreduce.InputFormat}.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @throws IOException
+ * if the context is improperly configured
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void validateOptions(Class<?> implementingClass, Configuration conf) throws IOException {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.validateOptions(implementingClass, conf);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88132508/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java
new file mode 100644
index 0000000..39163a6
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/OutputConfigurator.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.mapreduce.lib.util;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+@Deprecated
+public class OutputConfigurator extends ConfiguratorBase {
+
+ /**
+ * Configuration keys for {@link BatchWriter}.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum WriteOpts {
+ DEFAULT_TABLE_NAME, BATCH_WRITER_CONFIG
+ }
+
+ /**
+ * Configuration keys for various features.
+ *
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static enum Features {
+ CAN_CREATE_TABLES, SIMULATION_MODE
+ }
+
+ /**
+ * Sets the default table name to use if one emits a null in place of a table name for a given mutation. Table names can only contain alphanumeric
+ * characters and underscores.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param tableName
+ * the table to use when the tablename is null in the write call
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setDefaultTableName(Class<?> implementingClass, Configuration conf, String tableName) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.setDefaultTableName(implementingClass, conf, tableName);
+ }
+
+ /**
+ * Gets the default table name from the configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the default table name
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setDefaultTableName(Class, Configuration, String)
+ */
+ @Deprecated
+ public static String getDefaultTableName(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.getDefaultTableName(implementingClass, conf);
+ }
+
+ /**
+ * Sets the configuration for the job's {@link BatchWriter} instances. If not set, a new {@link BatchWriterConfig}, with sensible built-in defaults, is
+ * used. Setting the configuration multiple times overwrites any previous configuration.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param bwConfig
+ * the configuration for the {@link BatchWriter}
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setBatchWriterOptions(Class<?> implementingClass, Configuration conf, BatchWriterConfig bwConfig) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.setBatchWriterOptions(implementingClass, conf, bwConfig);
+ }
+
+ /**
+ * Gets the {@link BatchWriterConfig} settings.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return the configuration object
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setBatchWriterOptions(Class, Configuration, BatchWriterConfig)
+ */
+ @Deprecated
+ public static BatchWriterConfig getBatchWriterOptions(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.getBatchWriterOptions(implementingClass, conf);
+ }
+
+ /**
+ * Sets the directive to create new tables, as necessary. Table names can only contain alphanumeric characters and underscores.
+ *
+ * <p>
+ * By default, this feature is <b>disabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setCreateTables(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.setCreateTables(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether tables are permitted to be created as needed.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the feature is enabled, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setCreateTables(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean canCreateTables(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.canCreateTables(implementingClass, conf);
+ }
+
+ /**
+ * Sets the directive to use simulation mode for this job. In simulation mode, no output is produced. This is useful for testing.
+ *
+ * <p>
+ * By default, this feature is <b>disabled</b>.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @param enableFeature
+ * the feature is enabled if true, disabled otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ */
+ @Deprecated
+ public static void setSimulationMode(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
+ org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.setSimulationMode(implementingClass, conf, enableFeature);
+ }
+
+ /**
+ * Determines whether simulation mode is enabled for this job.
+ *
+ * @param implementingClass
+ * the class whose name will be used as a prefix for the property configuration key
+ * @param conf
+ * the Hadoop configuration object to configure
+ * @return true if the feature is enabled, false otherwise
+ * @deprecated since 1.6.0; Configure your job with the appropriate InputFormat or OutputFormat.
+ * @since 1.5.0
+ * @see #setSimulationMode(Class, Configuration, boolean)
+ */
+ @Deprecated
+ public static Boolean getSimulationMode(Class<?> implementingClass, Configuration conf) {
+ return org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.getSimulationMode(implementingClass, conf);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88132508/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java
new file mode 100644
index 0000000..269ffea
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @deprecated since 1.6.0; This package was moved out of the public API.
+ * @since 1.5.0
+ */
+package org.apache.accumulo.core.client.mapreduce.lib.util;
+