Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2016/10/12 20:12:18 UTC

[01/52] [abbrv] hadoop git commit: HDFS-10957. Retire BKJM from trunk (Vinayakumar B)

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13037 846ada2de -> 6476934ae


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
deleted file mode 100644
index 169a8a8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.bookkeeper.util.LocalBookKeeper;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.server.NIOServerCnxnFactory;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests that read, update, clear api from CurrentInprogress
- */
-public class TestCurrentInprogress {
-  private static final Log LOG = LogFactory.getLog(TestCurrentInprogress.class);
-  private static final String CURRENT_NODE_PATH = "/test";
-  private static final String HOSTPORT = "127.0.0.1:2181";
-  private static final int CONNECTION_TIMEOUT = 30000;
-  private static NIOServerCnxnFactory serverFactory;
-  private static ZooKeeperServer zks;
-  private static ZooKeeper zkc;
-  private static int ZooKeeperDefaultPort = 2181;
-  private static File zkTmpDir;
-
-  private static ZooKeeper connectZooKeeper(String ensemble)
-      throws IOException, KeeperException, InterruptedException {
-    final CountDownLatch latch = new CountDownLatch(1);
-
-    ZooKeeper zkc = new ZooKeeper(HOSTPORT, 3600, new Watcher() {
-      public void process(WatchedEvent event) {
-        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
-          latch.countDown();
-        }
-      }
-    });
-    if (!latch.await(10, TimeUnit.SECONDS)) {
-      throw new IOException("Zookeeper took too long to connect");
-    }
-    return zkc;
-  }
-
-  @BeforeClass
-  public static void setupZooKeeper() throws Exception {
-    LOG.info("Starting ZK server");
-    zkTmpDir = File.createTempFile("zookeeper", "test");
-    zkTmpDir.delete();
-    zkTmpDir.mkdir();
-    try {
-      zks = new ZooKeeperServer(zkTmpDir, zkTmpDir, ZooKeeperDefaultPort);
-      serverFactory = new NIOServerCnxnFactory();
-      serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), 10);
-      serverFactory.startup(zks);
-    } catch (Exception e) {
-      LOG.error("Exception while instantiating ZooKeeper", e);
-    }
-    boolean b = LocalBookKeeper.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
-    LOG.debug("ZooKeeper server up: " + b);
-  }
-
-  @AfterClass
-  public static void shutDownServer() {
-    if (null != zks) {
-      zks.shutdown();
-    }
-    zkTmpDir.delete();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    zkc = connectZooKeeper(HOSTPORT);
-  }
-
-  @After
-  public void teardown() throws Exception {
-    if (null != zkc) {
-      zkc.close();
-    }
-
-  }
-
-  /**
-   * Tests that read should be able to read the data which updated with update
-   * api
-   */
-  @Test
-  public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
-    String data = "inprogressNode";
-    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
-    ci.init();
-    ci.update(data);
-    String inprogressNodePath = ci.read();
-    assertEquals("Not returning inprogressZnode", "inprogressNode",
-        inprogressNodePath);
-  }
-
-  /**
-   * Tests that read should return null if we clear the updated data in
-   * CurrentInprogress node
-   */
-  @Test
-  public void testReadShouldReturnNullAfterClear() throws Exception {
-    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
-    ci.init();
-    ci.update("myInprogressZnode");
-    ci.read();
-    ci.clear();
-    String inprogressNodePath = ci.read();
-    assertEquals("Expecting null to be return", null, inprogressNodePath);
-  }
-
-  /**
-   * Tests that update should throw IOE, if version number modifies between read
-   * and update
-   */
-  @Test(expected = IOException.class)
-  public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
-      throws Exception {
-    CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
-    ci.init();
-    ci.update("myInprogressZnode");
-    assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci
-        .read());
-    // Updating data in-between to change the data to change the version number
-    ci.update("YourInprogressZnode");
-    ci.update("myInprogressZnode");
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
deleted file mode 100644
index 7a7af06..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
-
-/**
- * Utilities for testing edit logs
- */
-public class FSEditLogTestUtil {
-  private static OpInstanceCache cache = new OpInstanceCache();
-
-  public static FSEditLogOp getNoOpInstance() {
-    return FSEditLogOp.LogSegmentOp.getInstance(cache,
-        FSEditLogOpCodes.OP_END_LOG_SEGMENT);
-  }
-
-  public static long countTransactionsInStream(EditLogInputStream in) 
-      throws IOException {
-    FSEditLogLoader.EditLogValidation validation =
-        FSEditLogLoader.scanEditLog(in, Long.MAX_VALUE);
-    return (validation.getEndTxId() - in.getFirstTxId()) + 1;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
deleted file mode 100644
index 52aac43..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# 
-#
-
-#
-# Bookkeeper Journal Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+
-
-# DEFAULT: console appender only
-log4j.rootLogger=DEBUG, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=hdfs-namenode.log
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p - [%t:%C{1}@%L] - %m%n
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index b743233..5625244 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -38,7 +38,6 @@ HDFS High Availability
         * [Securing access to ZooKeeper](#Securing_access_to_ZooKeeper)
         * [Verifying automatic failover](#Verifying_automatic_failover)
     * [Automatic Failover FAQ](#Automatic_Failover_FAQ)
-    * [BookKeeper as a Shared storage (EXPERIMENTAL)](#BookKeeper_as_a_Shared_storage_EXPERIMENTAL)
 
 Purpose
 -------
@@ -572,116 +571,3 @@ Automatic Failover FAQ
     using the same `hdfs haadmin` command. It will perform a coordinated
     failover.
 
-BookKeeper as a Shared storage (EXPERIMENTAL)
----------------------------------------------
-
-One option for shared storage for the NameNode is BookKeeper. BookKeeper achieves high availability and strong durability guarantees by replicating edit log entries across multiple storage nodes. The edit log can be striped across the storage nodes for high performance. Fencing is supported in the protocol, i.e, BookKeeper will not allow two writers to write the single edit log.
-
-The meta data for BookKeeper is stored in ZooKeeper. In current HA architecture, a Zookeeper cluster is required for ZKFC. The same cluster can be for BookKeeper metadata.
-
-For more details on building a BookKeeper cluster, please refer to the [BookKeeper documentation](http://zookeeper.apache.org/bookkeeper/docs/trunk/bookkeeperConfig.html )
-
-The BookKeeperJournalManager is an implementation of the HDFS JournalManager interface, which allows custom write ahead logging implementations to be plugged into the HDFS NameNode.
-
-*   **BookKeeper Journal Manager**
-
-    To use BookKeeperJournalManager, add the following to hdfs-site.xml.
-
-            <property>
-              <name>dfs.namenode.shared.edits.dir</name>
-              <value>bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal</value>
-            </property>
-
-            <property>
-              <name>dfs.namenode.edits.journal-plugin.bookkeeper</name>
-              <value>org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager</value>
-            </property>
-
-    The URI format for bookkeeper is `bookkeeper://[zkEnsemble]/[rootZnode] [zookkeeper ensemble]`
-    is a list of semi-colon separated, zookeeper host:port
-    pairs. In the example above there are 3 servers, in the ensemble,
-    zk1, zk2 & zk3, each one listening on port 2181.
-
-    `[root znode]` is the path of the zookeeper znode, under which the edit log
-    information will be stored.
-
-    The class specified for the journal-plugin must be available in the NameNode's
-    classpath. We explain how to generate a jar file with the journal manager and
-    its dependencies, and how to put it into the classpath below.
-
-*   **More configuration options**
-
-    *   **dfs.namenode.bookkeeperjournal.output-buffer-size** -
-        Number of bytes a bookkeeper journal stream will buffer before
-        forcing a flush. Default is 1024.
-
-                   <property>
-                     <name>dfs.namenode.bookkeeperjournal.output-buffer-size</name>
-                     <value>1024</value>
-                   </property>
-
-    *   **dfs.namenode.bookkeeperjournal.ensemble-size** -
-    Number of bookkeeper servers in edit log ensembles. This
-    is the number of bookkeeper servers which need to be available
-    for the edit log to be writable. Default is 3.
-
-                   <property>
-                     <name>dfs.namenode.bookkeeperjournal.ensemble-size</name>
-                     <value>3</value>
-                   </property>
-
-    * **dfs.namenode.bookkeeperjournal.quorum-size** -
-    Number of bookkeeper servers in the write quorum. This is the
-    number of bookkeeper servers which must have acknowledged the
-    write of an entry before it is considered written. Default is 2.
-
-                   <property>
-                     <name>dfs.namenode.bookkeeperjournal.quorum-size</name>
-                     <value>2</value>
-                   </property>
-
-    * **dfs.namenode.bookkeeperjournal.digestPw** -
-    Password to use when creating edit log segments.
-
-                   <property>
-                    <name>dfs.namenode.bookkeeperjournal.digestPw</name>
-                    <value>myPassword</value>
-                   </property>
-
-    * **dfs.namenode.bookkeeperjournal.zk.session.timeout** -
-    Session timeout for Zookeeper client from BookKeeper Journal Manager.
-    Hadoop recommends that this value should be less than the ZKFC
-    session timeout value. Default value is 3000.
-
-                   <property>
-                     <name>dfs.namenode.bookkeeperjournal.zk.session.timeout</name>
-                     <value>3000</value>
-                   </property>
-
-*   **Building BookKeeper Journal Manager plugin jar**
-
-    To generate the distribution packages for BK journal, do the following.
-
-    $ mvn clean package -Pdist
-
-    This will generate a jar with the BookKeeperJournalManager,
-    hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-*VERSION*.jar
-
-    Note that the -Pdist part of the build command is important, this would
-    copy the dependent bookkeeper-server jar under
-    hadoop-hdfs/src/contrib/bkjournal/target/lib.
-
-*   **Putting the BookKeeperJournalManager in the NameNode classpath**
-
-    To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal and
-    bookkeeper-server jar, mentioned above, into the lib directory of hdfs. In the
-    standard distribution of HDFS, this is at $HADOOP\_HDFS\_HOME/share/hadoop/hdfs/lib/
-
-    cp hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-*VERSION*.jar $HADOOP\_HDFS\_HOME/share/hadoop/hdfs/lib/
-
-*   **Current limitations**
-
-    1) Security in BookKeeper. BookKeeper does not support SASL nor SSL for
-    connections between the NameNode and BookKeeper storage nodes.
-
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index 4bb0545..a0993c9 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -35,7 +35,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>hadoop-hdfs-client</module>
     <module>hadoop-hdfs-native-client</module>
     <module>hadoop-hdfs-httpfs</module>
-    <module>hadoop-hdfs/src/contrib/bkjournal</module>
     <module>hadoop-hdfs-nfs</module>
   </modules>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 26ac16c..58abaf3 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -930,12 +930,6 @@
         </exclusions>
       </dependency>
       <dependency>
-        <groupId>org.apache.bookkeeper</groupId>
-        <artifactId>bookkeeper-server</artifactId>
-        <version>4.2.3</version>
-        <scope>compile</scope>
-      </dependency>
-      <dependency>
         <groupId>org.hsqldb</groupId>
         <artifactId>hsqldb</artifactId>
         <version>2.0.0</version>



[02/52] [abbrv] hadoop git commit: HDFS-10957. Retire BKJM from trunk (Vinayakumar B)

Posted by cn...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
deleted file mode 100644
index b1fc3d7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/BKJMUtil.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.*;
-
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.KeeperException;
-
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.bookkeeper.conf.ServerConfiguration;
-import org.apache.bookkeeper.util.LocalBookKeeper;
-
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.List;
-
-import java.io.IOException;
-import java.io.File;
-
-/**
- * Utility class for setting up bookkeeper ensembles
- * and bringing individual bookies up and down
- */
-class BKJMUtil {
-  protected static final Log LOG = LogFactory.getLog(BKJMUtil.class);
-
-  int nextPort = 6000; // next port for additionally created bookies
-  private Thread bkthread = null;
-  private final static String zkEnsemble = "127.0.0.1:2181";
-  int numBookies;
-
-  BKJMUtil(final int numBookies) throws Exception {
-    this.numBookies = numBookies;
-
-    bkthread = new Thread() {
-        public void run() {
-          try {
-            String[] args = new String[1];
-            args[0] = String.valueOf(numBookies);
-            LOG.info("Starting bk");
-            LocalBookKeeper.main(args);
-          } catch (InterruptedException e) {
-            // go away quietly
-          } catch (Exception e) {
-            LOG.error("Error starting local bk", e);
-          }
-        }
-      };
-  }
-
-  void start() throws Exception {
-    bkthread.start();
-    if (!LocalBookKeeper.waitForServerUp(zkEnsemble, 10000)) {
-      throw new Exception("Error starting zookeeper/bookkeeper");
-    }
-    assertEquals("Not all bookies started",
-                 numBookies, checkBookiesUp(numBookies, 10));
-  }
-
-  void teardown() throws Exception {
-    if (bkthread != null) {
-      bkthread.interrupt();
-      bkthread.join();
-    }
-  }
-
-  static ZooKeeper connectZooKeeper()
-      throws IOException, KeeperException, InterruptedException {
-    final CountDownLatch latch = new CountDownLatch(1);
-
-    ZooKeeper zkc = new ZooKeeper(zkEnsemble, 3600, new Watcher() {
-        public void process(WatchedEvent event) {
-          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
-            latch.countDown();
-          }
-        }
-      });
-    if (!latch.await(3, TimeUnit.SECONDS)) {
-      throw new IOException("Zookeeper took too long to connect");
-    }
-    return zkc;
-  }
-
-  static URI createJournalURI(String path) throws Exception {
-    return URI.create("bookkeeper://" + zkEnsemble + path);
-  }
-
-  static void addJournalManagerDefinition(Configuration conf) {
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".bookkeeper",
-             "org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager");
-  }
-
-  BookieServer newBookie() throws Exception {
-    int port = nextPort++;
-    ServerConfiguration bookieConf = new ServerConfiguration();
-    bookieConf.setBookiePort(port);
-    File tmpdir = File.createTempFile("bookie" + Integer.toString(port) + "_",
-                                      "test");
-    tmpdir.delete();
-    tmpdir.mkdir();
-
-    bookieConf.setZkServers(zkEnsemble);
-    bookieConf.setJournalDirName(tmpdir.getPath());
-    bookieConf.setLedgerDirNames(new String[] { tmpdir.getPath() });
-
-    BookieServer b = new BookieServer(bookieConf);
-    b.start();
-    for (int i = 0; i < 10 && !b.isRunning(); i++) {
-      Thread.sleep(10000);
-    }
-    if (!b.isRunning()) {
-      throw new IOException("Bookie would not start");
-    }
-    return b;
-  }
-
-  /**
-   * Check that a number of bookies are available
-   * @param count number of bookies required
-   * @param timeout number of seconds to wait for bookies to start
-   * @throws IOException if bookies are not started by the time the timeout hits
-   */
-  int checkBookiesUp(int count, int timeout) throws Exception {
-    ZooKeeper zkc = connectZooKeeper();
-    try {
-      int mostRecentSize = 0;
-      for (int i = 0; i < timeout; i++) {
-        try {
-          List<String> children = zkc.getChildren("/ledgers/available",
-                                                  false);
-          mostRecentSize = children.size();
-          // Skip 'readonly znode' which is used for keeping R-O bookie details
-          if (children.contains("readonly")) {
-            mostRecentSize = children.size() - 1;
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Found " + mostRecentSize + " bookies up, "
-                      + "waiting for " + count);
-            if (LOG.isTraceEnabled()) {
-              for (String child : children) {
-                LOG.trace(" server: " + child);
-              }
-            }
-          }
-          if (mostRecentSize == count) {
-            break;
-          }
-        } catch (KeeperException e) {
-          // ignore
-        }
-        Thread.sleep(1000);
-      }
-      return mostRecentSize;
-    } finally {
-      zkc.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
deleted file mode 100644
index ff8c00d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
+++ /dev/null
@@ -1,414 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.*;
-
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-
-import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-
-import org.apache.hadoop.ipc.RemoteException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.ExitUtil;
-import org.apache.hadoop.util.ExitUtil.ExitException;
-
-import org.apache.bookkeeper.proto.BookieServer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- * Integration test to ensure that the BookKeeper JournalManager
- * works for HDFS Namenode HA
- */
-@RunWith(Parameterized.class)
-public class TestBookKeeperAsHASharedDir {
-  static final Log LOG = LogFactory.getLog(TestBookKeeperAsHASharedDir.class);
-
-  private static BKJMUtil bkutil;
-  static int numBookies = 3;
-
-  private static final String TEST_FILE_DATA = "HA BookKeeperJournalManager";
-
-  @Parameters
-  public static Collection<Object[]> data() {
-    Collection<Object[]> params = new ArrayList<Object[]>();
-    params.add(new Object[]{ Boolean.FALSE });
-    params.add(new Object[]{ Boolean.TRUE });
-    return params;
-  }
-
-  private static boolean useAsyncEditLog;
-  public TestBookKeeperAsHASharedDir(Boolean async) {
-    useAsyncEditLog = async;
-  }
-
-  private static Configuration getConf() {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING,
-        useAsyncEditLog);
-    return conf;
-  }
-
-  @BeforeClass
-  public static void setupBookkeeper() throws Exception {
-    bkutil = new BKJMUtil(numBookies);
-    bkutil.start();
-  }
-  
-  @Before
-  public void clearExitStatus() {
-    ExitUtil.resetFirstExitException();
-  }
-
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    bkutil.teardown();
-  }
-
-  /**
-   * Test simple HA failover usecase with BK
-   */
-  @Test
-  public void testFailoverWithBK() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = getConf();
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-               BKJMUtil.createJournalURI("/hotfailover").toString());
-      BKJMUtil.addJournalManagerDefinition(conf);
-
-      cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleHATopology())
-        .numDataNodes(0)
-        .manageNameDfsSharedDirs(false)
-        .build();
-      NameNode nn1 = cluster.getNameNode(0);
-      NameNode nn2 = cluster.getNameNode(1);
-
-      cluster.waitActive();
-      cluster.transitionToActive(0);
-
-      Path p = new Path("/testBKJMfailover");
-
-      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-      fs.mkdirs(p);
-      cluster.shutdownNameNode(0);
-
-      cluster.transitionToActive(1);
-
-      assertTrue(fs.exists(p));
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  /**
-   * Test HA failover, where BK, as the shared storage, fails.
-   * Once it becomes available again, a standby can come up.
-   * Verify that any write happening after the BK fail is not
-   * available on the standby.
-   */
-  @Test
-  public void testFailoverWithFailingBKCluster() throws Exception {
-    int ensembleSize = numBookies + 1;
-    BookieServer newBookie = bkutil.newBookie();
-    assertEquals("New bookie didn't start",
-                 ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
-
-    BookieServer replacementBookie = null;
-
-    MiniDFSCluster cluster = null;
-
-    try {
-      Configuration conf = getConf();
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-               BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-                  ensembleSize);
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-                  ensembleSize);
-      BKJMUtil.addJournalManagerDefinition(conf);
-
-      cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleHATopology())
-        .numDataNodes(0)
-        .manageNameDfsSharedDirs(false)
-        .checkExitOnShutdown(false)
-        .build();
-      NameNode nn1 = cluster.getNameNode(0);
-      NameNode nn2 = cluster.getNameNode(1);
-
-      cluster.waitActive();
-      cluster.transitionToActive(0);
-
-      Path p1 = new Path("/testBKJMFailingBKCluster1");
-      Path p2 = new Path("/testBKJMFailingBKCluster2");
-
-      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-      fs.mkdirs(p1);
-      newBookie.shutdown(); // will take down shared storage
-      assertEquals("New bookie didn't stop",
-                   numBookies, bkutil.checkBookiesUp(numBookies, 10));
-
-      try {
-        fs.mkdirs(p2);
-        fail("mkdirs should result in the NN exiting");
-      } catch (RemoteException re) {
-        assertTrue(re.getClassName().contains("ExitException"));
-      }
-      cluster.shutdownNameNode(0);
-
-      try {
-        cluster.transitionToActive(1);
-        fail("Shouldn't have been able to transition with bookies down");
-      } catch (ExitException ee) {
-        assertTrue("Should shutdown due to required journal failure",
-            ee.getMessage().contains(
-                "starting log segment 3 failed for required journal"));
-      }
-
-      replacementBookie = bkutil.newBookie();
-      assertEquals("Replacement bookie didn't start",
-                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
-      cluster.transitionToActive(1); // should work fine now
-
-      assertTrue(fs.exists(p1));
-      assertFalse(fs.exists(p2));
-    } finally {
-      newBookie.shutdown();
-      if (replacementBookie != null) {
-        replacementBookie.shutdown();
-      }
-
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  /**
-   * Test that two namenodes can't continue as primary
-   */
-  @Test
-  public void testMultiplePrimariesStarted() throws Exception {
-    Path p1 = new Path("/testBKJMMultiplePrimary");
-
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = getConf();
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-               BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
-      BKJMUtil.addJournalManagerDefinition(conf);
-
-      cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleHATopology())
-        .numDataNodes(0)
-        .manageNameDfsSharedDirs(false)
-        .checkExitOnShutdown(false)
-        .build();
-      NameNode nn1 = cluster.getNameNode(0);
-      NameNode nn2 = cluster.getNameNode(1);
-      cluster.waitActive();
-      cluster.transitionToActive(0);
-
-      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
-      fs.mkdirs(p1);
-      nn1.getRpcServer().rollEditLog();
-      cluster.transitionToActive(1);
-      fs = cluster.getFileSystem(0); // get the older active server.
-
-      try {
-        System.out.println("DMS: > *************");
-        boolean foo = fs.delete(p1, true);
-        System.out.println("DMS: < ************* "+foo);
-        fail("Log update on older active should cause it to exit");
-      } catch (RemoteException re) {
-        assertTrue(re.getClassName().contains("ExitException"));
-      }
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-  
-  /**
-   * Use NameNode INTIALIZESHAREDEDITS to initialize the shared edits. i.e. copy
-   * the edits log segments to new bkjm shared edits.
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testInitializeBKSharedEdits() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = getConf();
-      HAUtil.setAllowStandbyReads(conf, true);
-
-      MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
-      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
-          .numDataNodes(0).build();
-      cluster.waitActive();
-      // Shutdown and clear the current filebased shared dir.
-      cluster.shutdownNameNodes();
-      File shareddir = new File(cluster.getSharedEditsDir(0, 1));
-      assertTrue("Initial Shared edits dir not fully deleted",
-          FileUtil.fullyDelete(shareddir));
-
-      // Check namenodes should not start without shared dir.
-      assertCanNotStartNamenode(cluster, 0);
-      assertCanNotStartNamenode(cluster, 1);
-
-      // Configure bkjm as new shared edits dir in both namenodes
-      Configuration nn1Conf = cluster.getConfiguration(0);
-      Configuration nn2Conf = cluster.getConfiguration(1);
-      nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
-          .createJournalURI("/initializeSharedEdits").toString());
-      nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
-          .createJournalURI("/initializeSharedEdits").toString());
-      BKJMUtil.addJournalManagerDefinition(nn1Conf);
-      BKJMUtil.addJournalManagerDefinition(nn2Conf);
-
-      // Initialize the BKJM shared edits.
-      assertFalse(NameNode.initializeSharedEdits(nn1Conf));
-
-      // NameNode should be able to start and should be in sync with BKJM as
-      // shared dir
-      assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-
-  private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) {
-    try {
-      cluster.restartNameNode(nnIndex, false);
-      fail("Should not have been able to start NN" + (nnIndex)
-          + " without shared dir");
-    } catch (IOException ioe) {
-      LOG.info("Got expected exception", ioe);
-      GenericTestUtils.assertExceptionContains(
-          "storage directory does not exist or is not accessible", ioe);
-    }
-  }
-
-  private void assertCanStartHANameNodes(MiniDFSCluster cluster,
-      Configuration conf, String path) throws ServiceFailedException,
-      IOException, URISyntaxException, InterruptedException {
-    // Now should be able to start both NNs. Pass "false" here so that we don't
-    // try to waitActive on all NNs, since the second NN doesn't exist yet.
-    cluster.restartNameNode(0, false);
-    cluster.restartNameNode(1, true);
-
-    // Make sure HA is working.
-    cluster
-        .getNameNode(0)
-        .getRpcServer()
-        .transitionToActive(
-            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
-    FileSystem fs = null;
-    try {
-      Path newPath = new Path(path);
-      fs = HATestUtil.configureFailoverFs(cluster, conf);
-      assertTrue(fs.mkdirs(newPath));
-      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
-          cluster.getNameNode(1));
-      assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-          newPath.toString(), false).isDir());
-    } finally {
-      if (fs != null) {
-        fs.close();
-      }
-    }
-  }
-
-  /**
-   * NameNode should load the edits correctly if the applicable edits are
-   * present in the BKJM.
-   */
-  @Test
-  public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = getConf();
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
-          .createJournalURI("/correctEditLogSelection").toString());
-      BKJMUtil.addJournalManagerDefinition(conf);
-
-      cluster = new MiniDFSCluster.Builder(conf)
-          .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
-          .manageNameDfsSharedDirs(false).build();
-      NameNode nn1 = cluster.getNameNode(0);
-      NameNode nn2 = cluster.getNameNode(1);
-      cluster.waitActive();
-      cluster.transitionToActive(0);
-      nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
-      // Transition to standby current active gracefully.
-      cluster.transitionToStandby(0);
-      // Make the other Active and Roll edits multiple times
-      cluster.transitionToActive(1);
-      nn2.getRpcServer().rollEditLog();
-      nn2.getRpcServer().rollEditLog();
-      // Now One more failover. So NN1 should be able to failover successfully.
-      cluster.transitionToStandby(1);
-      cluster.transitionToActive(0);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
deleted file mode 100644
index f3f6ce5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.Random;
-
-import org.apache.bookkeeper.util.LocalBookKeeper;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.server.NIOServerCnxnFactory;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-
-public class TestBookKeeperConfiguration {
-  private static final Log LOG = LogFactory
-      .getLog(TestBookKeeperConfiguration.class);
-  private static final int ZK_SESSION_TIMEOUT = 5000;
-  private static final String HOSTPORT = "127.0.0.1:2181";
-  private static final int CONNECTION_TIMEOUT = 30000;
-  private static NIOServerCnxnFactory serverFactory;
-  private static ZooKeeperServer zks;
-  private static ZooKeeper zkc;
-  private static int ZooKeeperDefaultPort = 2181;
-  private static File ZkTmpDir;
-  private BookKeeperJournalManager bkjm;
-  private static final String BK_ROOT_PATH = "/ledgers";
-
-  private static ZooKeeper connectZooKeeper(String ensemble)
-      throws IOException, KeeperException, InterruptedException {
-    final CountDownLatch latch = new CountDownLatch(1);
-
-    ZooKeeper zkc = new ZooKeeper(HOSTPORT, ZK_SESSION_TIMEOUT, new Watcher() {
-      public void process(WatchedEvent event) {
-        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
-          latch.countDown();
-        }
-      }
-    });
-    if (!latch.await(ZK_SESSION_TIMEOUT, TimeUnit.MILLISECONDS)) {
-      throw new IOException("Zookeeper took too long to connect");
-    }
-    return zkc;
-  }
-
-  private NamespaceInfo newNSInfo() {
-    Random r = new Random();
-    return new NamespaceInfo(r.nextInt(), "testCluster", "TestBPID", -1);
-  }
-
-  @BeforeClass
-  public static void setupZooKeeper() throws Exception {
-    // create a ZooKeeper server(dataDir, dataLogDir, port)
-    LOG.info("Starting ZK server");
-    ZkTmpDir = File.createTempFile("zookeeper", "test");
-    ZkTmpDir.delete();
-    ZkTmpDir.mkdir();
-
-    try {
-      zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
-      serverFactory = new NIOServerCnxnFactory();
-      serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), 10);
-      serverFactory.startup(zks);
-    } catch (Exception e) {
-      LOG.error("Exception while instantiating ZooKeeper", e);
-    }
-
-    boolean b = LocalBookKeeper.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
-    LOG.debug("ZooKeeper server up: " + b);
-  }
-
-  @Before
-  public void setup() throws Exception {
-    zkc = connectZooKeeper(HOSTPORT);
-    try {
-      ZKUtil.deleteRecursive(zkc, BK_ROOT_PATH);
-    } catch (KeeperException.NoNodeException e) {
-      LOG.debug("Ignoring no node exception on cleanup", e);
-    } catch (Exception e) {
-      LOG.error("Exception when deleting bookie root path in zk", e);
-    }
-  }
-
-  @After
-  public void teardown() throws Exception {
-    if (null != zkc) {
-      zkc.close();
-    }
-    if (null != bkjm) {
-      bkjm.close();
-    }
-  }
-
-  @AfterClass
-  public static void teardownZooKeeper() throws Exception {
-    if (null != zkc) {
-      zkc.close();
-    }
-  }
-
-  /**
-   * Verify the BKJM is creating the bookie available path configured in
-   * 'dfs.namenode.bookkeeperjournal.zk.availablebookies'
-   */
-  @Test
-  public void testWithConfiguringBKAvailablePath() throws Exception {
-    // set Bookie available path in the configuration
-    String bkAvailablePath 
-      = BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT;
-    Configuration conf = new Configuration();
-    conf.setStrings(BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH,
-        bkAvailablePath);
-    Assert.assertNull(bkAvailablePath + " already exists", zkc.exists(
-        bkAvailablePath, false));
-    NamespaceInfo nsi = newNSInfo();
-    bkjm = new BookKeeperJournalManager(conf,
-        URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"),
-        nsi);
-    bkjm.format(nsi);
-    Assert.assertNotNull("Bookie available path : " + bkAvailablePath
-        + " doesn't exists", zkc.exists(bkAvailablePath, false));
-  }
-
-  /**
-   * Verify the BKJM is creating the bookie available default path, when there
-   * is no 'dfs.namenode.bookkeeperjournal.zk.availablebookies' configured
-   */
-  @Test
-  public void testDefaultBKAvailablePath() throws Exception {
-    Configuration conf = new Configuration();
-    Assert.assertNull(BK_ROOT_PATH + " already exists", zkc.exists(
-        BK_ROOT_PATH, false));
-    NamespaceInfo nsi = newNSInfo();
-    bkjm = new BookKeeperJournalManager(conf,
-        URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"),
-        nsi);
-    bkjm.format(nsi);
-    Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH
-        + " doesn't exists", zkc.exists(BK_ROOT_PATH, false));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
deleted file mode 100644
index 52e4568..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperEditLogStreams.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.conf.ClientConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Unit test for the bkjm's streams
- */
-public class TestBookKeeperEditLogStreams {
-  static final Log LOG = LogFactory.getLog(TestBookKeeperEditLogStreams.class);
-
-  private static BKJMUtil bkutil;
-  private final static int numBookies = 3;
-
-  @BeforeClass
-  public static void setupBookkeeper() throws Exception {
-    bkutil = new BKJMUtil(numBookies);
-    bkutil.start();
-  }
-
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    bkutil.teardown();
-  }
-
-  /**
-   * Test that bkjm will refuse open a stream on an empty
-   * ledger.
-   */
-  @Test
-  public void testEmptyInputStream() throws Exception {
-    ZooKeeper zk = BKJMUtil.connectZooKeeper();
-
-    BookKeeper bkc = new BookKeeper(new ClientConfiguration(), zk);
-    try {
-      LedgerHandle lh = bkc.createLedger(BookKeeper.DigestType.CRC32, "foobar"
-          .getBytes());
-      lh.close();
-
-      EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
-      try {
-        new BookKeeperEditLogInputStream(lh, metadata, -1);
-        fail("Shouldn't get this far, should have thrown");
-      } catch (IOException ioe) {
-        assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
-      }
-
-      metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsServerConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
-      try {
-        new BookKeeperEditLogInputStream(lh, metadata, 0);
-        fail("Shouldn't get this far, should have thrown");
-      } catch (IOException ioe) {
-        assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
-      }
-    } finally {
-      bkc.close();
-      zk.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
deleted file mode 100644
index b8fc30d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-
-import java.net.BindException;
-import java.util.Random;
-
-/**
- * Runs the same tests as TestStandbyCheckpoints, but
- * using a bookkeeper journal manager as the shared directory
- */
-public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
-  //overwrite the nn count
- static{
-   TestStandbyCheckpoints.NUM_NNS = 2;
- }
-  private static BKJMUtil bkutil = null;
-  static int numBookies = 3;
-  static int journalCount = 0;
-  private final Random random = new Random();
-
-  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  @Before
-  public void setupCluster() throws Exception {
-    Configuration conf = setupCommonConfig();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-             BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
-             .toString());
-    BKJMUtil.addJournalManagerDefinition(conf);
-
-    int retryCount = 0;
-    while (true) {
-      try {
-        int basePort = 10060 + random.nextInt(100) * 2;
-        MiniDFSNNTopology topology = new MiniDFSNNTopology()
-          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
-
-        cluster = new MiniDFSCluster.Builder(conf)
-          .nnTopology(topology)
-          .numDataNodes(1)
-          .manageNameDfsSharedDirs(false)
-          .build();
-        cluster.waitActive();
-
-        setNNs();
-        fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-        cluster.transitionToActive(0);
-        ++retryCount;
-        break;
-      } catch (BindException e) {
-        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
-            + retryCount + " times");
-      }
-    }
-  }
-
-  @BeforeClass
-  public static void startBK() throws Exception {
-    journalCount = 0;
-    bkutil = new BKJMUtil(numBookies);
-    bkutil.start();
-  }
-
-  @AfterClass
-  public static void shutdownBK() throws Exception {
-    if (bkutil != null) {
-      bkutil.teardown();
-    }
-  }
-
-  @Override
-  public void testCheckpointCancellation() throws Exception {
-    // Overridden as a no-op: the implementation in the superclass assumes
-    // that writes go to a file. This should be fixed at some point.
-  }
-}
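
For reference, the shared-edits wiring this HA checkpoint test exercised
boils down to the sketch below. It is distilled from setupCluster() above
and assumes the BKJMUtil helper from this (now removed) test module;
createJournalURI() and addJournalManagerDefinition() are test scaffolding
rather than public Hadoop APIs, and the ports and paths are illustrative
only.

// Imports as in the deleted test above.
// Point both NameNodes' shared edits directory at a BookKeeper journal.
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
    BKJMUtil.createJournalURI("/checkpointing-example").toString());
BKJMUtil.addJournalManagerDefinition(conf);  // registers the bookkeeper:// scheme

MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10060))
        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10061)));

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(1)
    .manageNameDfsSharedDirs(false)  // shared edits live in BookKeeper, not on disk
    .build();
cluster.waitActive();
cluster.transitionToActive(0);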

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
deleted file mode 100644
index 07fcd72..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
+++ /dev/null
@@ -1,984 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.spy;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.JournalManager;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class TestBookKeeperJournalManager {
-  static final Log LOG = LogFactory.getLog(TestBookKeeperJournalManager.class);
-  
-  private static final long DEFAULT_SEGMENT_SIZE = 1000;
-
-  protected static Configuration conf = new Configuration();
-  private ZooKeeper zkc;
-  private static BKJMUtil bkutil;
-  static int numBookies = 3;
-  private BookieServer newBookie;
-
-  @BeforeClass
-  public static void setupBookkeeper() throws Exception {
-    bkutil = new BKJMUtil(numBookies);
-    bkutil.start();
-  }
-
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    bkutil.teardown();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    zkc = BKJMUtil.connectZooKeeper();
-  }
-
-  @After
-  public void teardown() throws Exception {
-    zkc.close();
-    if (newBookie != null) {
-      newBookie.shutdown();
-    }
-  }
-
-  private NamespaceInfo newNSInfo() {
-    Random r = new Random();
-    return new NamespaceInfo(r.nextInt(), "testCluster", "TestBPID", -1);
-  }
-
-  @Test
-  public void testSimpleWrite() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1 ; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, 100);
- 
-    String zkpath = bkjm.finalizedLedgerZNode(1, 100);
-    
-    assertNotNull(zkc.exists(zkpath, false));
-    assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
-  }
-
-  @Test
-  public void testNumberOfTransactions() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1 ; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, 100);
-
-    long numTrans = bkjm.getNumberOfTransactions(1, true);
-    assertEquals(100, numTrans);
-  }
-
-  @Test 
-  public void testNumberOfTransactionsWithGaps() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
-    bkjm.format(nsi);
-
-    long txid = 1;
-    for (long i = 0; i < 3; i++) {
-      long start = txid;
-      EditLogOutputStream out = bkjm.startLogSegment(start,
-          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-      out.close();
-      bkjm.finalizeLogSegment(start, txid-1);
-      assertNotNull(
-          zkc.exists(bkjm.finalizedLedgerZNode(start, txid-1), false));
-    }
-    zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE+1,
-                                         DEFAULT_SEGMENT_SIZE*2), -1);
-    
-    long numTrans = bkjm.getNumberOfTransactions(1, true);
-    assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
-    
-    try {
-      numTrans = bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE+1, true);
-      fail("Should have thrown corruption exception by this point");
-    } catch (JournalManager.CorruptionException ce) {
-      // if we get here, everything is going good
-    }
-
-    numTrans = bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE*2)+1, true);
-    assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
-  }
-
-  @Test
-  public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
-    bkjm.format(nsi);
-
-    long txid = 1;
-    for (long i = 0; i < 3; i++) {
-      long start = txid;
-      EditLogOutputStream out = bkjm.startLogSegment(start,
-          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-      
-      out.close();
-      bkjm.finalizeLogSegment(start, (txid-1));
-      assertNotNull(
-          zkc.exists(bkjm.finalizedLedgerZNode(start, (txid-1)), false));
-    }
-    long start = txid;
-    EditLogOutputStream out = bkjm.startLogSegment(start,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE/2; j++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(txid++);
-      out.write(op);
-    }
-    out.setReadyToFlush();
-    out.flush();
-    out.abort();
-    out.close();
-    
-    long numTrans = bkjm.getNumberOfTransactions(1, true);
-    assertEquals((txid-1), numTrans);
-  }
-
-  /**
-   * Create a bkjm namespace, write a journal from txid 1, close stream.
-   * Try to create a new journal from txid 1. Should throw an exception.
-   */
-  @Test
-  public void testWriteRestartFrom1() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi);
-    bkjm.format(nsi);
-
-    long txid = 1;
-    long start = txid;
-    EditLogOutputStream out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(txid++);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(start, (txid-1));
-    
-    txid = 1;
-    try {
-      out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      fail("Shouldn't be able to start another journal from " + txid
-          + " when one already exists");
-    } catch (Exception ioe) {
-      LOG.info("Caught exception as expected", ioe);
-    }
-
-    // test border case
-    txid = DEFAULT_SEGMENT_SIZE;
-    try {
-      out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      fail("Shouldn't be able to start another journal from " + txid
-          + " when one already exists");
-    } catch (IOException ioe) {
-      LOG.info("Caught exception as expected", ioe);
-    }
-
-    // open journal continuing from before
-    txid = DEFAULT_SEGMENT_SIZE + 1;
-    start = txid;
-    out = bkjm.startLogSegment(start,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    assertNotNull(out);
-
-    for (long j = 1 ; j <= DEFAULT_SEGMENT_SIZE; j++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(txid++);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(start, (txid-1));
-
-    // open a journal arbitrarily far in the future
-    txid = DEFAULT_SEGMENT_SIZE * 4;
-    out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    assertNotNull(out);
-  }
-
-  @Test
-  public void testTwoWriters() throws Exception {
-    long start = 1;
-    NamespaceInfo nsi = newNSInfo();
-
-    BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
-    bkjm1.format(nsi);
-
-    BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
-
-
-    EditLogOutputStream out1 = bkjm1.startLogSegment(start,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    try {
-      bkjm2.startLogSegment(start,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      fail("Shouldn't have been able to open the second writer");
-    } catch (IOException ioe) {
-      LOG.info("Caught exception as expected", ioe);
-    } finally {
-      out1.close();
-    }
-  }
-
-  @Test
-  public void testSimpleRead() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-simpleread"),
-        nsi);
-    bkjm.format(nsi);
-
-    final long numTransactions = 10000;
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1 ; i <= numTransactions; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, numTransactions);
-
-    List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
-    bkjm.selectInputStreams(in, 1, true);
-    try {
-      assertEquals(numTransactions, 
-                   FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
-    } finally {
-      in.get(0).close();
-    }
-  }
-
-  @Test
-  public void testSimpleRecovery() throws Exception {
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
-        nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1 ; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.setReadyToFlush();
-    out.flush();
-
-    out.abort();
-    out.close();
-
-
-    assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
-    assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));
-
-    bkjm.recoverUnfinalizedSegments();
-
-    assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
-    assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
-  }
-
-  /**
-   * Test that if enough bookies fail to prevent an ensemble from forming,
-   * writes to BookKeeper will fail. Also test that once an ensemble is
-   * available again, the journal manager can continue to write.
-   */
-  @Test
-  public void testAllBookieFailure() throws Exception {
-    // bookie to fail
-    newBookie = bkutil.newBookie();
-    BookieServer replacementBookie = null;
-
-    try {
-      int ensembleSize = numBookies + 1;
-      assertEquals("New bookie didn't start",
-                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
-
-      // ensure that the journal manager has to use all bookies,
-      // so that a failure will fail the journal manager
-      Configuration conf = new Configuration();
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-                  ensembleSize);
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-                  ensembleSize);
-      long txid = 1;
-      NamespaceInfo nsi = newNSInfo();
-      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-          BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),
-          nsi);
-      bkjm.format(nsi);
-      EditLogOutputStream out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-
-      for (long i = 1 ; i <= 3; i++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-      out.setReadyToFlush();
-      out.flush();
-      newBookie.shutdown();
-      assertEquals("New bookie didn't die",
-                   numBookies, bkutil.checkBookiesUp(numBookies, 10));
-
-      try {
-        for (long i = 1 ; i <= 3; i++) {
-          FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-          op.setTransactionId(txid++);
-          out.write(op);
-        }
-        out.setReadyToFlush();
-        out.flush();
-        fail("should not get to this stage");
-      } catch (IOException ioe) {
-        LOG.debug("Error writing to bookkeeper", ioe);
-        assertTrue("Invalid exception message",
-                   ioe.getMessage().contains("Failed to write to bookkeeper"));
-      }
-      replacementBookie = bkutil.newBookie();
-
-      assertEquals("New bookie didn't start",
-                   numBookies+1, bkutil.checkBookiesUp(numBookies+1, 10));
-      bkjm.recoverUnfinalizedSegments();
-      out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      for (long i = 1 ; i <= 3; i++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-
-      out.setReadyToFlush();
-      out.flush();
-
-    } catch (Exception e) {
-      LOG.error("Exception in test", e);
-      throw e;
-    } finally {
-      if (replacementBookie != null) {
-        replacementBookie.shutdown();
-      }
-      newBookie.shutdown();
-
-      if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
-        LOG.warn("Not all bookies from this test shut down, expect errors");
-      }
-    }
-  }
-
-  /**
-   * Test that a BookKeeper JM can continue to work across the
-   * failure of a bookie. This should be handled transparently
-   * by bookkeeper.
-   */
-  @Test
-  public void testOneBookieFailure() throws Exception {
-    newBookie = bkutil.newBookie();
-    BookieServer replacementBookie = null;
-
-    try {
-      int ensembleSize = numBookies + 1;
-      assertEquals("New bookie didn't start",
-                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
-
-      // ensure that the journal manager has to use all bookies,
-      // so that a failure will fail the journal manager
-      Configuration conf = new Configuration();
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-                  ensembleSize);
-      conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-                  ensembleSize);
-      long txid = 1;
-
-      NamespaceInfo nsi = newNSInfo();
-      BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-          BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),
-          nsi);
-      bkjm.format(nsi);
-
-      EditLogOutputStream out = bkjm.startLogSegment(txid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      for (long i = 1 ; i <= 3; i++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-      out.setReadyToFlush();
-      out.flush();
-
-      replacementBookie = bkutil.newBookie();
-      assertEquals("replacement bookie didn't start",
-                   ensembleSize+1, bkutil.checkBookiesUp(ensembleSize+1, 10));
-      newBookie.shutdown();
-      assertEquals("New bookie didn't die",
-                   ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));
-
-      for (long i = 1 ; i <= 3; i++) {
-        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-        op.setTransactionId(txid++);
-        out.write(op);
-      }
-      out.setReadyToFlush();
-      out.flush();
-    } catch (Exception e) {
-      LOG.error("Exception in test", e);
-      throw e;
-    } finally {
-      if (replacementBookie != null) {
-        replacementBookie.shutdown();
-      }
-      newBookie.shutdown();
-
-      if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
-        LOG.warn("Not all bookies from this test shut down, expect errors");
-      }
-    }
-  }
-  
-  /**
-   * If a journal manager has an empty inprogress node, ensure that we throw
-   * an error: this should not be possible, so some third party must have
-   * corrupted the ZooKeeper state.
-   */
-  @Test
-  public void testEmptyInprogressNode() throws Exception {
-    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
-                                                                 nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, 100);
-
-    out = bkjm.startLogSegment(101,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    out.close();
-    bkjm.close();
-    String inprogressZNode = bkjm.inprogressZNode(101);
-    zkc.setData(inprogressZNode, new byte[0], -1);
-
-    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-    try {
-      bkjm.recoverUnfinalizedSegments();
-      fail("Should have failed. There should be no way of creating"
-          + " an empty inprogess znode");
-    } catch (IOException e) {
-      // correct behaviour
-      assertTrue("Exception different than expected", e.getMessage().contains(
-          "Invalid/Incomplete data in znode"));
-    } finally {
-      bkjm.close();
-    }
-  }
-
-  /**
-   * If a journal manager has a corrupt inprogress node, ensure that we throw
-   * an error: this should not be possible, so some third party must have
-   * corrupted the ZooKeeper state.
-   */
-  @Test
-  public void testCorruptInprogressNode() throws Exception {
-    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-corruptInprogress");
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
-                                                                 nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, 100);
-
-    out = bkjm.startLogSegment(101,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    out.close();
-    bkjm.close();
-
-    String inprogressZNode = bkjm.inprogressZNode(101);
-    zkc.setData(inprogressZNode, "WholeLottaJunk".getBytes(), -1);
-
-    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-    try {
-      bkjm.recoverUnfinalizedSegments();
-      fail("Should have failed. There should be no way of creating"
-          + " an empty inprogess znode");
-    } catch (IOException e) {
-      // correct behaviour
-      assertTrue("Exception different than expected", e.getMessage().contains(
-          "has no field named"));
-    } finally {
-      bkjm.close();
-    }
-  }
-
-  /**
-   * Cases can occur where we create a segment but crash before we even have
-   * the chance to write the START_SEGMENT op. If this occurs we should warn,
-   * but load as normal.
-   */
-  @Test
-  public void testEmptyInprogressLedger() throws Exception {
-    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogressLedger");
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
-                                                                 nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, 100);
-
-    out = bkjm.startLogSegment(101,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    out.close();
-    bkjm.close();
-
-    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-    bkjm.recoverUnfinalizedSegments();
-    out = bkjm.startLogSegment(101,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(101, 200);
-
-    bkjm.close();
-  }
-
-  /**
-   * Test the case where we fail between finalizing an inprogress segment and
-   * deleting the corresponding inprogress znode.
-   */
-  @Test
-  public void testRefinalizeAlreadyFinalizedInprogress() throws Exception {
-    URI uri = BKJMUtil
-        .createJournalURI("/hdfsjournal-refinalizeInprogressLedger");
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
-                                                                 nsi);
-    bkjm.format(nsi);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= 100; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.close();
-
-    String inprogressZNode = bkjm.inprogressZNode(1);
-    String finalizedZNode = bkjm.finalizedLedgerZNode(1, 100);
-    assertNotNull("inprogress znode doesn't exist", zkc.exists(inprogressZNode,
-        null));
-    assertNull("finalized znode exists", zkc.exists(finalizedZNode, null));
-
-    byte[] inprogressData = zkc.getData(inprogressZNode, false, null);
-
-    // finalize
-    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-    bkjm.recoverUnfinalizedSegments();
-    bkjm.close();
-
-    assertNull("inprogress znode exists", zkc.exists(inprogressZNode, null));
-    assertNotNull("finalized znode doesn't exist", zkc.exists(finalizedZNode,
-        null));
-
-    zkc.create(inprogressZNode, inprogressData, Ids.OPEN_ACL_UNSAFE,
-        CreateMode.PERSISTENT);
-
-    // should work fine
-    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-    bkjm.recoverUnfinalizedSegments();
-    bkjm.close();
-  }
-
-  /**
-   * Tests that reading edit log file metadata from ZooKeeper can handle a
-   * NoNodeException. bkjm.getInputStream(fromTxId, inProgressOk) should
-   * suppress the NoNodeException and continue. HDFS-3441.
-   */
-  @Test
-  public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
-    URI uri = BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
-                                                                 nsi);
-    bkjm.format(nsi);
-
-    try {
-      // start new inprogress log segment with txid=1
-      // and write transactions till txid=50
-      String zkpath1 = startAndFinalizeLogSegment(bkjm, 1, 50);
-
-      // start new inprogress log segment with txid=51
-      // and write transactions till txid=100
-      String zkpath2 = startAndFinalizeLogSegment(bkjm, 51, 100);
-
-      // Read the metadata from ZK, simulating the situation where the edit
-      // log metadata is removed by the purger thread while we are reading.
-      ZooKeeper zkspy = spy(BKJMUtil.connectZooKeeper());
-      bkjm.setZooKeeper(zkspy);
-      Mockito.doThrow(
-          new KeeperException.NoNodeException(zkpath2 + " doesn't exists"))
-          .when(zkspy).getData(zkpath2, false, null);
-
-      List<EditLogLedgerMetadata> ledgerList = bkjm.getLedgerList(false);
-      assertEquals("List contains the metadata of non exists path.", 1,
-          ledgerList.size());
-      assertEquals("LogLedgerMetadata contains wrong zk paths.", zkpath1,
-          ledgerList.get(0).getZkPath());
-    } finally {
-      bkjm.close();
-    }
-  }
-
-  private enum ThreadStatus {
-    COMPLETED, GOODEXCEPTION, BADEXCEPTION;
-  };
-
-  /**
-   * Tests that concurrent calls to format will still allow one to succeed.
-   */
-  @Test
-  public void testConcurrentFormat() throws Exception {
-    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
-    final NamespaceInfo nsi = newNSInfo();
-
-    // populate with data first
-    BookKeeperJournalManager bkjm
-      = new BookKeeperJournalManager(conf, uri, nsi);
-    bkjm.format(nsi);
-    for (int i = 1; i < 100*2; i += 2) {
-      bkjm.startLogSegment(i, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      bkjm.finalizeLogSegment(i, i+1);
-    }
-    bkjm.close();
-
-    final int numThreads = 40;
-    List<Callable<ThreadStatus>> threads
-      = new ArrayList<Callable<ThreadStatus>>();
-    final CyclicBarrier barrier = new CyclicBarrier(numThreads);
-
-    for (int i = 0; i < numThreads; i++) {
-      threads.add(new Callable<ThreadStatus>() {
-          public ThreadStatus call() {
-            BookKeeperJournalManager bkjm = null;
-            try {
-              bkjm = new BookKeeperJournalManager(conf, uri, nsi);
-              barrier.await();
-              bkjm.format(nsi);
-              return ThreadStatus.COMPLETED;
-            } catch (IOException ioe) {
-              LOG.info("Exception formatting ", ioe);
-              return ThreadStatus.GOODEXCEPTION;
-            } catch (InterruptedException ie) {
-              LOG.error("Interrupted. Something is broken", ie);
-              Thread.currentThread().interrupt();
-              return ThreadStatus.BADEXCEPTION;
-            } catch (Exception e) {
-              LOG.error("Some other bad exception", e);
-              return ThreadStatus.BADEXCEPTION;
-            } finally {
-              if (bkjm != null) {
-                try {
-                  bkjm.close();
-                } catch (IOException ioe) {
-                  LOG.error("Error closing journal manager", ioe);
-                }
-              }
-            }
-          }
-        });
-    }
-    ExecutorService service = Executors.newFixedThreadPool(numThreads);
-    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60,
-                                                      TimeUnit.SECONDS);
-    int numCompleted = 0;
-    for (Future<ThreadStatus> s : statuses) {
-      assertTrue(s.isDone());
-      assertTrue("Thread threw invalid exception",
-          s.get() == ThreadStatus.COMPLETED
-          || s.get() == ThreadStatus.GOODEXCEPTION);
-      if (s.get() == ThreadStatus.COMPLETED) {
-        numCompleted++;
-      }
-    }
-    LOG.info("Completed " + numCompleted + " formats");
-    assertTrue("No thread managed to complete formatting", numCompleted > 0);
-  }
-
-  @Test(timeout = 120000)
-  public void testDefaultAckQuorum() throws Exception {
-    newBookie = bkutil.newBookie();
-    int ensembleSize = numBookies + 1;
-    int quorumSize = numBookies + 1;
-    // ensure that the journal manager has to use all bookies,
-    // so that a failure will fail the journal manager
-    Configuration conf = new Configuration();
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-        ensembleSize);
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-        quorumSize);
-    // sets 2 secs
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC,
-        2);
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"), nsi);
-    bkjm.format(nsi);
-    CountDownLatch sleepLatch = new CountDownLatch(1);
-    sleepBookie(sleepLatch, newBookie);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    int numTransactions = 100;
-    for (long i = 1; i <= numTransactions; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    try {
-      out.close();
-      bkjm.finalizeLogSegment(1, numTransactions);
-
-      List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
-      bkjm.selectInputStreams(in, 1, true);
-      try {
-        assertEquals(numTransactions,
-            FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
-      } finally {
-        in.get(0).close();
-      }
-      fail("Should throw exception as not enough non-faulty bookies available!");
-    } catch (IOException ioe) {
-      // expected
-    }
-  }
-
-  /**
-   * Test the ack quorum feature supported by BookKeeper. Keep an ack quorum's
-   * worth of bookies alive and put the remaining bookies to sleep. The client
-   * waits for acknowledgements from the ack-quorum bookies and, once it gets
-   * the success responses, continues writing. A client not using an ack
-   * quorum would hang for a long time trying to add entries.
-   */
-  @Test(timeout = 120000)
-  public void testAckQuorum() throws Exception {
-    // slow bookie
-    newBookie = bkutil.newBookie();
-    // make quorum size and ensemble size same to avoid the interleave writing
-    // of the ledger entries
-    int ensembleSize = numBookies + 1;
-    int quorumSize = numBookies + 1;
-    int ackSize = numBookies;
-    // ensure that the journal manager has to use all bookies,
-    // so that a failure will fail the journal manager
-    Configuration conf = new Configuration();
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-        ensembleSize);
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-        quorumSize);
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ACK_QUORUM_SIZE,
-        ackSize);
-    // sets 60 minutes
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC,
-        3600);
-
-    NamespaceInfo nsi = newNSInfo();
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"), nsi);
-    bkjm.format(nsi);
-    CountDownLatch sleepLatch = new CountDownLatch(1);
-    sleepBookie(sleepLatch, newBookie);
-
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    int numTransactions = 100;
-    for (long i = 1; i <= numTransactions; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, numTransactions);
-
-    List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
-    bkjm.selectInputStreams(in, 1, true);
-    try {
-      assertEquals(numTransactions,
-          FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
-    } finally {
-      sleepLatch.countDown();
-      in.get(0).close();
-      bkjm.close();
-    }
-  }
-
-  /**
-   * Suspend a bookie until the given latch is counted down.
-   *
-   * @param l
-   *          latch to wait on
-   * @param bookie
-   *          bookie server
-   * @throws Exception
-   */
-  private void sleepBookie(final CountDownLatch l, final BookieServer bookie)
-      throws Exception {
-
-    Thread sleeper = new Thread() {
-      public void run() {
-        try {
-          bookie.suspendProcessing();
-          l.await(60, TimeUnit.SECONDS);
-          bookie.resumeProcessing();
-        } catch (Exception e) {
-          LOG.error("Error suspending bookie", e);
-        }
-      }
-    };
-    sleeper.setName("BookieServerSleeper-" + bookie.getBookie().getId());
-    sleeper.start();
-  }
-
-
-  private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
-      int startTxid, int endTxid) throws IOException, KeeperException,
-      InterruptedException {
-    EditLogOutputStream out = bkjm.startLogSegment(startTxid,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = startTxid; i <= endTxid; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    // finalize the inprogress_1 log segment.
-    bkjm.finalizeLogSegment(startTxid, endTxid);
-    String zkpath1 = bkjm.finalizedLedgerZNode(startTxid, endTxid);
-    assertNotNull(zkc.exists(zkpath1, false));
-    assertNull(zkc.exists(bkjm.inprogressZNode(startTxid), false));
-    return zkpath1;
-  }
-}
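
Taken together, the happy path these tests cover is format, write, finalize,
read back. The sketch below condenses testSimpleWrite() and testSimpleRead()
above; BKJMUtil and FSEditLogTestUtil are test-only helpers from this removed
module, so outside it they are assumptions, and conf is any Configuration
carrying the BKJM settings.

// Imports as in the deleted test above.
NamespaceInfo nsi = new NamespaceInfo(new Random().nextInt(),
    "testCluster", "TestBPID", -1);
BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
    BKJMUtil.createJournalURI("/hdfsjournal-roundtrip"), nsi);
bkjm.format(nsi);

// Write a segment of 100 no-op transactions and finalize it.
EditLogOutputStream out = bkjm.startLogSegment(1,
    NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long txid = 1; txid <= 100; txid++) {
  FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
  op.setTransactionId(txid);
  out.write(op);
}
out.close();
bkjm.finalizeLogSegment(1, 100);

// Read the finalized segment back through the journal manager.
List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
bkjm.selectInputStreams(streams, 1, true);
try {
  assertEquals(100, FSEditLogTestUtil.countTransactionsInStream(streams.get(0)));
} finally {
  streams.get(0).close();
  bkjm.close();
}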

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperSpeculativeRead.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperSpeculativeRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperSpeculativeRead.java
deleted file mode 100644
index f5b86bc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperSpeculativeRead.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.bookkeeper.proto.BookieServer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestBookKeeperSpeculativeRead {
-  private static final Log LOG = LogFactory
-      .getLog(TestBookKeeperSpeculativeRead.class);
-
-  private ZooKeeper zkc;
-  private static BKJMUtil bkutil;
-  private static int numLocalBookies = 1;
-  private static List<BookieServer> bks = new ArrayList<BookieServer>();
-
-  @BeforeClass
-  public static void setupBookkeeper() throws Exception {
-    bkutil = new BKJMUtil(1);
-    bkutil.start();
-  }
-
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    bkutil.teardown();
-    for (BookieServer bk : bks) {
-      bk.shutdown();
-    }
-  }
-
-  @Before
-  public void setup() throws Exception {
-    zkc = BKJMUtil.connectZooKeeper();
-  }
-
-  @After
-  public void teardown() throws Exception {
-    zkc.close();
-  }
-
-  private NamespaceInfo newNSInfo() {
-    Random r = new Random();
-    return new NamespaceInfo(r.nextInt(), "testCluster", "TestBPID", -1);
-  }
-
-  /**
-   * Test the speculative read feature supported by BookKeeper. Keep one
-   * bookie alive and put all the other bookies to sleep. A client without
-   * speculative reads enabled would hang for a long time trying to read the
-   * entries from BookKeeper.
-   */
-  @Test(timeout = 120000)
-  public void testSpeculativeRead() throws Exception {
-    // starting 9 more servers
-    for (int i = 1; i < 10; i++) {
-      bks.add(bkutil.newBookie());
-    }
-    NamespaceInfo nsi = newNSInfo();
-    Configuration conf = new Configuration();
-    int ensembleSize = numLocalBookies + 9;
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-        ensembleSize);
-    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
-        ensembleSize);
-    conf.setInt(
-        BookKeeperJournalManager.BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS,
-        100);
-    // sets 60 minutes
-    conf.setInt(
-        BookKeeperJournalManager.BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC, 3600);
-    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
-        BKJMUtil.createJournalURI("/hdfsjournal-specread"), nsi);
-    bkjm.format(nsi);
-
-    final long numTransactions = 1000;
-    EditLogOutputStream out = bkjm.startLogSegment(1,
-        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-    for (long i = 1; i <= numTransactions; i++) {
-      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
-      op.setTransactionId(i);
-      out.write(op);
-    }
-    out.close();
-    bkjm.finalizeLogSegment(1, numTransactions);
-
-    List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
-    bkjm.selectInputStreams(in, 1, true);
-
-    // sleep 9 bk servers. Now only one server is running and responding to the
-    // clients
-    CountDownLatch sleepLatch = new CountDownLatch(1);
-    for (final BookieServer bookie : bks) {
-      sleepBookie(sleepLatch, bookie);
-    }
-    try {
-      assertEquals(numTransactions,
-          FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
-    } finally {
-      in.get(0).close();
-      sleepLatch.countDown();
-      bkjm.close();
-    }
-  }
-
-  /**
-   * Suspend a bookie until the given latch is counted down.
-   *
-   * @param latch
-   *          latch to wait on
-   * @param bookie
-   *          bookie server
-   * @throws Exception
-   */
-  private void sleepBookie(final CountDownLatch latch, final BookieServer bookie)
-      throws Exception {
-
-    Thread sleeper = new Thread() {
-      public void run() {
-        try {
-          bookie.suspendProcessing();
-          latch.await(2, TimeUnit.MINUTES);
-          bookie.resumeProcessing();
-        } catch (Exception e) {
-          LOG.error("Error suspending bookie", e);
-        }
-      }
-    };
-    sleeper.setName("BookieServerSleeper-" + bookie.getBookie().getId());
-    sleeper.start();
-  }
-}
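
The behaviour under test is driven entirely by reader-side configuration.
The sketch below condenses the settings from testSpeculativeRead() above;
the values are the test's own, chosen to trigger speculation quickly, not
recommended defaults.

// Ensemble and quorum span all ten bookies, so every read depends on them all.
Configuration conf = new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE, 10);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE, 10);
// Retry the read against another bookie if the first is silent for 100 ms.
conf.setInt(
    BookKeeperJournalManager.BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS, 100);
// Keep the hard read timeout long (60 minutes) so only speculation can rescue
// a read that is stuck on a sleeping bookie.
conf.setInt(
    BookKeeperJournalManager.BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC, 3600);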

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
deleted file mode 100644
index ef7f708..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.File;
-import java.io.FileFilter;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
-import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints.SlowCodec;
-import org.apache.hadoop.io.compress.CompressionCodecFactory;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableList;
-
-public class TestBootstrapStandbyWithBKJM {
-  private static BKJMUtil bkutil;
-  protected MiniDFSCluster cluster;
-
-  @BeforeClass
-  public static void setupBookkeeper() throws Exception {
-    bkutil = new BKJMUtil(3);
-    bkutil.start();
-  }
-
-  @AfterClass
-  public static void teardownBookkeeper() throws Exception {
-    bkutil.teardown();
-  }
-
-  @After
-  public void teardown() {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
-        .createJournalURI("/bootstrapStandby").toString());
-    BKJMUtil.addJournalManagerDefinition(conf);
-    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
-    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
-        SlowCodec.class.getCanonicalName());
-    CompressionCodecFactory.setCodecClasses(conf,
-        ImmutableList.<Class> of(SlowCodec.class));
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
-            new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
-            new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
-    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
-        .numDataNodes(1).manageNameDfsSharedDirs(false).build();
-    cluster.waitActive();
-  }
-
-  /**
-   * While bootstrapping, in_progress transaction entries should be skipped.
-   * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
-   */
-  @Test
-  public void testBootstrapStandbyWithActiveNN() throws Exception {
-    // make nn0 active
-    cluster.transitionToActive(0);
-   
-    // do ops and generate in-progress edit log data
-    Configuration confNN1 = cluster.getConfiguration(1);
-    DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
-        .configureFailoverFs(cluster, confNN1);
-    for (int i = 1; i <= 10; i++) {
-      dfs.mkdirs(new Path("/test" + i));
-    }
-    dfs.close();
-
-    // shutdown nn1 and delete its edit log files
-    cluster.shutdownNameNode(1);
-    deleteEditLogIfExists(confNN1);
-    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
-    cluster.getNameNodeRpc(0).saveNamespace(0, 0);
-    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
-
-    // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
-    // immediately after saveNamespace
-    int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
-      confNN1);
-    Assert.assertEquals("Mismatches return code", 6, rc);
-
-    // check with -skipSharedEditsCheck
-    rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
-        "-skipSharedEditsCheck" }, confNN1);
-    Assert.assertEquals("Mismatches return code", 0, rc);
-
-    // Checkpoint as fast as we can, in a tight loop.
-    confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
-    cluster.restartNameNode(1);
-    cluster.transitionToStandby(1);
-   
-    NameNode nn0 = cluster.getNameNode(0);
-    HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
-    long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
-        .getFSImage().getMostRecentCheckpointTxId();
-    HATestUtil.waitForCheckpoint(cluster, 1,
-        ImmutableList.of((int) expectedCheckpointTxId));
-
-    // Should have copied over the namespace
-    FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
-        ImmutableList.of((int) expectedCheckpointTxId));
-    FSImageTestUtil.assertNNFilesMatch(cluster);
-  }
-
-  private void deleteEditLogIfExists(Configuration confNN1) {
-    String editDirs = confNN1.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
-    String[] listEditDirs = StringUtils.split(editDirs, ',');
-    Assert.assertTrue("Wrong edit directory path!", listEditDirs.length > 0);
-
-    for (String dir : listEditDirs) {
-      File curDir = new File(dir, "current");
-      File[] listFiles = curDir.listFiles(new FileFilter() {
-        @Override
-        public boolean accept(File f) {
-          if (!f.getName().startsWith("edits")) {
-            return true;
-          }
-          return false;
-        }
-      });
-      if (listFiles != null && listFiles.length > 0) {
-        for (File file : listFiles) {
-          Assert.assertTrue("Failed to delete edit files!", file.delete());
-        }
-      }
-    }
-  }
-}
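
The command-line behaviour this test pins down is the -skipSharedEditsCheck
escape hatch. The sketch below follows testBootstrapStandbyWithActiveNN()
above; the return code 6 is simply what the test asserts for a failed
shared-edits check, not a documented contract, and confNN1 is the standby
NameNode's Configuration as in the test.

// Without -skipSharedEditsCheck, bootstrapping the standby fails right after
// a saveNamespace on the active NN, because the shared BKJM edits only hold
// an in-progress segment that the shared-edits check cannot read.
int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
    confNN1);
// rc == 6 here (shared edits check failed) in the deleted test.

// With the check skipped, bootstrap succeeds and copies the namespace over.
rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
    "-skipSharedEditsCheck" }, confNN1);
// rc == 0: the standby NameNode can now be restarted and transitioned to standby.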




[04/52] [abbrv] hadoop git commit: YARN-5101. YARN_APPLICATION_UPDATED event is parsed in ApplicationHistoryManagerOnTimelineStore#convertToApplicationReport with reversed order. Contributed by Sunil G.

Posted by cn...@apache.org.
YARN-5101. YARN_APPLICATION_UPDATED event is parsed in ApplicationHistoryManagerOnTimelineStore#convertToApplicationReport with reversed order. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d2f380d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d2f380d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d2f380d

Branch: refs/heads/HADOOP-13037
Commit: 4d2f380d787a6145f45c87ba663079fedbf645b8
Parents: 3119548
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu Oct 6 18:16:48 2016 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Thu Oct 6 20:42:36 2016 +0530

----------------------------------------------------------------------
 .../ApplicationHistoryManagerOnTimelineStore.java     | 14 +++++++++++---
 .../TestApplicationHistoryManagerOnTimelineStore.java | 14 +++++++++-----
 .../yarn/server/resourcemanager/rmapp/RMAppImpl.java  |  2 +-
 3 files changed, 21 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d2f380d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 84d4543..feeafdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -351,6 +351,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
       }
     }
     List<TimelineEvent> events = entity.getEvents();
+    long updatedTimeStamp = 0L;
     if (events != null) {
       for (TimelineEvent event : events) {
         if (event.getEventType().equals(
@@ -358,9 +359,16 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
           createdTime = event.getTimestamp();
         } else if (event.getEventType().equals(
             ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
-          // TODO: YARN-5101. This type of events are parsed in
-          // time-stamp descending order which means the previous event
-          // could override the information from the later same type of event.
+          // These events are parsed in timestamp-descending order, which
+          // means an earlier-processed event could override the information
+          // from a later event of the same type. Hence compare timestamps
+          // before overwriting.
+          if (event.getTimestamp() > updatedTimeStamp) {
+            updatedTimeStamp = event.getTimestamp();
+          } else {
+            continue;
+          }
+
           Map<String, Object> eventInfo = event.getEventInfo();
           if (eventInfo == null) {
             continue;
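
In isolation, the guard added above is a keep-the-newest filter over events
delivered newest-first. The sketch below shows the same pattern on its own;
events is the entity's List<TimelineEvent>, and applyUpdate() is a
hypothetical stand-in for the real per-event handling, not part of the YARN
code.

// Events arrive in timestamp-descending order, so only apply an UPDATED event
// if it is strictly newer than anything already applied.
long updatedTimeStamp = 0L;
for (TimelineEvent event : events) {
  if (event.getTimestamp() > updatedTimeStamp) {
    updatedTimeStamp = event.getTimestamp();
    applyUpdate(event);  // hypothetical helper standing in for the real handling
  }
  // Older or equal-timestamp events are skipped so they cannot clobber newer state.
}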

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d2f380d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index b65b22b..dd1a453 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -551,23 +551,27 @@ public class TestApplicationHistoryManagerOnTimelineStore {
     entity.addEvent(tEvent);
     if (enableUpdateEvent) {
       tEvent = new TimelineEvent();
-      createAppModifiedEvent(appId, tEvent, "changed queue", 5);
+      long updatedTimeIndex = 4L;
+      createAppModifiedEvent(appId, tEvent, updatedTimeIndex++, "changed queue",
+          5);
       entity.addEvent(tEvent);
       // Change priority alone
       tEvent = new TimelineEvent();
-      createAppModifiedEvent(appId, tEvent, "changed queue", 6);
+      createAppModifiedEvent(appId, tEvent, updatedTimeIndex++, "changed queue",
+          6);
       // Now change queue
       tEvent = new TimelineEvent();
-      createAppModifiedEvent(appId, tEvent, "changed queue1", 6);
+      createAppModifiedEvent(appId, tEvent, updatedTimeIndex++,
+          "changed queue1", 6);
       entity.addEvent(tEvent);
     }
     return entity;
   }
 
   private static void createAppModifiedEvent(ApplicationId appId,
-      TimelineEvent tEvent, String queue, int priority) {
+      TimelineEvent tEvent, long updatedTimeIndex, String queue, int priority) {
     tEvent.setEventType(ApplicationMetricsConstants.UPDATED_EVENT_TYPE);
-    tEvent.setTimestamp(Integer.MAX_VALUE + 4L + appId.getId());
+    tEvent.setTimestamp(Integer.MAX_VALUE + updatedTimeIndex + appId.getId());
     Map<String, Object> eventInfo = new HashMap<String, Object>();
     eventInfo.put(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, queue);
     eventInfo.put(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d2f380d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 727703b..0fdc311 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1061,7 +1061,7 @@ public class RMAppImpl implements RMApp, Recoverable {
       }
 
       app.rmContext.getSystemMetricsPublisher().appUpdated(app,
-          System.currentTimeMillis());
+          app.systemClock.getTime());
 
       // TODO: Write out change to state store (YARN-1558)
       // Also take care of RM failover
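
The one-line change above takes the published update time from the app's injected clock rather than the wall clock. The value of that indirection is testability: a test can substitute a controlled clock and assert an exact timestamp. A small sketch of the idea, with an illustrative Clock interface standing in for the actual YARN util classes:

public class InjectedClockSketch {
  interface Clock {
    long getTime();
  }

  static class SystemClock implements Clock {
    public long getTime() { return System.currentTimeMillis(); }
  }

  // A test clock whose time is set explicitly, so assertions are deterministic.
  static class ControlledClock implements Clock {
    private long now;
    void setTime(long t) { now = t; }
    public long getTime() { return now; }
  }

  static class App {
    private final Clock clock;
    App(Clock clock) { this.clock = clock; }
    // Publish with the injected clock instead of System.currentTimeMillis().
    long publishUpdateTime() { return clock.getTime(); }
  }

  public static void main(String[] args) {
    ControlledClock clock = new ControlledClock();
    clock.setTime(42L);
    System.out.println(new App(clock).publishUpdateTime());              // prints 42
    System.out.println(new App(new SystemClock()).publishUpdateTime() > 0);
  }
}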




[08/52] [abbrv] hadoop git commit: HDFS-10939. Reduce performance penalty of encryption zones. Contributed by Daryn Sharp.

Posted by cn...@apache.org.
HDFS-10939. Reduce performance penalty of encryption zones. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f32e9fc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f32e9fc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f32e9fc8

Branch: refs/heads/HADOOP-13037
Commit: f32e9fc8f7150f0e889c0774b3ad712af26fbd65
Parents: 72a2ae6
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Oct 6 15:11:14 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Oct 6 15:11:14 2016 -0500

----------------------------------------------------------------------
 .../namenode/EncryptionFaultInjector.java       |   6 +
 .../server/namenode/EncryptionZoneManager.java  |  25 +--
 .../server/namenode/FSDirEncryptionZoneOp.java  | 144 +++++++++++++---
 .../server/namenode/FSDirErasureCodingOp.java   |   2 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  20 +--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 163 +++++--------------
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  21 +--
 .../hdfs/server/namenode/FSDirectory.java       |   5 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 115 ++++++-------
 .../hdfs/server/namenode/XAttrStorage.java      |   7 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  50 ++++--
 13 files changed, 295 insertions(+), 270 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
index 27d8f50..104d8c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
@@ -35,5 +35,11 @@ public class EncryptionFaultInjector {
   }
 
   @VisibleForTesting
+  public void startFileNoKey() throws IOException {}
+
+  @VisibleForTesting
+  public void startFileBeforeGenerateKey() throws IOException {}
+
+  @VisibleForTesting
   public void startFileAfterGenerateKey() throws IOException {}
 }
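
The two new hooks follow the existing fault-injector convention: no-op methods on a singleton that production code calls at interesting points and that a test can override to pause, count, or fail. A minimal sketch of that hook pattern (the names are illustrative, not the Hadoop class):

public class FaultInjectorSketch {
  // Production code holds a singleton whose hooks do nothing by default.
  static class Injector {
    static Injector instance = new Injector();
    static Injector getInstance() { return instance; }
    void beforeExpensiveStep() { /* no-op in production */ }
  }

  static String doWork() {
    Injector.getInstance().beforeExpensiveStep();  // hook point
    return "done";
  }

  public static void main(String[] args) {
    // A test swaps in a subclass to observe or perturb the hook point.
    Injector.instance = new Injector() {
      @Override
      void beforeExpensiveStep() { System.out.println("fault injected"); }
    };
    System.out.println(doWork());
  }
}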

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 511c616..ceeccf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -260,12 +260,14 @@ public class EncryptionZoneManager {
    *
    * @param srcIIP source IIP
    * @param dstIIP destination IIP
-   * @param src    source path, used for debugging
    * @throws IOException if the src cannot be renamed to the dst
    */
-  void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
+  void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP)
       throws IOException {
     assert dir.hasReadLock();
+    if (!hasCreatedEncryptionZone()) {
+      return;
+    }
     final EncryptionZoneInt srcParentEZI =
         getParentEncryptionZoneForPath(srcIIP);
     final EncryptionZoneInt dstParentEZI =
@@ -274,17 +276,17 @@ public class EncryptionZoneManager {
     final boolean dstInEZ = (dstParentEZI != null);
     if (srcInEZ && !dstInEZ) {
       throw new IOException(
-          src + " can't be moved from an encryption zone.");
+          srcIIP.getPath() + " can't be moved from an encryption zone.");
     } else if (dstInEZ && !srcInEZ) {
       throw new IOException(
-          src + " can't be moved into an encryption zone.");
+          srcIIP.getPath() + " can't be moved into an encryption zone.");
     }
 
     if (srcInEZ) {
       if (srcParentEZI != dstParentEZI) {
         final String srcEZPath = getFullPathName(srcParentEZI);
         final String dstEZPath = getFullPathName(dstParentEZI);
-        final StringBuilder sb = new StringBuilder(src);
+        final StringBuilder sb = new StringBuilder(srcIIP.getPath());
         sb.append(" can't be moved from encryption zone ");
         sb.append(srcEZPath);
         sb.append(" to encryption zone ");
@@ -300,15 +302,14 @@ public class EncryptionZoneManager {
    * <p/>
    * Called while holding the FSDirectory lock.
    */
-  XAttr createEncryptionZone(String src, CipherSuite suite,
+  XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite,
       CryptoProtocolVersion version, String keyName)
       throws IOException {
     assert dir.hasWriteLock();
 
     // Check if src is a valid path for new EZ creation
-    final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false);
-    if (srcIIP == null || srcIIP.getLastINode() == null) {
-      throw new FileNotFoundException("cannot find " + src);
+    if (srcIIP.getLastINode() == null) {
+      throw new FileNotFoundException("cannot find " + srcIIP.getPath());
     }
     if (dir.isNonEmptyDirectory(srcIIP)) {
       throw new IOException(
@@ -322,8 +323,8 @@ public class EncryptionZoneManager {
 
     if (hasCreatedEncryptionZone() && encryptionZones.
         get(srcINode.getId()) != null) {
-      throw new IOException("Directory " + src + " is already an encryption " +
-          "zone.");
+      throw new IOException(
+          "Directory " + srcIIP.getPath() + " is already an encryption zone.");
     }
 
     final HdfsProtos.ZoneEncryptionInfoProto proto =
@@ -335,7 +336,7 @@ public class EncryptionZoneManager {
     xattrs.add(ezXAttr);
     // updating the xattr will call addEncryptionZone,
     // done this way to handle edit log loading
-    FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs,
+    FSDirXAttrOp.unprotectedSetXAttrs(dir, srcIIP, xattrs,
                                       EnumSet.of(XAttrSetFlag.CREATE));
     return ezXAttr;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 5457f08..d7a3611 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -72,8 +72,11 @@ final class FSDirEncryptionZoneOp {
    * @return New EDEK, or null if ezKeyName is null
    * @throws IOException
    */
-  static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
+  private static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
       final FSDirectory fsd, final String ezKeyName) throws IOException {
+    // must not be holding lock during this operation
+    assert !fsd.getFSNamesystem().hasReadLock();
+    assert !fsd.getFSNamesystem().hasWriteLock();
     if (ezKeyName == null) {
       return null;
     }
@@ -147,23 +150,21 @@ final class FSDirEncryptionZoneOp {
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
-    final String src;
     // For now this is hard coded, as we only support one method.
     final CryptoProtocolVersion version =
         CryptoProtocolVersion.ENCRYPTION_ZONES;
 
+    final INodesInPath iip;
     fsd.writeLock();
     try {
-      final INodesInPath iip = fsd.resolvePath(pc, srcArg);
-      src = iip.getPath();
-      final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(src, suite,
+      iip = fsd.resolvePathForWrite(pc, srcArg);
+      final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(iip, suite,
           version, keyName);
       xAttrs.add(ezXAttr);
     } finally {
       fsd.writeUnlock();
     }
-    fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-    final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
+    fsd.getEditLog().logSetXAttrs(iip.getPath(), xAttrs, logRetryCache);
     return fsd.getAuditFileInfo(iip);
   }
 
@@ -223,8 +224,9 @@ final class FSDirEncryptionZoneOp {
    * @param info file encryption information
    * @throws IOException
    */
-  static void setFileEncryptionInfo(final FSDirectory fsd, final String src,
-      final FileEncryptionInfo info) throws IOException {
+  static void setFileEncryptionInfo(final FSDirectory fsd,
+      final INodesInPath iip, final FileEncryptionInfo info)
+          throws IOException {
     // Make the PB for the xattr
     final HdfsProtos.PerFileEncryptionInfoProto proto =
         PBHelperClient.convertPerFileEncInfo(info);
@@ -235,7 +237,7 @@ final class FSDirEncryptionZoneOp {
     xAttrs.add(fileEncryptionAttr);
     fsd.writeLock();
     try {
-      FSDirXAttrOp.unprotectedSetXAttrs(fsd, src, xAttrs,
+      FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xAttrs,
                                         EnumSet.of(XAttrSetFlag.CREATE));
     } finally {
       fsd.writeUnlock();
@@ -246,21 +248,18 @@ final class FSDirEncryptionZoneOp {
    * This function combines the per-file encryption info (obtained
    * from the inode's XAttrs), and the encryption info from its zone, and
    * returns a consolidated FileEncryptionInfo instance. Null is returned
-   * for non-encrypted files.
+   * for non-encrypted or raw files.
    *
    * @param fsd fsdirectory
-   * @param inode inode of the file
-   * @param snapshotId ID of the snapshot that
-   *                   we want to get encryption info from
    * @param iip inodes in the path containing the file, passed in to
-   *            avoid obtaining the list of inodes again; if iip is
-   *            null then the list of inodes will be obtained again
+   *            avoid obtaining the list of inodes again
    * @return consolidated file encryption info; null for non-encrypted files
    */
   static FileEncryptionInfo getFileEncryptionInfo(final FSDirectory fsd,
-      final INode inode, final int snapshotId, final INodesInPath iip)
-      throws IOException {
-    if (!inode.isFile() || !fsd.ezManager.hasCreatedEncryptionZone()) {
+      final INodesInPath iip) throws IOException {
+    if (iip.isRaw() ||
+        !fsd.ezManager.hasCreatedEncryptionZone() ||
+        !iip.getLastINode().isFile()) {
       return null;
     }
     fsd.readLock();
@@ -280,8 +279,8 @@ final class FSDirEncryptionZoneOp {
       final CryptoProtocolVersion version = encryptionZone.getVersion();
       final CipherSuite suite = encryptionZone.getSuite();
       final String keyName = encryptionZone.getKeyName();
-      XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(inode,
-          snapshotId, CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
+      XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(
+          iip, CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
 
       if (fileXAttr == null) {
         NameNode.LOG.warn("Could not find encryption XAttr for file " +
@@ -295,15 +294,53 @@ final class FSDirEncryptionZoneOp {
         return PBHelperClient.convert(fileProto, suite, version, keyName);
       } catch (InvalidProtocolBufferException e) {
         throw new IOException("Could not parse file encryption info for " +
-            "inode " + inode, e);
+            "inode " + iip.getPath(), e);
       }
     } finally {
       fsd.readUnlock();
     }
   }
 
+  /**
+   * If the file and encryption key are valid, return the encryption info,
+   * else throw a retry exception.  The startFile method generates the EDEK
+   * outside of the lock so the zone must be reverified.
+   *
+   * @param dir fsdirectory
+   * @param iip inodes in the file path
+   * @param ezInfo the encryption key
+   * @return FileEncryptionInfo for the file
+   * @throws RetryStartFileException if key is inconsistent with current zone
+   */
+  static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
+      INodesInPath iip, EncryptionKeyInfo ezInfo)
+          throws RetryStartFileException {
+    FileEncryptionInfo feInfo = null;
+    final EncryptionZone zone = getEZForPath(dir, iip);
+    if (zone != null) {
+      // The path is now within an EZ, but we're missing encryption parameters
+      if (ezInfo == null) {
+        throw new RetryStartFileException();
+      }
+      // Path is within an EZ and we have provided encryption parameters.
+      // Make sure that the generated EDEK matches the settings of the EZ.
+      final String ezKeyName = zone.getKeyName();
+      if (!ezKeyName.equals(ezInfo.edek.getEncryptionKeyName())) {
+        throw new RetryStartFileException();
+      }
+      feInfo = new FileEncryptionInfo(ezInfo.suite, ezInfo.protocolVersion,
+          ezInfo.edek.getEncryptedKeyVersion().getMaterial(),
+          ezInfo.edek.getEncryptedKeyIv(),
+          ezKeyName, ezInfo.edek.getEncryptionKeyVersionName());
+    }
+    return feInfo;
+  }
+
   static boolean isInAnEZ(final FSDirectory fsd, final INodesInPath iip)
       throws UnresolvedLinkException, SnapshotAccessControlException {
+    if (!fsd.ezManager.hasCreatedEncryptionZone()) {
+      return false;
+    }
     fsd.readLock();
     try {
       return fsd.ezManager.isInAnEZ(iip);
@@ -399,4 +436,67 @@ final class FSDirEncryptionZoneOp {
       }
     }
   }
+
+  /**
+   * If the file is in an encryption zone, we optimistically create an
+   * EDEK for the file by calling out to the configured KeyProvider.
+   * Since this typically involves doing an RPC, the fsn lock is yielded.
+   *
+   * Since the path can flip-flop between being in an encryption zone and
+   * not in the meantime, the caller MUST re-resolve the IIP and re-check
+   * preconditions if this method does not return null.
+   *
+   * @param fsn the namesystem.
+   * @param iip the inodes for the path
+   * @param supportedVersions client's supported versions
+   * @return EncryptionKeyInfo if the path is in an EZ, else null
+   */
+  static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
+      INodesInPath iip, CryptoProtocolVersion[] supportedVersions)
+      throws IOException {
+    FSDirectory fsd = fsn.getFSDirectory();
+    // Nothing to do if the path is not within an EZ
+    final EncryptionZone zone = getEZForPath(fsd, iip);
+    if (zone == null) {
+      EncryptionFaultInjector.getInstance().startFileNoKey();
+      return null;
+    }
+    CryptoProtocolVersion protocolVersion = fsn.chooseProtocolVersion(
+        zone, supportedVersions);
+    CipherSuite suite = zone.getSuite();
+    String ezKeyName = zone.getKeyName();
+
+    Preconditions.checkNotNull(protocolVersion);
+    Preconditions.checkNotNull(suite);
+    Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
+                                "Chose an UNKNOWN CipherSuite!");
+    Preconditions.checkNotNull(ezKeyName);
+
+    // Generate EDEK while not holding the fsn lock.
+    fsn.writeUnlock();
+    try {
+      EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey();
+      return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName,
+          generateEncryptedDataEncryptionKey(fsd, ezKeyName));
+    } finally {
+      fsn.writeLock();
+      EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
+    }
+  }
+
+  static class EncryptionKeyInfo {
+    final CryptoProtocolVersion protocolVersion;
+    final CipherSuite suite;
+    final String ezKeyName;
+    final KeyProviderCryptoExtension.EncryptedKeyVersion edek;
+
+    EncryptionKeyInfo(
+        CryptoProtocolVersion protocolVersion, CipherSuite suite,
+        String ezKeyName, KeyProviderCryptoExtension.EncryptedKeyVersion edek) {
+      this.protocolVersion = protocolVersion;
+      this.suite = suite;
+      this.ezKeyName = ezKeyName;
+      this.edek = edek;
+    }
+  }
 }
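
The getEncryptionKeyInfo javadoc above describes the central concurrency pattern of this patch: resolve the path under the namesystem lock, release the lock for the expensive KeyProvider RPC, reacquire it, and re-verify that the state observed earlier still holds, asking the client to retry otherwise. A minimal, self-contained sketch of that yield-and-revalidate pattern, assuming a plain ReentrantReadWriteLock and an integer version counter as stand-ins for the namesystem lock and the encryption-zone state:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class YieldAndRevalidate {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile int zoneVersion = 0;  // stand-in for the EZ state of the path

  // Stand-in for the KeyProvider RPC; must never be called while holding the lock.
  private String expensiveGenerate(int versionSeen) throws InterruptedException {
    assert !lock.isWriteLockedByCurrentThread();
    Thread.sleep(10);
    return "edek-for-v" + versionSeen;
  }

  public String startFile() throws InterruptedException {
    lock.writeLock().lock();
    try {
      int versionSeen = zoneVersion;   // resolve under the lock
      lock.writeLock().unlock();       // yield the lock for the call-out
      String edek;
      try {
        edek = expensiveGenerate(versionSeen);
      } finally {
        lock.writeLock().lock();       // reacquire before touching state again
      }
      if (zoneVersion != versionSeen) {
        // The state flip-flopped while unlocked: ask the caller to retry,
        // as startFile does with RetryStartFileException.
        throw new IllegalStateException("retry: zone changed while unlocked");
      }
      return edek;                     // safe to use under the lock
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(new YieldAndRevalidate().startFile());
  }
}

In the real code the re-validation is the second resolvePathForStartFile call in FSNamesystem plus the key-name check in getFileEncryptionInfo, which throws RetryStartFileException when the zone no longer matches the generated EDEK.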

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 17544f5..25b3155 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -141,7 +141,7 @@ final class FSDirErasureCodingOp {
     }
     final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
     xattrs.add(ecXAttr);
-    FSDirXAttrOp.unprotectedSetXAttrs(fsd, src, xattrs,
+    FSDirXAttrOp.unprotectedSetXAttrs(fsd, srcIIP, xattrs,
         EnumSet.of(XAttrSetFlag.CREATE));
     return xattrs;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 911b178..12d5cfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -190,7 +190,7 @@ class FSDirRenameOp {
       return null;
     }
 
-    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
+    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
     verifyQuotaForRename(fsd, srcIIP, dstIIP);
@@ -382,7 +382,7 @@ class FSDirRenameOp {
     }
 
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
-    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
+    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
     final INode dstInode = dstIIP.getLastINode();
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
     if (dstInode != null) { // Destination exists

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index f56d83d..5aa4dbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -163,7 +163,7 @@ class FSDirStatAndListingOp {
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ);
-        fsd.checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
+        fsd.checkUnreadableBySuperuser(pc, iip);
       }
 
       final long fileSize = iip.isSnapshot()
@@ -178,9 +178,8 @@ class FSDirStatAndListingOp {
         isUc = false;
       }
 
-      final FileEncryptionInfo feInfo = iip.isRaw() ? null
-          : FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, inode,
-          iip.getPathSnapshotId(), iip);
+      final FileEncryptionInfo feInfo =
+          FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
       final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
           getErasureCodingPolicy(fsd.getFSNamesystem(), iip);
 
@@ -415,14 +414,12 @@ class FSDirStatAndListingOp {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
-    final boolean isEncrypted;
     final INode node = iip.getLastINode();
     final int snapshot = iip.getPathSnapshotId();
-    final boolean isRawPath = iip.isRaw();
     LocatedBlocks loc = null;
 
-    final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
-        .getFileEncryptionInfo(fsd, node, snapshot, iip);
+    final boolean isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
+    FileEncryptionInfo feInfo = null;
 
     final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
         .getErasureCodingPolicy(fsd.getFSNamesystem(), iip);
@@ -432,8 +429,9 @@ class FSDirStatAndListingOp {
       size = fileNode.computeFileSize(snapshot);
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
-      isEncrypted = (feInfo != null)
-          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
+      if (isEncrypted) {
+        feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
+      }
       if (needLocation) {
         final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
         final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -446,8 +444,6 @@ class FSDirStatAndListingOp {
           loc = new LocatedBlocks();
         }
       }
-    } else {
-      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
     }
 
     int childrenNum = node.isDirectory() ?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index aa2be92..40be83b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -19,14 +19,10 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -38,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -307,6 +302,37 @@ class FSDirWriteFileOp {
     return clientNode;
   }
 
+  static INodesInPath resolvePathForStartFile(FSDirectory dir,
+      FSPermissionChecker pc, String src, EnumSet<CreateFlag> flag,
+      boolean createParent) throws IOException {
+    INodesInPath iip = dir.resolvePathForWrite(pc, src);
+    if (dir.isPermissionEnabled()) {
+      dir.checkAncestorAccess(pc, iip, FsAction.WRITE);
+    }
+    INode inode = iip.getLastINode();
+    if (inode != null) {
+      // Verify that the destination does not exist as a directory already.
+      if (inode.isDirectory()) {
+        throw new FileAlreadyExistsException(iip.getPath() +
+            " already exists as a directory");
+      }
+      // Verifies it's indeed a file and perms allow overwrite
+      INodeFile.valueOf(inode, src);
+      if (dir.isPermissionEnabled() && flag.contains(CreateFlag.OVERWRITE)) {
+        dir.checkPathAccess(pc, iip, FsAction.WRITE);
+      }
+    } else {
+      if (!createParent) {
+        dir.verifyParentDir(iip, src);
+      }
+      if (!flag.contains(CreateFlag.CREATE)) {
+        throw new FileNotFoundException("Can't overwrite non-existent " + src);
+      }
+    }
+    return iip;
+  }
+
+
   /**
    * Create a new file or overwrite an existing file<br>
    *
@@ -317,88 +343,21 @@ class FSDirWriteFileOp {
    * {@link ClientProtocol#create}
    */
   static HdfsFileStatus startFile(
-      FSNamesystem fsn, FSPermissionChecker pc, String src,
+      FSNamesystem fsn, INodesInPath iip,
       PermissionStatus permissions, String holder, String clientMachine,
       EnumSet<CreateFlag> flag, boolean createParent,
       short replication, long blockSize,
-      EncryptionKeyInfo ezInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
+      FileEncryptionInfo feInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
       boolean logRetryEntry)
       throws IOException {
     assert fsn.hasWriteLock();
-
-    boolean create = flag.contains(CreateFlag.CREATE);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
     boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
 
-    CipherSuite suite = null;
-    CryptoProtocolVersion version = null;
-    KeyProviderCryptoExtension.EncryptedKeyVersion edek = null;
-
-    if (ezInfo != null) {
-      edek = ezInfo.edek;
-      suite = ezInfo.suite;
-      version = ezInfo.protocolVersion;
-    }
-
+    final String src = iip.getPath();
     FSDirectory fsd = fsn.getFSDirectory();
-    INodesInPath iip = fsd.resolvePathForWrite(pc, src);
-    src = iip.getPath();
 
-    // Verify that the destination does not exist as a directory already.
-    final INode inode = iip.getLastINode();
-    if (inode != null && inode.isDirectory()) {
-      throw new FileAlreadyExistsException(src +
-          " already exists as a directory");
-    }
-
-    if (FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src)
-        && !FSDirectory.isReservedRawName(src)
-        && !FSDirectory.isReservedInodesName(src))) {
-      throw new InvalidPathException(src);
-    }
-
-    final INodeFile myFile = INodeFile.valueOf(inode, src, true);
-    if (fsd.isPermissionEnabled()) {
-      if (overwrite && myFile != null) {
-        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-      }
-      /*
-       * To overwrite existing file, need to check 'w' permission
-       * of parent (equals to ancestor in this case)
-       */
-      fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
-    }
-
-    if (!createParent) {
-      fsd.verifyParentDir(iip, src);
-    }
-
-    if (myFile == null && !create) {
-      throw new FileNotFoundException("Can't overwrite non-existent " +
-          src + " for client " + clientMachine);
-    }
-
-    FileEncryptionInfo feInfo = null;
-
-    final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
-    if (zone != null) {
-      // The path is now within an EZ, but we're missing encryption parameters
-      if (suite == null || edek == null) {
-        throw new RetryStartFileException();
-      }
-      // Path is within an EZ and we have provided encryption parameters.
-      // Make sure that the generated EDEK matches the settings of the EZ.
-      final String ezKeyName = zone.getKeyName();
-      if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
-        throw new RetryStartFileException();
-      }
-      feInfo = new FileEncryptionInfo(suite, version,
-          edek.getEncryptedKeyVersion().getMaterial(),
-          edek.getEncryptedKeyIv(),
-          ezKeyName, edek.getEncryptionKeyVersionName());
-    }
-
-    if (myFile != null) {
+    if (iip.getLastINode() != null) {
       if (overwrite) {
         List<INode> toRemoveINodes = new ChunkedArrayList<>();
         List<Long> toRemoveUCFiles = new ChunkedArrayList<>();
@@ -433,11 +392,9 @@ class FSDirWriteFileOp {
         newNode.getFileUnderConstructionFeature().getClientName(),
         newNode.getId());
     if (feInfo != null) {
-      FSDirEncryptionZoneOp.setFileEncryptionInfo(fsd, src, feInfo);
-      newNode = fsd.getInode(newNode.getId()).asFile();
+      FSDirEncryptionZoneOp.setFileEncryptionInfo(fsd, iip, feInfo);
     }
-    setNewINodeStoragePolicy(fsd.getBlockManager(), newNode, iip,
-                             isLazyPersist);
+    setNewINodeStoragePolicy(fsd.getBlockManager(), iip, isLazyPersist);
     fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
@@ -446,30 +403,6 @@ class FSDirWriteFileOp {
     return FSDirStatAndListingOp.getFileInfo(fsd, iip);
   }
 
-  static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
-      FSPermissionChecker pc, String src,
-      CryptoProtocolVersion[] supportedVersions)
-      throws IOException {
-    FSDirectory fsd = fsn.getFSDirectory();
-    INodesInPath iip = fsd.resolvePathForWrite(pc, src);
-    // Nothing to do if the path is not within an EZ
-    final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
-    if (zone == null) {
-      return null;
-    }
-    CryptoProtocolVersion protocolVersion = fsn.chooseProtocolVersion(
-        zone, supportedVersions);
-    CipherSuite suite = zone.getSuite();
-    String ezKeyName = zone.getKeyName();
-
-    Preconditions.checkNotNull(protocolVersion);
-    Preconditions.checkNotNull(suite);
-    Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
-                                "Chose an UNKNOWN CipherSuite!");
-    Preconditions.checkNotNull(ezKeyName);
-    return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName);
-  }
-
   static INodeFile addFileForEditLog(
       FSDirectory fsd, long id, INodesInPath existing, byte[] localName,
       PermissionStatus permissions, List<AclEntry> aclEntries,
@@ -828,10 +761,9 @@ class FSDirWriteFileOp {
     NameNode.stateChangeLog.info(sb.toString());
   }
 
-  private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
-      inode, INodesInPath iip, boolean isLazyPersist)
-      throws IOException {
-
+  private static void setNewINodeStoragePolicy(BlockManager bm,
+      INodesInPath iip, boolean isLazyPersist) throws IOException {
+    INodeFile inode = iip.getLastINode().asFile();
     if (isLazyPersist) {
       BlockStoragePolicy lpPolicy =
           bm.getStoragePolicy("LAZY_PERSIST");
@@ -887,19 +819,4 @@ class FSDirWriteFileOp {
       this.isStriped = isStriped;
     }
   }
-
-  static class EncryptionKeyInfo {
-    final CryptoProtocolVersion protocolVersion;
-    final CipherSuite suite;
-    final String ezKeyName;
-    KeyProviderCryptoExtension.EncryptedKeyVersion edek;
-
-    EncryptionKeyInfo(
-        CryptoProtocolVersion protocolVersion, CipherSuite suite,
-        String ezKeyName) {
-      this.protocolVersion = protocolVersion;
-      this.suite = suite;
-      this.ezKeyName = ezKeyName;
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 08016c3..6badf24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -75,7 +75,7 @@ class FSDirXAttrOp {
       iip = fsd.resolvePathForWrite(pc, src);
       src = iip.getPath();
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);
-      unprotectedSetXAttrs(fsd, src, xAttrs, flag);
+      unprotectedSetXAttrs(fsd, iip, xAttrs, flag);
     } finally {
       fsd.writeUnlock();
     }
@@ -253,14 +253,11 @@ class FSDirXAttrOp {
   }
 
   static INode unprotectedSetXAttrs(
-      FSDirectory fsd, final String src, final List<XAttr> xAttrs,
+      FSDirectory fsd, final INodesInPath iip, final List<XAttr> xAttrs,
       final EnumSet<XAttrSetFlag> flag)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src),
-        true);
     INode inode = FSDirectory.resolveLastINode(iip);
-    int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
     List<XAttr> newXAttrs = setINodeXAttrs(fsd, existingXAttrs, xAttrs, flag);
     final boolean isFile = inode.isFile();
@@ -287,7 +284,7 @@ class FSDirXAttrOp {
       }
     }
 
-    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, iip.getLatestSnapshotId());
     return inode;
   }
 
@@ -361,22 +358,20 @@ class FSDirXAttrOp {
     return xAttrs;
   }
 
-  static XAttr getXAttrByPrefixedName(FSDirectory fsd, INode inode,
-      int snapshotId, String prefixedName) throws IOException {
+  static XAttr getXAttrByPrefixedName(FSDirectory fsd, INodesInPath iip,
+      String prefixedName) throws IOException {
     fsd.readLock();
     try {
-      return XAttrStorage.readINodeXAttrByPrefixedName(inode, snapshotId,
-          prefixedName);
+      return XAttrStorage.readINodeXAttrByPrefixedName(iip, prefixedName);
     } finally {
       fsd.readUnlock();
     }
   }
 
   static XAttr unprotectedGetXAttrByPrefixedName(
-      INode inode, int snapshotId, String prefixedName)
+      INodesInPath iip, String prefixedName)
       throws IOException {
-    return XAttrStorage.readINodeXAttrByPrefixedName(inode, snapshotId,
-        prefixedName);
+    return XAttrStorage.readINodeXAttrByPrefixedName(iip, prefixedName);
   }
 
   private static void checkXAttrChangeAccess(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7db2106..8456da6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1744,11 +1744,10 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  void checkUnreadableBySuperuser(
-      FSPermissionChecker pc, INode inode, int snapshotId)
+  void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
       throws IOException {
     if (pc.isSuperUser()) {
-      if (FSDirXAttrOp.getXAttrByPrefixedName(this, inode, snapshotId,
+      if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip,
           SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) {
         throw new AccessControlException(
             "Access is denied for " + pc.getUser() + " since the superuser "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index ee6206a..9c72a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -896,7 +896,8 @@ public class FSEditLogLoader {
     }
     case OP_SET_XATTR: {
       SetXAttrOp setXAttrOp = (SetXAttrOp) op;
-      FSDirXAttrOp.unprotectedSetXAttrs(fsDir, setXAttrOp.src,
+      INodesInPath iip = fsDir.getINodesInPath4Write(setXAttrOp.src);
+      FSDirXAttrOp.unprotectedSetXAttrs(fsDir, iip,
                                         setXAttrOp.xAttrs,
                                         EnumSet.of(XAttrSetFlag.CREATE,
                                                    XAttrSetFlag.REPLACE));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1721b2c..fb3b375 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -145,6 +145,7 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -216,6 +217,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
@@ -2152,7 +2154,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return status;
   }
 
-  private HdfsFileStatus startFileInt(final String src,
+  private HdfsFileStatus startFileInt(String src,
       PermissionStatus permissions, String holder, String clientMachine,
       EnumSet<CreateFlag> flag, boolean createParent, short replication,
       long blockSize, CryptoProtocolVersion[] supportedVersions,
@@ -2171,92 +2173,79 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .append(Arrays.toString(supportedVersions));
       NameNode.stateChangeLog.debug(builder.toString());
     }
-    if (!DFSUtil.isValidName(src)) {
+    if (!DFSUtil.isValidName(src) ||
+        FSDirectory.isExactReservedName(src) ||
+        (FSDirectory.isReservedName(src)
+            && !FSDirectory.isReservedRawName(src)
+            && !FSDirectory.isReservedInodesName(src))) {
       throw new InvalidPathException(src);
     }
 
-    checkOperation(OperationCategory.READ);
-    readLock();
-    try {
-      checkOperation(OperationCategory.READ);
-      if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, src)) {
-        blockManager.verifyReplication(src, replication, clientMachine);
-      }
-    } finally {
-      readUnlock();
-    }
-    
-    checkOperation(OperationCategory.WRITE);
-    if (blockSize < minBlockSize) {
-      throw new IOException("Specified block size is less than configured" +
-          " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
-          + "): " + blockSize + " < " + minBlockSize);
-    }
-
     FSPermissionChecker pc = getPermissionChecker();
+    INodesInPath iip = null;
+    boolean skipSync = true; // until we do something that might create edits
+    HdfsFileStatus stat = null;
+    BlocksMapUpdateInfo toRemoveBlocks = null;
 
-    /**
-     * If the file is in an encryption zone, we optimistically create an
-     * EDEK for the file by calling out to the configured KeyProvider.
-     * Since this typically involves doing an RPC, we take the readLock
-     * initially, then drop it to do the RPC.
-     * 
-     * Since the path can flip-flop between being in an encryption zone and not
-     * in the meantime, we need to recheck the preconditions when we retake the
-     * lock to do the create. If the preconditions are not met, we throw a
-     * special RetryStartFileException to ask the DFSClient to try the create
-     * again later.
-     */
-    FSDirWriteFileOp.EncryptionKeyInfo ezInfo = null;
+    checkOperation(OperationCategory.WRITE);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot create file" + src);
 
-    if (provider != null) {
-      readLock();
-      try {
-        checkOperation(OperationCategory.READ);
-        ezInfo = FSDirWriteFileOp
-            .getEncryptionKeyInfo(this, pc, src, supportedVersions);
-      } finally {
-        readUnlock();
+      iip = FSDirWriteFileOp.resolvePathForStartFile(
+          dir, pc, src, flag, createParent);
+
+      if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
+        blockManager.verifyReplication(src, replication, clientMachine);
       }
 
-      // Generate EDEK if necessary while not holding the lock
-      if (ezInfo != null) {
-        ezInfo.edek = FSDirEncryptionZoneOp
-            .generateEncryptedDataEncryptionKey(dir, ezInfo.ezKeyName);
+      if (blockSize < minBlockSize) {
+        throw new IOException("Specified block size is less than configured" +
+            " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
+            + "): " + blockSize + " < " + minBlockSize);
       }
-      EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
-    }
 
-    boolean skipSync = false;
-    HdfsFileStatus stat = null;
+      FileEncryptionInfo feInfo = null;
+      if (provider != null) {
+        EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(
+            this, iip, supportedVersions);
+        // if the path has an encryption zone, the lock was released while
+        // generating the EDEK.  re-resolve the path to ensure the namesystem
+        // and/or EZ has not mutated
+        if (ezInfo != null) {
+          checkOperation(OperationCategory.WRITE);
+          iip = FSDirWriteFileOp.resolvePathForStartFile(
+              dir, pc, iip.getPath(), flag, createParent);
+          feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(
+              dir, iip, ezInfo);
+        }
+      }
 
-    // Proceed with the create, using the computed cipher suite and
-    // generated EDEK
-    BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
-    writeLock();
-    try {
-      checkOperation(OperationCategory.WRITE);
-      checkNameNodeSafeMode("Cannot create file" + src);
+      skipSync = false; // following might generate edits
+      toRemoveBlocks = new BlocksMapUpdateInfo();
       dir.writeLock();
       try {
-        stat = FSDirWriteFileOp.startFile(this, pc, src, permissions, holder,
+        stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
                                           clientMachine, flag, createParent,
-                                          replication, blockSize, ezInfo,
+                                          replication, blockSize, feInfo,
                                           toRemoveBlocks, logRetryCache);
+      } catch (IOException e) {
+        skipSync = e instanceof StandbyException;
+        throw e;
       } finally {
         dir.writeUnlock();
       }
-    } catch (IOException e) {
-      skipSync = e instanceof StandbyException;
-      throw e;
     } finally {
       writeUnlock();
       // There might be transactions logged while trying to recover the lease.
       // They need to be sync'ed even when an exception was thrown.
       if (!skipSync) {
         getEditLog().logSync();
-        removeBlocks(toRemoveBlocks);
-        toRemoveBlocks.clear();
+        if (toRemoveBlocks != null) {
+          removeBlocks(toRemoveBlocks);
+          toRemoveBlocks.clear();
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index 65a4ada..8a91e2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -51,9 +51,10 @@ public class XAttrStorage {
    * @param prefixedName xAttr name with prefix
    * @return the xAttr
    */
-  public static XAttr readINodeXAttrByPrefixedName(INode inode,
-      int snapshotId, String prefixedName) {
-    XAttrFeature f = inode.getXAttrFeature(snapshotId);
+  public static XAttr readINodeXAttrByPrefixedName(INodesInPath iip,
+      String prefixedName) {
+    XAttrFeature f =
+        iip.getLastINode().getXAttrFeature(iip.getPathSnapshotId());
     return f == null ? null : f.getXAttr(prefixedName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32e9fc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 18a0800..67019c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
@@ -1048,7 +1047,7 @@ public class TestEncryptionZones {
   }
 
   private class MyInjector extends EncryptionFaultInjector {
-    int generateCount;
+    volatile int generateCount;
     CountDownLatch ready;
     CountDownLatch wait;
 
@@ -1058,13 +1057,27 @@ public class TestEncryptionZones {
     }
 
     @Override
-    public void startFileAfterGenerateKey() throws IOException {
+    public void startFileNoKey() throws IOException {
+      generateCount = -1;
+      syncWithLatches();
+    }
+
+    @Override
+    public void startFileBeforeGenerateKey() throws IOException {
+      syncWithLatches();
+    }
+
+    private void syncWithLatches() throws IOException {
       ready.countDown();
       try {
         wait.await();
       } catch (InterruptedException e) {
         throw new IOException(e);
       }
+    }
+
+    @Override
+    public void startFileAfterGenerateKey() throws IOException {
       generateCount++;
     }
   }
@@ -1100,10 +1113,14 @@ public class TestEncryptionZones {
       Future<Void> future =
           executor.submit(new CreateFileTask(fsWrapper, file));
       injector.ready.await();
-      // Do the fault
-      doFault();
-      // Allow create to proceed
-      injector.wait.countDown();
+      try {
+        // Do the fault
+        doFault();
+        // Allow create to proceed
+      } finally {
+        // Always decrement latch to avoid hanging the tests on failure.
+        injector.wait.countDown();
+      }
       future.get();
       // Cleanup and postconditions
       doCleanup();
@@ -1126,20 +1143,21 @@ public class TestEncryptionZones {
     fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
     ExecutorService executor = Executors.newSingleThreadExecutor();
 
-    // Test when the parent directory becomes an EZ
+    // Test when the parent directory becomes an EZ.  With no initial EZ,
+    // the fsn lock must not be yielded.
     executor.submit(new InjectFaultTask() {
       @Override
-      public void doFault() throws Exception {
-        dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
-      }
-      @Override
       public void doCleanup() throws Exception {
-        assertEquals("Expected a startFile retry", 2, injector.generateCount);
+        assertEquals("Expected no startFile key generation",
+            -1, injector.generateCount);
         fsWrapper.delete(file, false);
       }
     }).get();
 
-    // Test when the parent directory unbecomes an EZ
+    // Test when the parent directory unbecomes an EZ.  The generation of
+    // the EDEK will yield the lock, then re-resolve the path and use the
+    // previous EDEK.
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
     executor.submit(new InjectFaultTask() {
       @Override
       public void doFault() throws Exception {
@@ -1152,7 +1170,9 @@ public class TestEncryptionZones {
       }
     }).get();
 
-    // Test when the parent directory becomes a different EZ
+    // Test when the parent directory becomes a different EZ.  The generation
+    // of the EDEK will yield the lock, re-resolve will detect the EZ has
+    // changed, and the client will be asked to retry a second time.
     fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
     final String otherKey = "other_key";
     DFSTestUtil.createKey(otherKey, cluster, conf);
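
The MyInjector changes above rely on a two-latch handshake: the worker thread counts down "ready" when it reaches the injection point and then blocks on "wait", while the test applies the fault and releases "wait" in a finally block so a failed fault cannot hang the run. A stripped-down sketch of that handshake, independent of the HDFS test classes:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws Exception {
    final CountDownLatch ready = new CountDownLatch(1);
    final CountDownLatch proceed = new CountDownLatch(1);
    ExecutorService executor = Executors.newSingleThreadExecutor();

    // Worker pauses at its "injection point", like the create() call in the test.
    Future<String> future = executor.submit(() -> {
      ready.countDown();   // signal: reached the injection point
      proceed.await();     // block until the fault has been applied
      return "created";
    });

    ready.await();         // wait for the worker to reach the injection point
    try {
      // Apply the fault here (e.g. create or delete the encryption zone).
      System.out.println("applying fault while the worker is paused");
    } finally {
      proceed.countDown(); // always release the worker, even if the fault fails
    }
    System.out.println(future.get());
    executor.shutdown();
  }
}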




[35/52] [abbrv] hadoop git commit: HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula

Posted by cn...@apache.org.
HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9638186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9638186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9638186

Branch: refs/heads/HADOOP-13037
Commit: b963818621c200160bb37624f177bdcb059de4eb
Parents: 65912e4
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Oct 10 13:19:17 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 13:19:17 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestBalancerBandwidth.java      | 57 +++++++++-----------
 1 file changed, 25 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9638186/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 6e6bbee..6bbe3a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -24,13 +24,15 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 /**
@@ -54,9 +56,8 @@ public class TestBalancerBandwidth {
         DEFAULT_BANDWIDTH);
 
     /* Create and start cluster */
-    MiniDFSCluster cluster = 
-      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
-    try {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_OF_DATANODES).build()) {
       cluster.waitActive();
 
       DistributedFileSystem fs = cluster.getFileSystem();
@@ -65,12 +66,6 @@ public class TestBalancerBandwidth {
       // Ensure value from the configuration is reflected in the datanodes.
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
-      ClientDatanodeProtocol dn1Proxy = DFSUtilClient
-          .createClientDatanodeProtocolProxy(datanodes.get(0).getDatanodeId(),
-              conf, 60000, false);
-      ClientDatanodeProtocol dn2Proxy = DFSUtilClient
-          .createClientDatanodeProtocolProxy(datanodes.get(1).getDatanodeId(),
-              conf, 60000, false);
       DFSAdmin admin = new DFSAdmin(conf);
       String dn1Address = datanodes.get(0).ipcServer.getListenerAddress()
           .getHostName() + ":" + datanodes.get(0).getIpcPort();
@@ -79,51 +74,49 @@ public class TestBalancerBandwidth {
 
       // verifies the dfsadmin command execution
       String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, DEFAULT_BANDWIDTH);
+      runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, DEFAULT_BANDWIDTH);
+      runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
 
       // Dynamically change balancer bandwidth and ensure the updated value
       // is reflected on the datanodes.
       long newBandwidth = 12 * DEFAULT_BANDWIDTH; // 12M bps
       fs.setBalancerBandwidth(newBandwidth);
+      verifyBalancerBandwidth(datanodes, newBandwidth);
 
-      // Give it a few seconds to propogate new the value to the datanodes.
-      try {
-        Thread.sleep(5000);
-      } catch (Exception e) {}
-
-      assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
-      assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
       // verifies the dfsadmin command execution
       args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
 
       // Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
       // datanodes should remain as it was.
       fs.setBalancerBandwidth(0);
 
-      // Give it a few seconds to propogate new the value to the datanodes.
-      try {
-        Thread.sleep(5000);
-      } catch (Exception e) {}
+      verifyBalancerBandwidth(datanodes, newBandwidth);
 
-      assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
-      assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
       // verifies the dfsadmin command execution
       args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
-    } finally {
-      cluster.shutdown();
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
     }
   }
 
+  private void verifyBalancerBandwidth(final ArrayList<DataNode> datanodes,
+      final long newBandwidth) throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return (long) datanodes.get(0).getBalancerBandwidth() == newBandwidth
+            && (long) datanodes.get(1).getBalancerBandwidth() == newBandwidth;
+      }
+    }, 100, 60 * 1000);
+  }
+
   private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
-      ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
+      long expectedBandwidth) throws Exception {
     PrintStream initialStdOut = System.out;
     outContent.reset();
     try {


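A minimal sketch of the polling idiom this refactor switches to in place of the removed fixed Thread.sleep(5000) calls. It assumes the same GenericTestUtils.waitFor utility and DataNode accessor used in the patch; the helper name, its single-datanode signature and the expected-value parameter are illustrative only:

import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;

public class BalancerBandwidthWaitSketch {
  // Poll the datanode until it reports the expected balancer bandwidth,
  // failing with a TimeoutException after 60 seconds instead of sleeping
  // for a fixed interval and hoping the new value has propagated.
  static void awaitBalancerBandwidth(final DataNode dn, final long expected)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return (long) dn.getBalancerBandwidth() == expected;
      }
    }, 100, 60 * 1000);
  }
}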


[38/52] [abbrv] hadoop git commit: YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. Contributed by Jason Lowe.

Posted by cn...@apache.org.
YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0773ffd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0773ffd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0773ffd0

Branch: refs/heads/HADOOP-13037
Commit: 0773ffd0f8383384f8cf8599476565f78aae70c9
Parents: 669d6f1
Author: Naganarasimha <na...@apache.org>
Authored: Mon Oct 10 18:04:47 2016 -0400
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Oct 10 18:04:47 2016 -0400

----------------------------------------------------------------------
 .../security/TestDelegationTokenRenewer.java    | 24 ++++++++++++++++----
 1 file changed, 19 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0773ffd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 5dfee89..205188b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1148,17 +1148,21 @@ public class TestDelegationTokenRenewer {
         credentials, null, true, false, false, null, 0, null, false, null);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
     rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+    DelegationTokenRenewer renewer =
+        rm.getRMContext().getDelegationTokenRenewer();
+    DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
+    Assert.assertNotNull(dttr);
 
     // submit app2 with the same token, set cancelTokenWhenComplete to true;
     RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
         credentials, null, true, false, false, null, 0, null, true, null);
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
     rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
-    MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+    finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
     Assert.assertTrue(rm.getRMContext().getDelegationTokenRenewer()
       .getAllTokens().containsKey(token1));
 
-    MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+    finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
     // app2 completes, app1 is still running, check the token is not cancelled
     Assert.assertFalse(Renewer.cancelled);
   }
@@ -1224,7 +1228,7 @@ public class TestDelegationTokenRenewer {
     Assert.assertTrue(dttr.referringAppIds.contains(app2.getApplicationId()));
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+    finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
     // app2 completes, app1 is still running, check the token is not cancelled
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
     Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
@@ -1242,14 +1246,14 @@ public class TestDelegationTokenRenewer {
     Assert.assertFalse(dttr.isTimerCancelled());
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+    finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
     Assert.assertFalse(dttr.referringAppIds.contains(app1.getApplicationId()));
     Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
     Assert.assertFalse(dttr.isTimerCancelled());
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app3, rm, nm1, am3);
+    finishAMAndWaitForComplete(app3, rm, nm1, am3, dttr);
     Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
     Assert.assertTrue(dttr.referringAppIds.isEmpty());
     Assert.assertTrue(dttr.isTimerCancelled());
@@ -1259,4 +1263,14 @@ public class TestDelegationTokenRenewer {
     Assert.assertFalse(renewer.getDelegationTokens().contains(token1));
   }
 
+  private void finishAMAndWaitForComplete(final RMApp app, MockRM rm,
+      MockNM nm, MockAM am, final DelegationTokenToRenew dttr)
+          throws Exception {
+    MockRM.finishAMAndVerifyAppState(app, rm, nm, am);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      public Boolean get() {
+        return !dttr.referringAppIds.contains(app.getApplicationId());
+      }
+    }, 10, 10000);
+  }
 }




[33/52] [abbrv] hadoop git commit: HADOOP-13699. Configuration does not substitute multiple references to the same var.

Posted by cn...@apache.org.
HADOOP-13699. Configuration does not substitute multiple references to the same var.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03060075
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03060075
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03060075

Branch: refs/heads/HADOOP-13037
Commit: 03060075c53a2cecfbf5f60b6fc77afecf64ace5
Parents: 3441c74
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Oct 10 12:19:26 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Oct 10 12:19:26 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java   | 23 ++++++++++---------
 .../apache/hadoop/conf/TestConfiguration.java   | 24 ++++----------------
 2 files changed, 16 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 1e8ed50..dbbc8ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -943,10 +943,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *
    * If var is unbounded the current state of expansion "prefix${var}suffix" is
    * returned.
-   *
-   * If a cycle is detected: replacing var1 requires replacing var2 ... requires
-   * replacing var1, i.e., the cycle is shorter than
-   * {@link Configuration#MAX_SUBST} then the original expr is returned.
+   * <p>
+   * This function also detects self-referential substitutions, i.e.
+   * <pre>
+   *   {@code
+   *   foo.bar = ${foo.bar}
+   *   }
+   * </pre>
+   * If a cycle is detected then the original expr is returned. Loops
+   * involving multiple substitutions are not detected.
    *
    * @param expr the literal value of a config key
    * @return null if expr is null, otherwise the value resulting from expanding
@@ -959,7 +964,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return null;
     }
     String eval = expr;
-    Set<String> evalSet = null;
     for(int s = 0; s < MAX_SUBST; s++) {
       final int[] varBounds = findSubVariable(eval);
       if (varBounds[SUB_START_IDX] == -1) {
@@ -1004,15 +1008,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         return eval; // return literal ${var}: var is unbound
       }
 
-      // prevent recursive resolution
-      //
       final int dollar = varBounds[SUB_START_IDX] - "${".length();
       final int afterRightBrace = varBounds[SUB_END_IDX] + "}".length();
       final String refVar = eval.substring(dollar, afterRightBrace);
-      if (evalSet == null) {
-        evalSet = new HashSet<String>();
-      }
-      if (!evalSet.add(refVar)) {
+
+      // detect self-referential values
+      if (val.contains(refVar)) {
         return expr; // return original expression if there is a loop
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 917ccbc..17112f5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -168,6 +168,9 @@ public class TestConfiguration extends TestCase {
     declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", "/tmp/hadoop_user/hello.txt");
     // check that undefined variables are returned as-is
     declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b");
+    // check that multiple variable references are resolved
+    declareProperty("my.user.group", "${user.name} ${user.name}",
+        "hadoop_user hadoop_user");
     endConfig();
     Path fileResource = new Path(CONFIG);
     mock.addResource(fileResource);
@@ -1508,7 +1511,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
-  public void testInvalidSubstitutation() {
+  public void testInvalidSubstitution() {
     final Configuration configuration = new Configuration(false);
 
     // 2-var loops
@@ -1522,25 +1525,6 @@ public class TestConfiguration extends TestCase {
       configuration.set(key, keyExpression);
       assertEquals("Unexpected value", keyExpression, configuration.get(key));
     }
-
-    //
-    // 3-variable loops
-    //
-
-    final String expVal1 = "${test.var2}";
-    String testVar1 = "test.var1";
-    configuration.set(testVar1, expVal1);
-    configuration.set("test.var2", "${test.var3}");
-    configuration.set("test.var3", "${test.var1}");
-    assertEquals("Unexpected value", expVal1, configuration.get(testVar1));
-
-    // 3-variable loop with non-empty value prefix/suffix
-    //
-    final String expVal2 = "foo2${test.var2}bar2";
-    configuration.set(testVar1, expVal2);
-    configuration.set("test.var2", "foo3${test.var3}bar3");
-    configuration.set("test.var3", "foo1${test.var1}bar1");
-    assertEquals("Unexpected value", expVal2, configuration.get(testVar1));
   }
 
   public void testIncompleteSubbing() {

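As a concrete illustration of the fixed behaviour, a small hand-written sketch against the public Configuration API; the key names below are arbitrary examples and do not appear in the patch:

import org.apache.hadoop.conf.Configuration;

public class SubstitutionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.name", "hadoop_user");

    // Multiple references to the same variable are now all expanded.
    conf.set("example.greeting", "${example.name} and ${example.name}");
    System.out.println(conf.get("example.greeting")); // hadoop_user and hadoop_user

    // A self-referential value is detected and returned as the literal expression.
    conf.set("example.loop", "${example.loop}");
    System.out.println(conf.get("example.loop"));     // ${example.loop}
  }
}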



[52/52] [abbrv] hadoop git commit: YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)

Posted by cn...@apache.org.
YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6476934a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6476934a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6476934a

Branch: refs/heads/HADOOP-13037
Commit: 6476934ae5de1be7988ab198b673d82fe0f006e3
Parents: 6378845
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Tue Oct 11 22:07:10 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Tue Oct 11 22:07:10 2016 -0700

----------------------------------------------------------------------
 .../resourcemanager/EmbeddedElectorService.java |  59 +++++-
 .../resourcemanager/TestRMEmbeddedElector.java  | 191 +++++++++++++++++++
 2 files changed, 244 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
index 72327e8..88d2e10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,8 @@ import org.apache.zookeeper.data.ACL;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -54,6 +57,10 @@ public class EmbeddedElectorService extends AbstractService
 
   private byte[] localActiveNodeInfo;
   private ActiveStandbyElector elector;
+  private long zkSessionTimeout;
+  private Timer zkDisconnectTimer;
+  @VisibleForTesting
+  final Object zkDisconnectLock = new Object();
 
   EmbeddedElectorService(RMContext rmContext) {
     super(EmbeddedElectorService.class.getName());
@@ -80,7 +87,7 @@ public class EmbeddedElectorService extends AbstractService
         YarnConfiguration.DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH);
     String electionZNode = zkBasePath + "/" + clusterId;
 
-    long zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
+    zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
         YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
 
     List<ACL> zkAcls = RMZKUtils.getZKAcls(conf);
@@ -123,6 +130,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeActive() throws ServiceFailedException {
+    cancelDisconnectTimer();
+
     try {
       rmContext.getRMAdminService().transitionToActive(req);
     } catch (Exception e) {
@@ -132,6 +141,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeStandby() {
+    cancelDisconnectTimer();
+
     try {
       rmContext.getRMAdminService().transitionToStandby(req);
     } catch (Exception e) {
@@ -139,13 +150,49 @@ public class EmbeddedElectorService extends AbstractService
     }
   }
 
+  /**
+   * Stop the disconnect timer.  Any running tasks will be allowed to complete.
+   */
+  private void cancelDisconnectTimer() {
+    synchronized (zkDisconnectLock) {
+      if (zkDisconnectTimer != null) {
+        zkDisconnectTimer.cancel();
+        zkDisconnectTimer = null;
+      }
+    }
+  }
+
+  /**
+   * When the ZK client loses contact with ZK, this method will be called to
+   * allow the RM to react. Because the loss of connection can be noticed
+   * before the session timeout happens, it is undesirable to transition
+   * immediately. Instead the method starts a timer that will wait
+   * {@link YarnConfiguration#RM_ZK_TIMEOUT_MS} milliseconds before
+   * initiating the transition into standby state.
+   */
   @Override
   public void enterNeutralMode() {
-    /**
-     * Possibly due to transient connection issues. Do nothing.
-     * TODO: Might want to keep track of how long in this state and transition
-     * to standby.
-     */
+    LOG.warn("Lost contact with Zookeeper. Transitioning to standby in "
+        + zkSessionTimeout + " ms if connection is not reestablished.");
+
+    // If we've just become disconnected, start a timer.  When the time's up,
+    // we'll transition to standby.
+    synchronized (zkDisconnectLock) {
+      if (zkDisconnectTimer == null) {
+        zkDisconnectTimer = new Timer("Zookeeper disconnect timer");
+        zkDisconnectTimer.schedule(new TimerTask() {
+          @Override
+          public void run() {
+            synchronized (zkDisconnectLock) {
+              // Only run if the timer hasn't been cancelled
+              if (zkDisconnectTimer != null) {
+                becomeStandby();
+              }
+            }
+          }
+        }, zkSessionTimeout);
+      }
+    }
   }
 
   @SuppressWarnings(value = "unchecked")

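The enterNeutralMode() change above follows a debounce pattern: the first disconnect arms a timer, a later becomeActive() or becomeStandby() call disarms it, and the timer task re-checks under the shared lock so a late firing cannot act after cancellation. A stripped-down, generic sketch of that pattern, with the class name and Runnable callback invented for illustration while the real service calls the RM admin service and uses the configured ZK session timeout:

import java.util.Timer;
import java.util.TimerTask;

public class DisconnectDebounce {
  private final long timeoutMs;
  private final Runnable onTimeout;
  private final Object lock = new Object();
  private Timer timer;

  public DisconnectDebounce(long timeoutMs, Runnable onTimeout) {
    this.timeoutMs = timeoutMs;
    this.onTimeout = onTimeout;
  }

  // Called when the coordination service reports a lost connection.
  public void onDisconnected() {
    synchronized (lock) {
      if (timer == null) {               // only the first disconnect arms the timer
        timer = new Timer("disconnect debounce timer");
        timer.schedule(new TimerTask() {
          @Override
          public void run() {
            synchronized (lock) {
              if (timer != null) {       // a reconnect may have cancelled us already
                onTimeout.run();         // e.g. transition to standby
              }
            }
          }
        }, timeoutMs);
      }
    }
  }

  // Called on reconnect or on an explicit active/standby transition.
  public void onConnected() {
    synchronized (lock) {
      if (timer != null) {
        timer.cancel();                  // running tasks are allowed to finish
        timer = null;
      }
    }
  }
}

Re-checking the field under the lock inside run() is what closes the race that the test below exercises by holding the lock while the timer tries to fire.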
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 20b1c0e..bfd0b4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -28,6 +28,14 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestRMEmbeddedElector extends ClientBaseWithFixes {
   private static final Log LOG =
@@ -41,6 +49,14 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
   private Configuration conf;
   private AtomicBoolean callbackCalled;
 
+  private enum SyncTestType {
+    ACTIVE,
+    STANDBY,
+    NEUTRAL,
+    ACTIVE_TIMING,
+    STANDBY_TIMING
+  }
+
   @Before
   public void setup() throws IOException {
     conf = new YarnConfiguration();
@@ -79,6 +95,181 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
     LOG.info("Stopped RM");
   }
 
+  /**
+   * Test that neutral mode plays well with all other transitions.
+   *
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  @Test
+  public void testCallbackSynchronization()
+      throws IOException, InterruptedException {
+    testCallbackSynchronization(SyncTestType.ACTIVE);
+    testCallbackSynchronization(SyncTestType.STANDBY);
+    testCallbackSynchronization(SyncTestType.NEUTRAL);
+    testCallbackSynchronization(SyncTestType.ACTIVE_TIMING);
+    testCallbackSynchronization(SyncTestType.STANDBY_TIMING);
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with other transitions.
+   *
+   * @param type the type of test to run
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronization(SyncTestType type)
+      throws IOException, InterruptedException {
+    AdminService as = mock(AdminService.class);
+    RMContext rc = mock(RMContext.class);
+    Configuration myConf = new Configuration(conf);
+
+    myConf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 50);
+    when(rc.getRMAdminService()).thenReturn(as);
+
+    EmbeddedElectorService ees = new EmbeddedElectorService(rc);
+    ees.init(myConf);
+
+    ees.enterNeutralMode();
+
+    switch (type) {
+    case ACTIVE:
+      testCallbackSynchronizationActive(as, ees);
+      break;
+    case STANDBY:
+      testCallbackSynchronizationStandby(as, ees);
+      break;
+    case NEUTRAL:
+      testCallbackSynchronizationNeutral(as, ees);
+      break;
+    case ACTIVE_TIMING:
+      testCallbackSynchronizationTimingActive(as, ees);
+      break;
+    case STANDBY_TIMING:
+      testCallbackSynchronizationTimingStandby(as, ees);
+      break;
+    default:
+      fail("Unknown test type: " + type);
+      break;
+    }
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with an active
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationActive(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.becomeActive();
+
+    Thread.sleep(100);
+
+    verify(as).transitionToActive(any());
+    verify(as, never()).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with a standby
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationStandby(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.becomeStandby();
+
+    Thread.sleep(100);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with itself.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationNeutral(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.enterNeutralMode();
+
+    Thread.sleep(100);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode does not race with an active
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationTimingActive(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    synchronized (ees.zkDisconnectLock) {
+      // Sleep while holding the lock so that the timer thread can't do
+      // anything when it runs.  Sleep until we're pretty sure the timer thread
+      // has tried to run.
+      Thread.sleep(100);
+      // While still holding the lock cancel the timer by transitioning. This
+      // simulates a race where the callback goes to cancel the timer while the
+      // timer is trying to run.
+      ees.becomeActive();
+    }
+
+    // Sleep just a little more so that the timer thread can do whatever it's
+    // going to do, hopefully nothing.
+    Thread.sleep(50);
+
+    verify(as).transitionToActive(any());
+    verify(as, never()).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode does not race with a standby
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationTimingStandby(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    synchronized (ees.zkDisconnectLock) {
+      // Sleep while holding the lock so that the timer thread can't do
+      // anything when it runs.  Sleep until we're pretty sure the timer thread
+      // has tried to run.
+      Thread.sleep(100);
+      // While still holding the lock cancel the timer by transitioning. This
+      // simulates a race where the callback goes to cancel the timer while the
+      // timer is trying to run.
+      ees.becomeStandby();
+    }
+
+    // Sleep just a little more so that the timer thread can do whatever it's
+    // going to do, hopefully nothing.
+    Thread.sleep(50);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
   private class MockRMWithElector extends MockRM {
     private long delayMs = 0;
 




[39/52] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

Posted by cn...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 57fab66..76af724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -23,11 +23,13 @@ import java.io.FileOutputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
 import java.util.Collections;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -56,13 +58,18 @@ import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
@@ -102,8 +109,14 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private final StorageType storageType;
   private final Map<String, BlockPoolSlice> bpSlices
       = new ConcurrentHashMap<String, BlockPoolSlice>();
+
+  // Refers to the base StorageLocation used to construct this volume
+  // (i.e., does not include STORAGE_DIR_CURRENT in
+  // <location>/STORAGE_DIR_CURRENT/)
+  private final StorageLocation storageLocation;
+
   private final File currentDir;    // <StorageDirectory>/current
-  private final DF usage;           
+  private final DF usage;
   private final long reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
@@ -124,19 +137,25 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   protected ThreadPoolExecutor cacheExecutor;
   
-  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
-      Configuration conf, StorageType storageType) throws IOException {
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+      Configuration conf) throws IOException {
+
+    if (sd.getStorageLocation() == null) {
+      throw new IOException("StorageLocation specified for storage directory " +
+          sd + " is null");
+    }
     this.dataset = dataset;
     this.storageID = storageID;
+    this.reservedForReplicas = new AtomicLong(0L);
+    this.storageLocation = sd.getStorageLocation();
+    this.currentDir = sd.getCurrentDir();
+    File parent = currentDir.getParentFile();
+    this.usage = new DF(parent, conf);
+    this.storageType = storageLocation.getStorageType();
     this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
         + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
-    this.reservedForReplicas = new AtomicLong(0L);
-    this.currentDir = currentDir;
-    File parent = currentDir.getParentFile();
-    this.usage = new DF(parent, conf);
-    this.storageType = storageType;
     this.configuredCapacity = -1;
     this.conf = conf;
     cacheExecutor = initializeCacheExecutor(parent);
@@ -285,19 +304,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return true;
   }
 
+  @VisibleForTesting
   File getCurrentDir() {
     return currentDir;
   }
   
-  File getRbwDir(String bpid) throws IOException {
+  protected File getRbwDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getRbwDir();
   }
 
-  File getLazyPersistDir(String bpid) throws IOException {
+  protected File getLazyPersistDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getLazypersistDir();
   }
 
-  File getTmpDir(String bpid) throws IOException {
+  protected File getTmpDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getTmpDir();
   }
 
@@ -448,6 +468,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return reserved;
   }
 
+  @VisibleForTesting
   BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp == null) {
@@ -457,21 +478,33 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public String getBasePath() {
-    return currentDir.getParent();
+  public URI getBaseURI() {
+    return new File(currentDir.getParent()).toURI();
   }
-  
+
   @Override
-  public boolean isTransientStorage() {
-    return storageType.isTransient();
+  public DF getUsageStats(Configuration conf) {
+    if (currentDir != null) {
+      try {
+        return new DF(new File(currentDir.getParent()), conf);
+      } catch (IOException e) {
+        LOG.error("Unable to get disk statistics for volume " + this);
+      }
+    }
+    return null;
   }
 
   @Override
-  public String getPath(String bpid) throws IOException {
-    return getBlockPoolSlice(bpid).getDirectory().getAbsolutePath();
+  public StorageLocation getStorageLocation() {
+    return storageLocation;
   }
 
   @Override
+  public boolean isTransientStorage() {
+    return storageType.isTransient();
+  }
+
+  @VisibleForTesting
   public File getFinalizedDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getFinalizedDir();
   }
@@ -951,7 +984,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   @Override
   public String toString() {
-    return currentDir.getAbsolutePath();
+    return currentDir != null ? currentDir.getParent() : "NULL";
   }
 
   void shutdown() {
@@ -1189,5 +1222,167 @@ public class FsVolumeImpl implements FsVolumeSpi {
         dstBlockFile, true, DFSUtilClient.getSmallBufferSize(conf), conf);
   }
 
+  @Override
+  public LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException {
+    return compileReport(getFinalizedDir(bpid),
+        getFinalizedDir(bpid), report, reportCompiler);
+  }
+
+  private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
+      File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException {
+
+    reportCompiler.throttle();
+
+    List <String> fileNames;
+    try {
+      fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
+    } catch (IOException ioe) {
+      LOG.warn("Exception occurred while compiling report: ", ioe);
+      // Initiate a check on disk failure.
+      dataset.datanode.checkDiskErrorAsync();
+      // Ignore this directory and proceed.
+      return report;
+    }
+    Collections.sort(fileNames);
+
+    /*
+     * Assumption: In the sorted list of files block file appears immediately
+     * before block metadata file. This is true for the current naming
+     * convention for block file blk_<blockid> and meta file
+     * blk_<blockid>_<genstamp>.meta
+     */
+    for (int i = 0; i < fileNames.size(); i++) {
+      // Make sure this thread can make a timely exit. With a low throttle
+      // rate, completing a run can take a looooong time.
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+
+      File file = new File(dir, fileNames.get(i));
+      if (file.isDirectory()) {
+        compileReport(bpFinalizedDir, file, report, reportCompiler);
+        continue;
+      }
+      if (!Block.isBlockFilename(file)) {
+        if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
+          long blockId = Block.getBlockId(file.getName());
+          verifyFileLocation(file.getParentFile(), bpFinalizedDir,
+              blockId);
+          report.add(new ScanInfo(blockId, null, file, this));
+        }
+        continue;
+      }
+      File blockFile = file;
+      long blockId = Block.filename2id(file.getName());
+      File metaFile = null;
+
+      // Skip all the files that start with block name until
+      // getting to the metafile for the block
+      while (i + 1 < fileNames.size()) {
+        File blkMetaFile = new File(dir, fileNames.get(i + 1));
+        if (!(blkMetaFile.isFile()
+            && blkMetaFile.getName().startsWith(blockFile.getName()))) {
+          break;
+        }
+        i++;
+        if (isBlockMetaFile(blockFile.getName(), blkMetaFile.getName())) {
+          metaFile = blkMetaFile;
+          break;
+        }
+      }
+      verifyFileLocation(blockFile, bpFinalizedDir, blockId);
+      report.add(new ScanInfo(blockId, blockFile, metaFile, this));
+    }
+    return report;
+  }
+
+  /**
+   * Helper method to determine if a file name is consistent with a block
+   * meta-data file.
+   *
+   * @param blockId the block ID
+   * @param metaFile the file to check
+   * @return whether the file name is a block meta-data file name
+   */
+  private static boolean isBlockMetaFile(String blockId, String metaFile) {
+    return metaFile.startsWith(blockId)
+        && metaFile.endsWith(Block.METADATA_EXTENSION);
+  }
+
+  /**
+   * Verify whether the actual directory location of block file has the
+   * expected directory path computed using its block ID.
+   */
+  private void verifyFileLocation(File actualBlockFile,
+      File bpFinalizedDir, long blockId) {
+    File expectedBlockDir =
+        DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+    File actualBlockDir = actualBlockFile.getParentFile();
+    if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
+      LOG.warn("Block: " + blockId +
+          " found in invalid directory.  Expected directory: " +
+          expectedBlockDir + ".  Actual directory: " + actualBlockDir);
+    }
+  }
+
+  public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
+      ReplicaInfo replicaInfo,
+      int smallBufferSize,
+      Configuration conf) throws IOException {
+
+    File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
+        block.getGenerationStamp(), replicaInfo,
+        getTmpDir(block.getBlockPoolId()),
+        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
+
+    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
+        .setBlockId(replicaInfo.getBlockId())
+        .setGenerationStamp(replicaInfo.getGenerationStamp())
+        .setFsVolume(this)
+        .setDirectoryToUse(blockFiles[0].getParentFile())
+        .setBytesToReserve(0)
+        .build();
+    newReplicaInfo.setNumBytes(blockFiles[1].length());
+    return newReplicaInfo;
+  }
+
+  public File[] copyBlockToLazyPersistLocation(String bpId, long blockId,
+      long genStamp,
+      ReplicaInfo replicaInfo,
+      int smallBufferSize,
+      Configuration conf) throws IOException {
+
+    File lazyPersistDir  = getLazyPersistDir(bpId);
+    if (!lazyPersistDir.exists() && !lazyPersistDir.mkdirs()) {
+      FsDatasetImpl.LOG.warn("LazyWriter failed to create " + lazyPersistDir);
+      throw new IOException("LazyWriter fail to find or " +
+          "create lazy persist dir: " + lazyPersistDir.toString());
+    }
+
+    // No FsDatasetImpl lock for the file copy
+    File[] targetFiles = FsDatasetImpl.copyBlockFiles(
+        blockId, genStamp, replicaInfo, lazyPersistDir, true,
+        smallBufferSize, conf);
+    return targetFiles;
+  }
+
+  public void incrNumBlocks(String bpid) throws IOException {
+    getBlockPoolSlice(bpid).incrNumBlocks();
+  }
+
+  public void resolveDuplicateReplicas(String bpid, ReplicaInfo memBlockInfo,
+      ReplicaInfo diskBlockInfo, ReplicaMap volumeMap) throws IOException {
+    getBlockPoolSlice(bpid).resolveDuplicateReplicas(
+        memBlockInfo, diskBlockInfo, volumeMap);
+  }
+
+  public ReplicaInfo activateSavedReplica(String bpid,
+      ReplicaInfo replicaInfo, RamDiskReplica replicaState) throws IOException {
+    return getBlockPoolSlice(bpid).activateSavedReplica(replicaInfo,
+        replicaState);
+  }
 }
 

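The compileReport() walk above leans on the blk_<blockid> / blk_<blockid>_<genstamp>.meta naming convention described in its comment. A tiny standalone illustration of the metadata-file check, with the private helper restated and the ".meta" extension (Block.METADATA_EXTENSION) written out; the block id and genstamp are made up:

public class BlockNameSketch {
  // Restated check: a metadata file name starts with the block file name
  // and ends with the ".meta" extension.
  static boolean isBlockMetaFile(String blockFileName, String candidate) {
    return candidate.startsWith(blockFileName) && candidate.endsWith(".meta");
  }

  public static void main(String[] args) {
    String block = "blk_1073741825";            // hypothetical block file
    String meta  = "blk_1073741825_1001.meta";  // its metadata file, sorts right after it
    System.out.println(isBlockMetaFile(block, meta));                        // true
    System.out.println(isBlockMetaFile(block, "blk_1073741826_1001.meta"));  // false
  }
}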
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
new file mode 100644
index 0000000..a1f7e91
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+
+/**
+ * This class is to be used as a builder for {@link FsVolumeImpl} objects.
+ */
+public class FsVolumeImplBuilder {
+
+  private FsDatasetImpl dataset;
+  private String storageID;
+  private StorageDirectory sd;
+  private Configuration conf;
+
+  public FsVolumeImplBuilder() {
+    dataset = null;
+    storageID = null;
+    sd = null;
+    conf = null;
+  }
+
+  FsVolumeImplBuilder setDataset(FsDatasetImpl dataset) {
+    this.dataset = dataset;
+    return this;
+  }
+
+  FsVolumeImplBuilder setStorageID(String id) {
+    this.storageID = id;
+    return this;
+  }
+
+  FsVolumeImplBuilder setStorageDirectory(StorageDirectory sd) {
+    this.sd = sd;
+    return this;
+  }
+
+  FsVolumeImplBuilder setConf(Configuration conf) {
+    this.conf = conf;
+    return this;
+  }
+
+  FsVolumeImpl build() throws IOException {
+    return new FsVolumeImpl(dataset, storageID, sd, conf);
+  }
+}

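A hypothetical usage sketch for the new builder. Its setters and the FsVolumeImpl constructor are package-private, so a caller would sit in the same fsdataset.impl package, as FsDatasetImpl does; the parameters below stand in for values the dataset already has when activating a volume, and the helper class itself is not part of the commit:

package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

// Illustrative helper only.
class FsVolumeImplBuilderUsageSketch {
  static FsVolumeImpl newVolume(FsDatasetImpl dataset, String storageID,
      StorageDirectory sd, Configuration conf) throws IOException {
    return new FsVolumeImplBuilder()
        .setDataset(dataset)            // owning FsDatasetImpl
        .setStorageID(storageID)        // DatanodeStorage id for this volume
        .setStorageDirectory(sd)        // StorageDirectory backing the volume
        .setConf(conf)
        .build();                       // throws IOException if the StorageLocation is null
  }
}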
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index f869008..cf9c319 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -51,8 +51,10 @@ class FsVolumeList {
   private final CopyOnWriteArrayList<FsVolumeImpl> volumes =
       new CopyOnWriteArrayList<>();
   // Tracks volume failures, sorted by volume path.
-  private final Map<String, VolumeFailureInfo> volumeFailureInfos =
-      Collections.synchronizedMap(new TreeMap<String, VolumeFailureInfo>());
+  // map from volume storageID to the volume failure info
+  private final Map<StorageLocation, VolumeFailureInfo> volumeFailureInfos =
+      Collections.synchronizedMap(
+          new TreeMap<StorageLocation, VolumeFailureInfo>());
   private final ConcurrentLinkedQueue<FsVolumeImpl> volumesBeingRemoved =
       new ConcurrentLinkedQueue<>();
   private final AutoCloseableLock checkDirsLock;
@@ -234,10 +236,9 @@ class FsVolumeList {
    *
    * @return list of all the failed volumes.
    */
-  Set<File> checkDirs() {
+  Set<StorageLocation> checkDirs() {
     try (AutoCloseableLock lock = checkDirsLock.acquire()) {
-      Set<File> failedVols = null;
-      
+      Set<StorageLocation> failedLocations = null;
       // Make a copy of volumes for performing modification 
       final List<FsVolumeImpl> volumeList = getVolumes();
 
@@ -247,10 +248,10 @@ class FsVolumeList {
           fsv.checkDirs();
         } catch (DiskErrorException e) {
           FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ", e);
-          if (failedVols == null) {
-            failedVols = new HashSet<>(1);
+          if (failedLocations == null) {
+            failedLocations = new HashSet<>(1);
           }
-          failedVols.add(new File(fsv.getBasePath()).getAbsoluteFile());
+          failedLocations.add(fsv.getStorageLocation());
           addVolumeFailureInfo(fsv);
           removeVolume(fsv);
         } catch (ClosedChannelException e) {
@@ -261,13 +262,13 @@ class FsVolumeList {
         }
       }
       
-      if (failedVols != null && failedVols.size() > 0) {
-        FsDatasetImpl.LOG.warn("Completed checkDirs. Found " + failedVols.size()
-            + " failure volumes.");
+      if (failedLocations != null && failedLocations.size() > 0) {
+        FsDatasetImpl.LOG.warn("Completed checkDirs. Found " +
+            failedLocations.size() + " failure volumes.");
       }
 
       waitVolumeRemoved(5000, checkDirsLockCondition);
-      return failedVols;
+      return failedLocations;
     }
   }
 
@@ -315,7 +316,7 @@ class FsVolumeList {
     }
     // If the volume is used to replace a failed volume, it needs to reset the
     // volume failure info for this volume.
-    removeVolumeFailureInfo(new File(volume.getBasePath()));
+    removeVolumeFailureInfo(volume.getStorageLocation());
     FsDatasetImpl.LOG.info("Added new volume: " +
         volume.getStorageID());
   }
@@ -351,16 +352,15 @@ class FsVolumeList {
    * @param volume the volume to be removed.
    * @param clearFailure set true to remove failure info for this volume.
    */
-  void removeVolume(File volume, boolean clearFailure) {
+  void removeVolume(StorageLocation storageLocation, boolean clearFailure) {
     for (FsVolumeImpl fsVolume : volumes) {
-      String basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
-      String targetPath = volume.getAbsolutePath();
-      if (basePath.equals(targetPath)) {
+      StorageLocation baseLocation = fsVolume.getStorageLocation();
+      if (baseLocation.equals(storageLocation)) {
         removeVolume(fsVolume);
       }
     }
     if (clearFailure) {
-      removeVolumeFailureInfo(volume);
+      removeVolumeFailureInfo(storageLocation);
     }
   }
 
@@ -394,13 +394,13 @@ class FsVolumeList {
 
   private void addVolumeFailureInfo(FsVolumeImpl vol) {
     addVolumeFailureInfo(new VolumeFailureInfo(
-        new File(vol.getBasePath()).getAbsolutePath(),
+        vol.getStorageLocation(),
         Time.now(),
         vol.getCapacity()));
   }
 
-  private void removeVolumeFailureInfo(File vol) {
-    volumeFailureInfos.remove(vol.getAbsolutePath());
+  private void removeVolumeFailureInfo(StorageLocation location) {
+    volumeFailureInfos.remove(location);
   }
 
   void addBlockPool(final String bpid, final Configuration conf) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
index 9e549f9..d6969c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
@@ -58,8 +58,8 @@ class RamDiskAsyncLazyPersistService {
   private final Configuration conf;
 
   private final ThreadGroup threadGroup;
-  private Map<File, ThreadPoolExecutor> executors
-      = new HashMap<File, ThreadPoolExecutor>();
+  private Map<String, ThreadPoolExecutor> executors
+      = new HashMap<String, ThreadPoolExecutor>();
   private final static HdfsConfiguration EMPTY_HDFS_CONF = new HdfsConfiguration();
 
   /**
@@ -75,13 +75,14 @@ class RamDiskAsyncLazyPersistService {
     this.threadGroup = new ThreadGroup(getClass().getSimpleName());
   }
 
-  private void addExecutorForVolume(final File volume) {
+  private void addExecutorForVolume(final String storageId) {
     ThreadFactory threadFactory = new ThreadFactory() {
 
       @Override
       public Thread newThread(Runnable r) {
         Thread t = new Thread(threadGroup, r);
-        t.setName("Async RamDisk lazy persist worker for volume " + volume);
+        t.setName("Async RamDisk lazy persist worker " +
+            " for volume with id " + storageId);
         return t;
       }
     };
@@ -93,39 +94,41 @@ class RamDiskAsyncLazyPersistService {
 
     // This can reduce the number of running threads
     executor.allowCoreThreadTimeOut(true);
-    executors.put(volume, executor);
+    executors.put(storageId, executor);
   }
 
   /**
    * Starts AsyncLazyPersistService for a new volume
    * @param volume the root of the new data volume.
    */
-  synchronized void addVolume(File volume) {
+  synchronized void addVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
       throw new RuntimeException("AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor != null) {
       throw new RuntimeException("Volume " + volume + " is already existed.");
     }
-    addExecutorForVolume(volume);
+    addExecutorForVolume(storageId);
   }
 
   /**
    * Stops AsyncLazyPersistService for a volume.
    * @param volume the root of the volume.
    */
-  synchronized void removeVolume(File volume) {
+  synchronized void removeVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Can not find volume " + volume
-        + " to remove.");
+      throw new RuntimeException("Can not find volume with storage id " +
+          storageId + " to remove.");
     } else {
       executor.shutdown();
-      executors.remove(volume);
+      executors.remove(storageId);
     }
   }
 
@@ -135,25 +138,28 @@ class RamDiskAsyncLazyPersistService {
    * @return true if there is one thread pool for the volume
    *         false otherwise
    */
-  synchronized boolean queryVolume(File volume) {
+  synchronized boolean queryVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
-      throw new RuntimeException("AsyncLazyPersistService is already shutdown");
+      throw new RuntimeException(
+          "AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     return (executor != null);
   }
 
   /**
    * Execute the task sometime in the future, using ThreadPools.
    */
-  synchronized void execute(File root, Runnable task) {
+  synchronized void execute(String storageId, Runnable task) {
     if (executors == null) {
-      throw new RuntimeException("AsyncLazyPersistService is already shutdown");
+      throw new RuntimeException(
+          "AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(root);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Cannot find root " + root
-          + " for execution of task " + task);
+      throw new RuntimeException("Cannot find root storage volume with id " +
+          storageId + " for execution of task " + task);
     } else {
       executor.execute(task);
     }
@@ -169,7 +175,7 @@ class RamDiskAsyncLazyPersistService {
     } else {
       LOG.info("Shutting down all async lazy persist service threads");
 
-      for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
+      for (Map.Entry<String, ThreadPoolExecutor> e : executors.entrySet()) {
         e.getValue().shutdown();
       }
       // clear the executor map so that calling execute again will fail.
@@ -189,18 +195,11 @@ class RamDiskAsyncLazyPersistService {
           + bpId + " block id: " + blockId);
     }
 
-    FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
-    File lazyPersistDir  = volume.getLazyPersistDir(bpId);
-    if (!lazyPersistDir.exists() && !lazyPersistDir.mkdirs()) {
-      FsDatasetImpl.LOG.warn("LazyWriter failed to create " + lazyPersistDir);
-      throw new IOException("LazyWriter fail to find or create lazy persist dir: "
-          + lazyPersistDir.toString());
-    }
-
     ReplicaLazyPersistTask lazyPersistTask = new ReplicaLazyPersistTask(
-        bpId, blockId, genStamp, creationTime, replica,
-        target, lazyPersistDir);
-    execute(volume.getCurrentDir(), lazyPersistTask);
+        bpId, blockId, genStamp, creationTime, replica, target);
+
+    FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
+    execute(volume.getStorageID(), lazyPersistTask);
   }
 
   class ReplicaLazyPersistTask implements Runnable {
@@ -210,19 +209,17 @@ class RamDiskAsyncLazyPersistService {
     private final long creationTime;
     private final ReplicaInfo replicaInfo;
     private final FsVolumeReference targetVolume;
-    private final File lazyPersistDir;
 
     ReplicaLazyPersistTask(String bpId, long blockId,
         long genStamp, long creationTime,
         ReplicaInfo replicaInfo,
-        FsVolumeReference targetVolume, File lazyPersistDir) {
+        FsVolumeReference targetVolume) {
       this.bpId = bpId;
       this.blockId = blockId;
       this.genStamp = genStamp;
       this.creationTime = creationTime;
       this.replicaInfo = replicaInfo;
       this.targetVolume = targetVolume;
-      this.lazyPersistDir = lazyPersistDir;
     }
 
     @Override
@@ -241,14 +238,14 @@ class RamDiskAsyncLazyPersistService {
       final FsDatasetImpl dataset = (FsDatasetImpl)datanode.getFSDataset();
       try (FsVolumeReference ref = this.targetVolume) {
         int smallBufferSize = DFSUtilClient.getSmallBufferSize(EMPTY_HDFS_CONF);
-        // No FsDatasetImpl lock for the file copy
-        File targetFiles[] = FsDatasetImpl.copyBlockFiles(
-            blockId, genStamp, replicaInfo, lazyPersistDir, true,
-            smallBufferSize, conf);
+
+        FsVolumeImpl volume = (FsVolumeImpl)ref.getVolume();
+        File[] targetFiles = volume.copyBlockToLazyPersistLocation(bpId,
+            blockId, genStamp, replicaInfo, smallBufferSize, conf);
 
         // Lock FsDataSetImpl during onCompleteLazyPersist callback
         dataset.onCompleteLazyPersist(bpId, blockId,
-                creationTime, targetFiles, (FsVolumeImpl)ref.getVolume());
+                creationTime, targetFiles, volume);
         succeeded = true;
       } catch (Exception e){
         FsDatasetImpl.LOG.warn(

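The hunks above re-key the per-volume executor map by the volume's storage ID instead of its File path, and route execute() through that ID. A minimal, self-contained sketch of the same pattern, with illustrative class and message names that are not taken from the commit:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Illustrative sketch only: one single-threaded pool per volume,
    // looked up by an opaque storage ID rather than a directory path.
    class StorageIdKeyedExecutors {
      private final Map<String, ThreadPoolExecutor> executors = new HashMap<>();

      synchronized void addVolume(String storageId) {
        if (executors.containsKey(storageId)) {
          throw new IllegalStateException("Volume " + storageId + " already exists.");
        }
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
        executor.allowCoreThreadTimeOut(true); // let the idle worker exit
        executors.put(storageId, executor);
      }

      synchronized void removeVolume(String storageId) {
        ThreadPoolExecutor executor = executors.remove(storageId);
        if (executor == null) {
          throw new IllegalStateException(
              "Cannot find volume with storage id " + storageId);
        }
        executor.shutdown();
      }

      synchronized void execute(String storageId, Runnable task) {
        ThreadPoolExecutor executor = executors.get(storageId);
        if (executor == null) {
          throw new IllegalStateException(
              "Cannot find volume with storage id " + storageId);
        }
        executor.execute(task);
      }
    }

Keying by a storage ID rather than a File keeps the service independent of where the volume happens to be mounted on disk.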
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
index c3ce2a4..a762785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+
 /**
  * Tracks information about failure of a data volume.
  */
 final class VolumeFailureInfo {
-  private final String failedStorageLocation;
+  private final StorageLocation failedStorageLocation;
   private final long failureDate;
   private final long estimatedCapacityLost;
 
@@ -33,7 +35,8 @@ final class VolumeFailureInfo {
    * @param failedStorageLocation storage location that has failed
    * @param failureDate date/time of failure in milliseconds since epoch
    */
-  public VolumeFailureInfo(String failedStorageLocation, long failureDate) {
+  public VolumeFailureInfo(StorageLocation failedStorageLocation,
+      long failureDate) {
     this(failedStorageLocation, failureDate, 0);
   }
 
@@ -44,8 +47,8 @@ final class VolumeFailureInfo {
    * @param failureDate date/time of failure in milliseconds since epoch
    * @param estimatedCapacityLost estimate of capacity lost in bytes
    */
-  public VolumeFailureInfo(String failedStorageLocation, long failureDate,
-      long estimatedCapacityLost) {
+  public VolumeFailureInfo(StorageLocation failedStorageLocation,
+      long failureDate, long estimatedCapacityLost) {
     this.failedStorageLocation = failedStorageLocation;
     this.failureDate = failureDate;
     this.estimatedCapacityLost = estimatedCapacityLost;
@@ -56,7 +59,7 @@ final class VolumeFailureInfo {
    *
    * @return storage location that has failed
    */
-  public String getFailedStorageLocation() {
+  public StorageLocation getFailedStorageLocation() {
     return this.failedStorageLocation;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0f4f14c..2471dc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5413,7 +5413,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
       if (volumeFailureSummary != null) {
         innerinfo
-            .put("failedStorageLocations",
+            .put("failedStorageIDs",
                 volumeFailureSummary.getFailedStorageLocations())
             .put("lastVolumeFailureDate",
                 volumeFailureSummary.getLastVolumeFailureDate())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index b11b48a..6efc53a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -216,13 +217,13 @@ public class TestNameNodePrunesMissingStorages {
         datanodeToRemoveStorageFromIdx++;
       }
       // Find the volume within the datanode which holds that first storage.
-      String volumeDirectoryToRemove = null;
+      StorageLocation volumeLocationToRemove = null;
       try (FsVolumeReferences volumes =
           datanodeToRemoveStorageFrom.getFSDataset().getFsVolumeReferences()) {
         assertEquals(NUM_STORAGES_PER_DN, volumes.size());
         for (FsVolumeSpi volume : volumes) {
           if (volume.getStorageID().equals(storageIdToRemove)) {
-            volumeDirectoryToRemove = volume.getBasePath();
+            volumeLocationToRemove = volume.getStorageLocation();
           }
         }
       };
@@ -230,10 +231,11 @@ public class TestNameNodePrunesMissingStorages {
       // Replace the volume directory with a regular file, which will
       // cause a volume failure.  (If we merely removed the directory,
       // it would be re-initialized with a new storage ID.)
-      assertNotNull(volumeDirectoryToRemove);
+      assertNotNull(volumeLocationToRemove);
       datanodeToRemoveStorageFrom.shutdown();
-      FileUtil.fullyDelete(new File(volumeDirectoryToRemove));
-      FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
+      FileUtil.fullyDelete(volumeLocationToRemove.getFile());
+      FileOutputStream fos = new FileOutputStream(
+          volumeLocationToRemove.getFile().toString());
       try {
         fos.write(1);
       } finally {
@@ -326,7 +328,8 @@ public class TestNameNodePrunesMissingStorages {
           dn.getFSDataset().getFsVolumeReferences();
       final String newStorageId = DatanodeStorage.generateUuid();
       try {
-        File currentDir = new File(volumeRefs.get(0).getBasePath(), "current");
+        File currentDir = new File(
+            volumeRefs.get(0).getStorageLocation().getFile(), "current");
         File versionFile = new File(currentDir, "VERSION");
         rewriteVersionFile(versionFile, newStorageId);
       } finally {

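The test change above resolves the volume's directory through getStorageLocation().getFile() and then simulates a volume failure by swapping that directory for a regular file. A stand-alone sketch of that failure-injection trick, using a placeholder path and a hand-rolled recursive delete instead of Hadoop's FileUtil:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;

    // Illustrative sketch only: replace a data directory with a plain file
    // so that a datanode restarted on top of it reports a failed volume.
    public class SimulateVolumeFailure {
      public static void main(String[] args) throws IOException {
        File volumeDir = new File("/tmp/example-volume"); // placeholder path
        deleteRecursively(volumeDir);                     // remove the directory tree
        try (FileOutputStream fos = new FileOutputStream(volumeDir)) {
          fos.write(1);                                   // leave a regular file in its place
        }
      }

      private static void deleteRecursively(File f) {
        File[] children = f.listFiles();
        if (children != null) {
          for (File c : children) {
            deleteRecursively(c);
          }
        }
        f.delete();
      }
    }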
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 6034d1e..6c59231 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -22,7 +22,9 @@ import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -38,6 +40,7 @@ import javax.management.StandardMBean;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -46,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -495,21 +499,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
-    public String getBasePath() {
-      return null;
-    }
-
-    @Override
-    public String getPath(String bpid) throws IOException {
-      return null;
-    }
-
-    @Override
-    public File getFinalizedDir(String bpid) throws IOException {
-      return null;
-    }
-
-    @Override
     public StorageType getStorageType() {
       return null;
     }
@@ -546,6 +535,28 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     public FsDatasetSpi getDataset() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public StorageLocation getStorageLocation() {
+      return null;
+    }
+
+    @Override
+    public URI getBaseURI() {
+      return null;
+    }
+
+    @Override
+    public DF getUsageStats(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    public LinkedList<ScanInfo> compileReport(String bpid,
+        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException, IOException {
+      return null;
+    }
   }
 
   private final Map<String, Map<Block, BInfo>> blockMap
@@ -1030,7 +1041,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
     // nothing to check for simulated data set
     return null;
   }
@@ -1344,7 +1355,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public synchronized void removeVolumes(Set<File> volumes, boolean clearFailure) {
+  public synchronized void removeVolumes(Collection<StorageLocation> volumes,
+      boolean clearFailure) {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
index 021361b..c55a828 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
@@ -549,7 +549,8 @@ public class TestBlockScanner {
       info.shouldRun = false;
     }
     ctx.datanode.shutdown();
-    String vPath = ctx.volumes.get(0).getBasePath();
+    String vPath = ctx.volumes.get(0).getStorageLocation()
+        .getFile().getAbsolutePath();
     File cursorPath = new File(new File(new File(vPath, "current"),
           ctx.bpids[0]), "scanner.cursor");
     assertTrue("Failed to find cursor save file in " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 0dbb09c..06387c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -52,7 +52,6 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -519,11 +518,8 @@ public class TestDataNodeHotSwapVolumes {
     ExtendedBlock block =
         DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
     FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
-    String basePath = volumeWithBlock.getBasePath();
-    File storageDir = new File(basePath);
-    URI fileUri = storageDir.toURI();
-    String dirWithBlock =
-        "[" + volumeWithBlock.getStorageType() + "]" + fileUri;
+    String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]" +
+        volumeWithBlock.getStorageLocation().getFile().toURI();
     String newDirs = dirWithBlock;
     for (String dir : oldDirs) {
       if (dirWithBlock.startsWith(dir)) {
@@ -581,8 +577,8 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : volumes) {
-        assertThat(volume.getBasePath(), is(not(anyOf(
-            is(newDirs.get(0)), is(newDirs.get(2))))));
+        assertThat(volume.getStorageLocation().getFile().toString(),
+            is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
       }
     }
     DataStorage storage = dn.getStorage();
@@ -765,7 +761,7 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
       dn.getFSDataset().getFsVolumeReferences()) {
       for (FsVolumeSpi vol : volumes) {
-        if (vol.getBasePath().equals(basePath.getPath())) {
+        if (vol.getBaseURI().equals(basePath.toURI())) {
           return (FsVolumeImpl) vol;
         }
       }
@@ -810,6 +806,7 @@ public class TestDataNodeHotSwapVolumes {
     assertEquals(used, failedVolume.getDfsUsed());
 
     DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
+    LOG.info("reconfiguring DN ");
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6792ba8..47f4823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
@@ -254,17 +253,18 @@ public class TestDataNodeVolumeFailure {
     FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
     try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : vols) {
-        assertNotEquals(new File(volume.getBasePath()).getAbsoluteFile(),
-            dn0Vol1.getAbsoluteFile());
+        assertFalse(volume.getStorageLocation().getFile()
+            .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
+        ));
       }
     }
 
     // 3. all blocks on dn0Vol1 have been removed.
     for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
       assertNotNull(replica.getVolume());
-      assertNotEquals(
-          new File(replica.getVolume().getBasePath()).getAbsoluteFile(),
-          dn0Vol1.getAbsoluteFile());
+      assertFalse(replica.getVolume().getStorageLocation().getFile()
+          .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
+      ));
     }
 
     // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 8d021cd..4bb5e7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -539,6 +539,16 @@ public class TestDataNodeVolumeFailureReporting {
     assertCounter("VolumeFailures", expectedVolumeFailuresCounter,
         getMetrics(dn.getMetrics().name()));
     FsDatasetSpi<?> fsd = dn.getFSDataset();
+    StringBuilder strBuilder = new StringBuilder();
+    strBuilder.append("expectedFailedVolumes is ");
+    for (String expected: expectedFailedVolumes) {
+      strBuilder.append(expected + ",");
+    }
+    strBuilder.append(" fsd.getFailedStorageLocations() is ");
+    for (String expected: fsd.getFailedStorageLocations()) {
+      strBuilder.append(expected + ",");
+    }
+    LOG.info(strBuilder.toString());
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
     assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
     if (expectedFailedVolumes.length > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 576aae0..08a5af9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
@@ -44,6 +45,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -56,11 +58,13 @@ import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -185,18 +189,20 @@ public class TestDirectoryScanner {
           // Volume without a copy of the block. Make a copy now.
           File sourceBlock = new File(b.getBlockURI());
           File sourceMeta = new File(b.getMetadataURI());
-          String sourceRoot = b.getVolume().getBasePath();
-          String destRoot = v.getBasePath();
+          URI sourceRoot = b.getVolume().getStorageLocation().getFile().toURI();
+          URI destRoot = v.getStorageLocation().getFile().toURI();
 
           String relativeBlockPath =
-              new File(sourceRoot).toURI().relativize(sourceBlock.toURI())
+              sourceRoot.relativize(sourceBlock.toURI())
                   .getPath();
           String relativeMetaPath =
-              new File(sourceRoot).toURI().relativize(sourceMeta.toURI())
+              sourceRoot.relativize(sourceMeta.toURI())
                   .getPath();
 
-          File destBlock = new File(destRoot, relativeBlockPath);
-          File destMeta = new File(destRoot, relativeMetaPath);
+          File destBlock = new File(new File(destRoot).toString(),
+              relativeBlockPath);
+          File destMeta = new File(new File(destRoot).toString(),
+              relativeMetaPath);
 
           destBlock.getParentFile().mkdirs();
           FileUtils.copyFile(sourceBlock, destBlock);
@@ -238,7 +244,8 @@ public class TestDirectoryScanner {
     try (FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) {
       int numVolumes = volumes.size();
       int index = rand.nextInt(numVolumes - 1);
-      File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
+      File finalizedDir = ((FsVolumeImpl) volumes.get(index))
+          .getFinalizedDir(bpid);
       File file = new File(finalizedDir, getBlockFile(id));
       if (file.createNewFile()) {
         LOG.info("Created block file " + file.getName());
@@ -253,8 +260,8 @@ public class TestDirectoryScanner {
     try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
       int numVolumes = refs.size();
       int index = rand.nextInt(numVolumes - 1);
-
-      File finalizedDir = refs.get(index).getFinalizedDir(bpid);
+      File finalizedDir = ((FsVolumeImpl) refs.get(index))
+          .getFinalizedDir(bpid);
       File file = new File(finalizedDir, getMetaFile(id));
       if (file.createNewFile()) {
         LOG.info("Created metafile " + file.getName());
@@ -271,7 +278,8 @@ public class TestDirectoryScanner {
       int numVolumes = refs.size();
       int index = rand.nextInt(numVolumes - 1);
 
-      File finalizedDir = refs.get(index).getFinalizedDir(bpid);
+      File finalizedDir =
+          ((FsVolumeImpl) refs.get(index)).getFinalizedDir(bpid);
       File file = new File(finalizedDir, getBlockFile(id));
       if (file.createNewFile()) {
         LOG.info("Created block file " + file.getName());
@@ -311,7 +319,7 @@ public class TestDirectoryScanner {
     scanner.reconcile();
     
     assertTrue(scanner.diffs.containsKey(bpid));
-    LinkedList<DirectoryScanner.ScanInfo> diff = scanner.diffs.get(bpid);
+    LinkedList<FsVolumeSpi.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
     
@@ -820,17 +828,6 @@ public class TestDirectoryScanner {
       return 0;
     }
     
-    @Override
-    public String getBasePath() {
-      return (new File("/base")).getAbsolutePath();
-    }
-    
-    @Override
-    public String getPath(String bpid) throws IOException {
-      return (new File("/base/current/" + bpid)).getAbsolutePath();
-    }
-
-    @Override
     public File getFinalizedDir(String bpid) throws IOException {
       return new File("/base/current/" + bpid + "/finalized");
     }
@@ -877,6 +874,29 @@ public class TestDirectoryScanner {
     public FsDatasetSpi getDataset() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public StorageLocation getStorageLocation() {
+      return null;
+    }
+
+    @Override
+    public URI getBaseURI() {
+      return (new File("/base")).toURI();
+    }
+
+    @Override
+    public DF getUsageStats(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    public LinkedList<ScanInfo> compileReport(String bpid,
+        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException, IOException {
+      return null;
+    }
+
   }
 
   private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();
@@ -887,8 +907,8 @@ public class TestDirectoryScanner {
       
   void testScanInfoObject(long blockId, File blockFile, File metaFile)
       throws Exception {
-    DirectoryScanner.ScanInfo scanInfo =
-        new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
+    FsVolumeSpi.ScanInfo scanInfo =
+        new FsVolumeSpi.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
     assertEquals(blockId, scanInfo.getBlockId());
     if (blockFile != null) {
       assertEquals(blockFile.getAbsolutePath(),
@@ -906,8 +926,8 @@ public class TestDirectoryScanner {
   }
   
   void testScanInfoObject(long blockId) throws Exception {
-    DirectoryScanner.ScanInfo scanInfo =
-        new DirectoryScanner.ScanInfo(blockId, null, null, null);
+    FsVolumeSpi.ScanInfo scanInfo =
+        new FsVolumeSpi.ScanInfo(blockId, null, null, null);
     assertEquals(blockId, scanInfo.getBlockId());
     assertNull(scanInfo.getBlockFile());
     assertNull(scanInfo.getMetaFile());
@@ -963,8 +983,8 @@ public class TestDirectoryScanner {
       List<FsVolumeSpi> volumes = new ArrayList<>();
       Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
       while (iterator.hasNext()) {
-        FsVolumeSpi volume = iterator.next();
-        FsVolumeSpi spy = Mockito.spy(volume);
+        FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
+        FsVolumeImpl spy = Mockito.spy(volume);
         Mockito.doThrow(new IOException("Error while getFinalizedDir"))
             .when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
         volumes.add(spy);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 86d2ff4..2103392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -199,7 +199,7 @@ public class TestDiskError {
       try (FsDatasetSpi.FsVolumeReferences volumes =
           dn.getFSDataset().getFsVolumeReferences()) {
         for (FsVolumeSpi vol : volumes) {
-          String dir = vol.getBasePath();
+          String dir = vol.getStorageLocation().getFile().getAbsolutePath();
           Path dataDir = new Path(dir);
           FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
           assertEquals("Permission for dir: " + dataDir + ", is " + actual +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 1268108..7b7f04f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -56,12 +56,14 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos) throws IOException {
-
+  public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos)
+      throws IOException {
   }
 
   @Override
-  public void removeVolumes(Set<File> volumes, boolean clearFailure) {
+  public void removeVolumes(Collection<StorageLocation> volumes,
+      boolean clearFailure) {
+    throw new UnsupportedOperationException();
   }
 
   @Override
@@ -242,7 +244,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
index 985a259..83d6c4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.hdfs.server.datanode.extdataset;
 
-import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.LinkedList;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -44,21 +49,6 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public String getBasePath() {
-    return null;
-  }
-
-  @Override
-  public String getPath(String bpid) throws IOException {
-    return null;
-  }
-
-  @Override
-  public File getFinalizedDir(String bpid) throws IOException {
-    return null;
-  }
-
-  @Override
   public String getStorageID() {
     return null;
   }
@@ -100,4 +90,26 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   public FsDatasetSpi getDataset() {
     return null;
   }
+
+  @Override
+  public StorageLocation getStorageLocation() {
+    return null;
+  }
+
+  @Override
+  public URI getBaseURI() {
+    return null;
+  }
+
+  @Override
+  public DF getUsageStats(Configuration conf) {
+    return null;
+  }
+
+  @Override
+  public LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index a465c05..07ddb59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -374,9 +374,12 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   public long getRawCapacity() throws IOException {
     try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
       Preconditions.checkState(volRefs.size() != 0);
-      DF df = new DF(new File(volRefs.get(0).getBasePath()),
-          dataset.datanode.getConf());
-      return df.getCapacity();
+      DF df = volRefs.get(0).getUsageStats(dataset.datanode.getConf());
+      if (df != null) {
+        return df.getCapacity();
+      } else {
+        return -1;
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 179b617..e48aae0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DNConf;
@@ -50,7 +51,9 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.MultipleIOException;
@@ -122,8 +125,10 @@ public class TestFsDatasetImpl {
   
   private final static String BLOCKPOOL = "BP-TEST";
 
-  private static Storage.StorageDirectory createStorageDirectory(File root) {
-    Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
+  private static Storage.StorageDirectory createStorageDirectory(File root)
+      throws SecurityException, IOException {
+    Storage.StorageDirectory sd = new Storage.StorageDirectory(
+        StorageLocation.parse(root.toURI().toString()));
     DataStorage.createStorageID(sd, false);
     return sd;
   }
@@ -196,16 +201,18 @@ public class TestFsDatasetImpl {
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
       String pathUri = new Path(path).toUri().toString();
-      expectedVolumes.add(new File(pathUri).toString());
+      expectedVolumes.add(new File(pathUri).getAbsolutePath());
       StorageLocation loc = StorageLocation.parse(pathUri);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
           new DataStorage.VolumeBuilder(storage, sd);
-      when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+      when(storage.prepareVolume(eq(datanode), eq(loc),
           anyListOf(NamespaceInfo.class)))
           .thenReturn(builder);
 
       dataset.addVolume(loc, nsInfos);
+      LOG.info("expectedVolumes " + i + " is " +
+          new File(pathUri).getAbsolutePath());
     }
 
     assertEquals(totalVolumes, getNumVolumes());
@@ -215,7 +222,9 @@ public class TestFsDatasetImpl {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (int i = 0; i < numNewVolumes; i++) {
-        actualVolumes.add(volumes.get(numExistingVolumes + i).getBasePath());
+        String volumeName = volumes.get(numExistingVolumes + i).toString();
+        actualVolumes.add(volumeName);
+        LOG.info("actualVolume " + i + " is " + volumeName);
       }
     }
     assertEquals(actualVolumes.size(), expectedVolumes.size());
@@ -262,9 +271,18 @@ public class TestFsDatasetImpl {
     final String[] dataDirs =
         conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
     final String volumePathToRemove = dataDirs[0];
-    Set<File> volumesToRemove = new HashSet<>();
-    volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());
-
+    Set<StorageLocation> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
+
+    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
+    FsVolumeImpl volumeToRemove = null;
+    for (FsVolumeSpi vol: volReferences) {
+      if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
+        volumeToRemove = (FsVolumeImpl) vol;
+      }
+    }
+    assertTrue(volumeToRemove != null);
+    volReferences.close();
     dataset.removeVolumes(volumesToRemove, true);
     int expectedNumVolumes = dataDirs.length - 1;
     assertEquals("The volume has been removed from the volumeList.",
@@ -273,7 +291,7 @@ public class TestFsDatasetImpl {
         expectedNumVolumes, dataset.storageMap.size());
 
     try {
-      dataset.asyncDiskService.execute(volumesToRemove.iterator().next(),
+      dataset.asyncDiskService.execute(volumeToRemove,
           new Runnable() {
             @Override
             public void run() {}
@@ -281,7 +299,7 @@ public class TestFsDatasetImpl {
       fail("Expect RuntimeException: the volume has been removed from the "
            + "AsyncDiskService.");
     } catch (RuntimeException e) {
-      GenericTestUtils.assertExceptionContains("Cannot find root", e);
+      GenericTestUtils.assertExceptionContains("Cannot find volume", e);
     }
 
     int totalNumReplicas = 0;
@@ -306,7 +324,7 @@ public class TestFsDatasetImpl {
     Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
-    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+    when(storage.prepareVolume(eq(datanode), eq(loc),
         anyListOf(NamespaceInfo.class)))
         .thenReturn(builder);
 
@@ -315,8 +333,8 @@ public class TestFsDatasetImpl {
 
     when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
     when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
-    Set<File> volumesToRemove = new HashSet<>();
-    volumesToRemove.add(loc.getFile());
+    Set<StorageLocation> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(loc);
     dataset.removeVolumes(volumesToRemove, true);
     assertEquals(numExistingVolumes, getNumVolumes());
   }
@@ -336,7 +354,8 @@ public class TestFsDatasetImpl {
     for (int i = 0; i < NUM_VOLUMES; i++) {
       FsVolumeImpl volume = mock(FsVolumeImpl.class);
       oldVolumes.add(volume);
-      when(volume.getBasePath()).thenReturn("data" + i);
+      when(volume.getStorageLocation()).thenReturn(
+          StorageLocation.parse(new File("data" + i).toURI().toString()));
       when(volume.checkClosed()).thenReturn(true);
       FsVolumeReference ref = mock(FsVolumeReference.class);
       when(ref.getVolume()).thenReturn(volume);
@@ -348,13 +367,16 @@ public class TestFsDatasetImpl {
     final FsVolumeImpl newVolume = mock(FsVolumeImpl.class);
     final FsVolumeReference newRef = mock(FsVolumeReference.class);
     when(newRef.getVolume()).thenReturn(newVolume);
-    when(newVolume.getBasePath()).thenReturn("data4");
+    when(newVolume.getStorageLocation()).thenReturn(
+        StorageLocation.parse(new File("data4").toURI().toString()));
     FsVolumeImpl blockedVolume = volumeList.getVolumes().get(1);
     doAnswer(new Answer() {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume(new File("data4"), false);
+        volumeList.removeVolume(
+            StorageLocation.parse((new File("data4")).toURI().toString()),
+            false);
         volumeList.addVolume(newRef);
         return null;
       }
@@ -386,7 +408,8 @@ public class TestFsDatasetImpl {
     File badDir = new File(BASE_DIR, "bad");
     badDir.mkdirs();
     doReturn(mockVolume).when(spyDataset)
-        .createFsVolume(anyString(), any(File.class), any(StorageType.class));
+        .createFsVolume(anyString(), any(StorageDirectory.class),
+            any(StorageLocation.class));
     doThrow(new IOException("Failed to getVolumeMap()"))
       .when(mockVolume).getVolumeMap(
         anyString(),
@@ -396,7 +419,8 @@ public class TestFsDatasetImpl {
     Storage.StorageDirectory sd = createStorageDirectory(badDir);
     sd.lock();
     DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
-    when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
+    when(storage.prepareVolume(eq(datanode),
+        eq(StorageLocation.parse(badDir.toURI().toString())),
         Matchers.<List<NamespaceInfo>>any()))
         .thenReturn(builder);
 
@@ -540,7 +564,7 @@ public class TestFsDatasetImpl {
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
     when(
-        storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        storage.prepareVolume(eq(datanode), eq(loc),
             anyListOf(NamespaceInfo.class))).thenReturn(builder);
 
     String cacheFilePath =
@@ -584,7 +608,7 @@ public class TestFsDatasetImpl {
     return dfsUsed;
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testRemoveVolumeBeingWritten() throws Exception {
     // Will write and remove on dn0.
     final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
@@ -636,10 +660,9 @@ public class TestFsDatasetImpl {
 
     class VolRemoveThread extends Thread {
       public void run() {
-        Set<File> volumesToRemove = new HashSet<>();
+        Set<StorageLocation> volumesToRemove = new HashSet<>();
         try {
-          volumesToRemove.add(StorageLocation.parse(
-              dataset.getVolume(eb).getBasePath()).getFile());
+          volumesToRemove.add(dataset.getVolume(eb).getStorageLocation());
         } catch (Exception e) {
           LOG.info("Problem preparing volumes to remove: ", e);
           Assert.fail("Exception in remove volume thread, check log for " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 3d4c38c..6eff300 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -22,7 +22,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
@@ -71,8 +73,13 @@ public class TestFsVolumeList {
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "nextvolume-" + i);
       curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
+      FsVolumeImpl volume = new FsVolumeImplBuilder()
+          .setConf(conf)
+          .setDataset(dataset)
+          .setStorageID("storage-id")
+          .setStorageDirectory(
+              new StorageDirectory(StorageLocation.parse(curDir.getPath())))
+          .build();
       volume.setCapacityForTesting(1024 * 1024 * 1024);
       volumes.add(volume);
       volumeList.addVolume(volume.obtainReference());
@@ -109,8 +116,13 @@ public class TestFsVolumeList {
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "volume-" + i);
       curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
+      FsVolumeImpl volume = new FsVolumeImplBuilder()
+          .setConf(conf)
+          .setDataset(dataset)
+          .setStorageID("storage-id")
+          .setStorageDirectory(
+              new StorageDirectory(StorageLocation.parse(curDir.getPath())))
+          .build();
       volumes.add(volume);
       volumeList.addVolume(volume.obtainReference());
     }
@@ -139,8 +151,13 @@ public class TestFsVolumeList {
         Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
     File volDir = new File(baseDir, "volume-0");
     volDir.mkdirs();
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DEFAULT);
+    FsVolumeImpl volume = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(volDir.getPath())))
+        .build();
     FsVolumeReference ref = volume.obtainReference();
     volumeList.addVolume(ref);
     assertNull(ref.getVolume());
@@ -155,8 +172,13 @@ public class TestFsVolumeList {
     volDir.mkdirs();
     // when storage type reserved is not configured,should consider
     // dfs.datanode.du.reserved.
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
-        StorageType.RAM_DISK);
+    FsVolumeImpl volume = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume.getReserved());
     // when storage type reserved is configured.
     conf.setLong(
@@ -165,17 +187,37 @@ public class TestFsVolumeList {
     conf.setLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
             + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
-    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.RAM_DISK);
+    FsVolumeImpl volume1 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 1L, volume1.getReserved());
-    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.SSD);
+    FsVolumeImpl volume2 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[SSD]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 2L, volume2.getReserved());
-    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DISK);
+    FsVolumeImpl volume3 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume3.getReserved());
-    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DEFAULT);
+    FsVolumeImpl volume4 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse(volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume4.getReserved());
   }
 
@@ -197,8 +239,13 @@ public class TestFsVolumeList {
     long actualNonDfsUsage = 300L;
     long reservedForReplicas = 50L;
     conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, duReserved);
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
-        StorageType.DEFAULT);
+    FsVolumeImpl volume = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse(volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     FsVolumeImpl spyVolume = Mockito.spy(volume);
     // Set Capacity for testing
     long testCapacity = diskCapacity - duReserved;

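The TestFsVolumeList changes above replace the positional FsVolumeImpl constructor with a fluent FsVolumeImplBuilder. As a generic illustration of that builder shape only (the class and setter names below are invented for this sketch, not the FsVolumeImplBuilder API):

    // Illustrative sketch only: immutable value object built through a
    // fluent builder, mirroring the shape used in the test changes above.
    final class VolumeConfig {
      private final String storageId;
      private final String directory;

      private VolumeConfig(Builder b) {
        this.storageId = b.storageId;
        this.directory = b.directory;
      }

      String getStorageID() { return storageId; }
      String getDirectory() { return directory; }

      static final class Builder {
        private String storageId;
        private String directory;

        Builder setStorageID(String storageId) {
          this.storageId = storageId;
          return this;
        }

        Builder setDirectory(String directory) {
          this.directory = directory;
          return this;
        }

        VolumeConfig build() {
          return new VolumeConfig(this);
        }
      }
    }

Illustrative usage: new VolumeConfig.Builder().setStorageID("storage-id").setDirectory("/data/volume-0").build(). Named setters make it obvious which argument is which, something the old multi-argument constructor calls did not.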
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index 794a887..7df0333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -331,8 +331,8 @@ public class TestDiskBalancerWithMockMover {
         .getFsVolumeReferences();
 
     nodeID = dataNode.getDatanodeUuid();
-    sourceName = references.get(0).getBasePath();
-    destName = references.get(1).getBasePath();
+    sourceName = references.get(0).getBaseURI().getPath();
+    destName = references.get(1).getBaseURI().getPath();
     sourceUUID = references.get(0).getStorageID();
     destUUID = references.get(1).getStorageID();
     references.close();




[03/52] [abbrv] hadoop git commit: HDFS-10957. Retire BKJM from trunk (Vinayakumar B)

Posted by cn...@apache.org.
HDFS-10957. Retire BKJM from trunk (Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31195488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31195488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31195488

Branch: refs/heads/HADOOP-13037
Commit: 311954883f714973784432589896553eb320b597
Parents: 35b9d7d
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Oct 6 19:28:25 2016 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Oct 6 19:28:25 2016 +0530

----------------------------------------------------------------------
 .../src/contrib/bkjournal/README.txt            |  66 --
 .../dev-support/findbugsExcludeFile.xml         |   5 -
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   | 175 ----
 .../bkjournal/BookKeeperEditLogInputStream.java | 264 -----
 .../BookKeeperEditLogOutputStream.java          | 188 ----
 .../bkjournal/BookKeeperJournalManager.java     | 893 -----------------
 .../contrib/bkjournal/CurrentInprogress.java    | 160 ---
 .../bkjournal/EditLogLedgerMetadata.java        | 217 ----
 .../hadoop/contrib/bkjournal/MaxTxId.java       | 103 --
 .../bkjournal/src/main/proto/bkjournal.proto    |  49 -
 .../hadoop/contrib/bkjournal/BKJMUtil.java      | 184 ----
 .../bkjournal/TestBookKeeperAsHASharedDir.java  | 414 --------
 .../bkjournal/TestBookKeeperConfiguration.java  | 174 ----
 .../bkjournal/TestBookKeeperEditLogStreams.java |  92 --
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 109 --
 .../bkjournal/TestBookKeeperJournalManager.java | 984 -------------------
 .../TestBookKeeperSpeculativeRead.java          | 167 ----
 .../bkjournal/TestBootstrapStandbyWithBKJM.java | 170 ----
 .../bkjournal/TestCurrentInprogress.java        | 160 ---
 .../hdfs/server/namenode/FSEditLogTestUtil.java |  40 -
 .../src/test/resources/log4j.properties         |  55 --
 .../markdown/HDFSHighAvailabilityWithNFS.md     | 114 ---
 hadoop-hdfs-project/pom.xml                     |   1 -
 hadoop-project/pom.xml                          |   6 -
 24 files changed, 4790 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt
deleted file mode 100644
index 7f67226..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-This module provides a BookKeeper backend for HDFS Namenode write
-ahead logging.
-
-BookKeeper is a highly available distributed write ahead logging
-system. For more details, see
-   
-    http://zookeeper.apache.org/bookkeeper
-
--------------------------------------------------------------------------------
-How do I build?
-
- To generate the distribution packages for BK journal, do the
- following.
-
-   $ mvn clean package -Pdist
-
- This will generate a jar with all the dependencies needed by the journal
- manager, 
-
- target/hadoop-hdfs-bkjournal-<VERSION>.jar
-
- Note that the -Pdist part of the build command is important, as otherwise
- the dependencies would not be packaged in the jar. 
-
--------------------------------------------------------------------------------
-How do I use the BookKeeper Journal?
-
- To run an HDFS namenode using BookKeeper as a backend, copy the bkjournal
- jar, generated above, into the lib directory of hdfs. In the standard 
- distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
-
-  cp target/hadoop-hdfs-bkjournal-<VERSION>.jar \
-    $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
-
- Then, in hdfs-site.xml, set the following properties.
-
-   <property>
-     <name>dfs.namenode.edits.dir</name>
-     <value>bookkeeper://localhost:2181/bkjournal,file:///path/for/edits</value>
-   </property>
-
-   <property>
-     <name>dfs.namenode.edits.journal-plugin.bookkeeper</name>
-     <value>org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager</value>
-   </property>
-
- In this example, the namenode is configured to use two write-ahead
- logging devices. One writes to BookKeeper and the other to a local
- file system. At the moment it is not possible to write only to
- BookKeeper, as the resource checker currently checks explicitly for
- local disks.
-
- The example above configures the namenode to look for the journal
- metadata at the path /bkjournal on a standalone zookeeper ensemble
- at localhost:2181. To configure a multi-host zookeeper ensemble,
- separate the hosts with semicolons. For example, if you have 3
- zookeeper servers, zk1, zk2 & zk3, each listening on port 2181, you
- would specify this with 
-  
-   bookkeeper://zk1:2181;zk2:2181;zk3:2181/bkjournal
-
- The final part /bkjournal specifies the znode in zookeeper where
- ledger metadata will be stored. Administrators can set this to anything
- they wish.
-
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 45c3a75..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<FindBugsFilter>
-     <Match>
-       <Class name="~org.apache.hadoop.contrib.bkjournal.BKJournalProtos.*" />
-     </Match>
-</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
deleted file mode 100644
index 7fb6c24..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+++ /dev/null
@@ -1,175 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
-    <relativePath>../../../../../hadoop-project</relativePath>
-  </parent>
-
-  <groupId>org.apache.hadoop.contrib</groupId>
-  <artifactId>hadoop-hdfs-bkjournal</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
-  <description>Apache Hadoop HDFS BookKeeper Journal</description>
-  <name>Apache Hadoop HDFS BookKeeper Journal</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <hadoop.component>hdfs</hadoop.component>
-    <hadoop.common.build.dir>${basedir}/../../../../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency> 
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency> 
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency> 
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.bookkeeper</groupId>
-      <artifactId>bookkeeper-server</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto</param>
-                <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>bkjournal.proto</include>
-                </includes>
-              </source>
-              <output>${project.build.directory}/generated-sources/java</output>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-  <profiles>
-    <profile>
-      <id>dist</id>
-      <build>
-        <plugins>
-          <plugin>
-            <artifactId>maven-dependency-plugin</artifactId>
-            <version>2.8</version>
-            <executions>
-              <execution>
-                <id>dist</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>copy</goal>
-                </goals>
-                <configuration>
-                  <artifactItems>
-                    <artifactItem>
-                      <groupId>org.apache.bookkeeper</groupId>
-                      <artifactId>bookkeeper-server</artifactId>
-                      <type>jar</type>
-                    </artifactItem>
-                  </artifactItems>
-                  <outputDirectory>${project.build.directory}/lib</outputDirectory>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
deleted file mode 100644
index 86da807..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogInputStream.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Enumeration;
-
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.LedgerEntry;
-import org.apache.bookkeeper.client.BKException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Input stream which reads from a BookKeeper ledger.
- */
-class BookKeeperEditLogInputStream extends EditLogInputStream {
-  static final Log LOG = LogFactory.getLog(BookKeeperEditLogInputStream.class);
-
-  private final long firstTxId;
-  private final long lastTxId;
-  private final int logVersion;
-  private final boolean inProgress;
-  private final LedgerHandle lh;
-
-  private final FSEditLogOp.Reader reader;
-  private final FSEditLogLoader.PositionTrackingInputStream tracker;
-
-  /**
-   * Construct BookKeeper edit log input stream.
-   * Starts reading from the first entry of the ledger.
-   */
-  BookKeeperEditLogInputStream(final LedgerHandle lh, 
-                               final EditLogLedgerMetadata metadata)
-      throws IOException {
-    this(lh, metadata, 0);
-  }
-
-  /**
-   * Construct BookKeeper edit log input stream. 
-   * Starts reading from firstBookKeeperEntry. This allows the stream
-   * to take a shortcut during recovery, as it doesn't have to read
-   * every edit log transaction to find out what the last one is.
-   */
-  BookKeeperEditLogInputStream(LedgerHandle lh, EditLogLedgerMetadata metadata,
-                               long firstBookKeeperEntry) 
-      throws IOException {
-    this.lh = lh;
-    this.firstTxId = metadata.getFirstTxId();
-    this.lastTxId = metadata.getLastTxId();
-    this.logVersion = metadata.getDataLayoutVersion();
-    this.inProgress = metadata.isInProgress();
-
-    if (firstBookKeeperEntry < 0
-        || firstBookKeeperEntry > lh.getLastAddConfirmed()) {
-      throw new IOException("Invalid first bk entry to read: "
-          + firstBookKeeperEntry + ", LAC: " + lh.getLastAddConfirmed());
-    }
-    BufferedInputStream bin = new BufferedInputStream(
-        new LedgerInputStream(lh, firstBookKeeperEntry));
-    tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
-    DataInputStream in = new DataInputStream(tracker);
-
-    reader = FSEditLogOp.Reader.create(in, tracker, logVersion);
-  }
-
-  @Override
-  public long getFirstTxId() {
-    return firstTxId;
-  }
-
-  @Override
-  public long getLastTxId() {
-    return lastTxId;
-  }
-  
-  @Override
-  public int getVersion(boolean verifyVersion) throws IOException {
-    return logVersion;
-  }
-
-  @Override
-  protected FSEditLogOp nextOp() throws IOException {
-    return reader.readOp(false);
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      lh.close();
-    } catch (BKException e) {
-      throw new IOException("Exception closing ledger", e);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted closing ledger", e);
-    }
-  }
-
-  @Override
-  public long getPosition() {
-    return tracker.getPos();
-  }
-
-  @Override
-  public long length() throws IOException {
-    return lh.getLength();
-  }
-  
-  @Override
-  public String getName() {
-    return String.format(
-        "BookKeeperLedger[ledgerId=%d,firstTxId=%d,lastTxId=%d]", lh.getId(),
-        firstTxId, lastTxId);
-  }
-
-  @Override
-  public boolean isInProgress() {
-    return inProgress;
-  }
-
-  /**
-   * Skip forward to specified transaction id.
-   * Currently we do this by just iterating forward.
-   * If this proves to be too expensive, this can be reimplemented
-   * with a binary search over bk entries
-   */
-  public void skipTo(long txId) throws IOException {
-    long numToSkip = getFirstTxId() - txId;
-
-    FSEditLogOp op = null;
-    for (long i = 0; i < numToSkip; i++) {
-      op = readOp();
-    }
-    if (op != null && op.getTransactionId() != txId-1) {
-      throw new IOException("Corrupt stream, expected txid "
-          + (txId-1) + ", got " + op.getTransactionId());
-    }
-  }
-
-  @Override
-  public String toString() {
-    return ("BookKeeperEditLogInputStream {" + this.getName() + "}");
-  }
-
-  @Override
-  public void setMaxOpSize(int maxOpSize) {
-    reader.setMaxOpSize(maxOpSize);
-  }
-
-  @Override
-  public boolean isLocalLog() {
-    return false;
-  }
-
-  /**
-   * Input stream implementation which can be used by 
-   * FSEditLogOp.Reader
-   */
-  private static class LedgerInputStream extends InputStream {
-    private long readEntries;
-    private InputStream entryStream = null;
-    private final LedgerHandle lh;
-    private final long maxEntry;
-
-    /**
-     * Construct ledger input stream
-     * @param lh the ledger handle to read from
-     * @param firstBookKeeperEntry ledger entry to start reading from
-     */
-    LedgerInputStream(LedgerHandle lh, long firstBookKeeperEntry) 
-        throws IOException {
-      this.lh = lh;
-      readEntries = firstBookKeeperEntry;
-
-      maxEntry = lh.getLastAddConfirmed();
-    }
-
-    /**
-     * Get input stream representing next entry in the
-     * ledger.
-     * @return input stream, or null if no more entries
-     */
-    private InputStream nextStream() throws IOException {
-      try {        
-        if (readEntries > maxEntry) {
-          return null;
-        }
-        Enumeration<LedgerEntry> entries 
-          = lh.readEntries(readEntries, readEntries);
-        readEntries++;
-        if (entries.hasMoreElements()) {
-            LedgerEntry e = entries.nextElement();
-            assert !entries.hasMoreElements();
-            return e.getEntryInputStream();
-        }
-      } catch (BKException e) {
-        throw new IOException("Error reading entries from bookkeeper", e);
-      } catch (InterruptedException e) {
-        throw new IOException("Interrupted reading entries from bookkeeper", e);
-      }
-      return null;
-    }
-
-    @Override
-    public int read() throws IOException {
-      byte[] b = new byte[1];
-      if (read(b, 0, 1) != 1) {
-        return -1;
-      } else {
-        return b[0];
-      }
-    }
-    
-    @Override
-    public int read(byte[] b, int off, int len) throws IOException {
-      try {
-        int read = 0;
-        if (entryStream == null) {
-          entryStream = nextStream();
-          if (entryStream == null) {
-            return read;
-          }
-        }
-
-        while (read < len) {
-          int thisread = entryStream.read(b, off+read, (len-read));
-          if (thisread == -1) {
-            entryStream = nextStream();
-            if (entryStream == null) {
-              return read;
-            }
-          } else {
-            read += thisread;
-          }
-        }
-        return read;
-      } catch (IOException e) {
-        throw e;
-      }
-
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
deleted file mode 100644
index 865806b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperEditLogOutputStream.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.CountDownLatch;
-
-import java.util.Arrays;
-
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
-
-import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.io.DataOutputBuffer;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Output stream for BookKeeper Journal.
- * Multiple complete edit log entries are packed into a single bookkeeper
- * entry before sending it over the network. The fact that the edit log entries
- * are complete in the bookkeeper entries means that each bookkeeper log entry
- * can be read as a complete edit log. This is useful for recovery, as we don't
- * need to read through the entire edit log segment to get the last written
- * entry.
- */
-class BookKeeperEditLogOutputStream
-  extends EditLogOutputStream implements AddCallback {
-  static final Log LOG = LogFactory.getLog(BookKeeperEditLogOutputStream.class);
-
-  private final DataOutputBuffer bufCurrent;
-  private final AtomicInteger outstandingRequests;
-  private final int transmissionThreshold;
-  private final LedgerHandle lh;
-  private CountDownLatch syncLatch;
-  private final AtomicInteger transmitResult
-    = new AtomicInteger(BKException.Code.OK);
-  private final Writer writer;
-
-  /**
-   * Construct an edit log output stream which writes to a ledger.
-   *
-   */
-  protected BookKeeperEditLogOutputStream(Configuration conf, LedgerHandle lh)
-      throws IOException {
-    super();
-
-    bufCurrent = new DataOutputBuffer();
-    outstandingRequests = new AtomicInteger(0);
-    syncLatch = null;
-    this.lh = lh;
-    this.writer = new Writer(bufCurrent);
-    this.transmissionThreshold
-      = conf.getInt(BookKeeperJournalManager.BKJM_OUTPUT_BUFFER_SIZE,
-                    BookKeeperJournalManager.BKJM_OUTPUT_BUFFER_SIZE_DEFAULT);
-  }
-
-  @Override
-  public void create(int layoutVersion) throws IOException {
-    // noop
-  }
-
-  @Override
-  public void close() throws IOException {
-    setReadyToFlush();
-    flushAndSync(true);
-    try {
-      lh.close();
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted waiting on close", ie);
-    } catch (BKException bke) {
-      throw new IOException("BookKeeper error during close", bke);
-    }
-  }
-
-  @Override
-  public void abort() throws IOException {
-    try {
-      lh.close();
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted waiting on close", ie);
-    } catch (BKException bke) {
-      throw new IOException("BookKeeper error during abort", bke);
-    }
-
-  }
-
-  @Override
-  public void writeRaw(final byte[] data, int off, int len) throws IOException {
-    throw new IOException("Not supported for BK");
-  }
-
-  @Override
-  public void write(FSEditLogOp op) throws IOException {
-    writer.writeOp(op);
-
-    if (bufCurrent.getLength() > transmissionThreshold) {
-      transmit();
-    }
-  }
-
-  @Override
-  public void setReadyToFlush() throws IOException {
-    transmit();
-
-    synchronized (this) {
-      syncLatch = new CountDownLatch(outstandingRequests.get());
-    }
-  }
-
-  @Override
-  public void flushAndSync(boolean durable) throws IOException {
-    assert(syncLatch != null);
-    try {
-      syncLatch.await();
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted waiting on latch", ie);
-    }
-    if (transmitResult.get() != BKException.Code.OK) {
-      throw new IOException("Failed to write to bookkeeper; Error is ("
-                            + transmitResult.get() + ") "
-                            + BKException.getMessage(transmitResult.get()));
-    }
-
-    syncLatch = null;
-    // wait for whatever we wait on
-  }
-
-  /**
-   * Transmit the current buffer to bookkeeper.
-   * Synchronised at the FSEditLog level. #write() and #setReadyToFlush()
-   * are never called at the same time.
-   */
-  private void transmit() throws IOException {
-    if (!transmitResult.compareAndSet(BKException.Code.OK,
-                                     BKException.Code.OK)) {
-      throw new IOException("Trying to write to an errored stream;"
-          + " Error code : (" + transmitResult.get()
-          + ") " + BKException.getMessage(transmitResult.get()));
-    }
-    if (bufCurrent.getLength() > 0) {
-      byte[] entry = Arrays.copyOf(bufCurrent.getData(),
-                                   bufCurrent.getLength());
-      lh.asyncAddEntry(entry, this, null);
-      bufCurrent.reset();
-      outstandingRequests.incrementAndGet();
-    }
-  }
-
-  @Override
-  public void addComplete(int rc, LedgerHandle handle,
-                          long entryId, Object ctx) {
-    synchronized(this) {
-      outstandingRequests.decrementAndGet();
-      if (!transmitResult.compareAndSet(BKException.Code.OK, rc)) {
-        LOG.warn("Tried to set transmit result to (" + rc + ") \""
-            + BKException.getMessage(rc) + "\""
-            + " but is already (" + transmitResult.get() + ") \""
-            + BKException.getMessage(transmitResult.get()) + "\"");
-      }
-      CountDownLatch l = syncLatch;
-      if (l != null) {
-        l.countDown();
-      }
-    }
-  }
-}
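
For reference, a condensed sketch (not part of this file) of the
asynchronous-flush pattern the deleted class implements: adds go out via
asyncAddEntry, setReadyToFlush() snapshots the count of outstanding adds into
a latch, and flushAndSync() waits on that latch. It uses only the BookKeeper
client API already imported above; error handling and the transmitResult
bookkeeping are omitted, and the class name AsyncFlushSketch is hypothetical.

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.atomic.AtomicInteger;

  import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
  import org.apache.bookkeeper.client.LedgerHandle;

  class AsyncFlushSketch implements AddCallback {
    private final LedgerHandle lh;               // an already-open ledger
    private final AtomicInteger outstanding = new AtomicInteger(0);
    private volatile CountDownLatch syncLatch;

    AsyncFlushSketch(LedgerHandle lh) {
      this.lh = lh;
    }

    void transmit(byte[] entry) {
      outstanding.incrementAndGet();
      lh.asyncAddEntry(entry, this, null);       // asynchronous add
    }

    void setReadyToFlush() {
      // Remember how many adds must be acknowledged before the next sync.
      syncLatch = new CountDownLatch(outstanding.get());
    }

    void flushAndSync() throws InterruptedException {
      syncLatch.await();                         // block until all acks arrive
    }

    @Override
    public void addComplete(int rc, LedgerHandle handle, long entryId,
                            Object ctx) {
      outstanding.decrementAndGet();
      CountDownLatch latch = syncLatch;
      if (latch != null) {
        latch.countDown();
      }
    }
  }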

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
deleted file mode 100644
index 8e4d032..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ /dev/null
@@ -1,893 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.JournalManager;
-import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.bookkeeper.conf.ClientConfiguration;
-import org.apache.bookkeeper.client.BKException;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.LedgerHandle;
-import org.apache.bookkeeper.util.ZkUtils;
-
-import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.AsyncCallback.StringCallback;
-import org.apache.zookeeper.ZKUtil;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.io.IOException;
-
-import java.net.URI;
-
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.VersionProto;
-import com.google.protobuf.TextFormat;
-import static com.google.common.base.Charsets.UTF_8;
-
-import org.apache.commons.io.Charsets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import com.google.common.annotations.VisibleForTesting;
-/**
- * BookKeeper Journal Manager
- *
- * To use, add the following to hdfs-site.xml.
- * <pre>
- * {@code
- * <property>
- *   <name>dfs.namenode.edits.dir</name>
- *   <value>bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal</value>
- * </property>
- *
- * <property>
- *   <name>dfs.namenode.edits.journal-plugin.bookkeeper</name>
- *   <value>org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager</value>
- * </property>
- * }
- * </pre>
- * The URI format for bookkeeper is bookkeeper://[zkEnsemble]/[rootZnode]
- * [zkEnsemble] is a list of semicolon-separated zookeeper host:port
- * pairs. In the example above there are 3 servers in the ensemble,
- * zk1, zk2 &amp; zk3, each one listening on port 2181.
- *
- * [rootZnode] is the path of the zookeeper znode under which the editlog
- * information will be stored.
- *
- * Other configuration options are:
- * <ul>
- *   <li><b>dfs.namenode.bookkeeperjournal.output-buffer-size</b>
- *       Number of bytes a bookkeeper journal stream will buffer before
- *       forcing a flush. Default is 1024.</li>
- *   <li><b>dfs.namenode.bookkeeperjournal.ensemble-size</b>
- *       Number of bookkeeper servers in edit log ledger ensembles. This
- *       is the number of bookkeeper servers which need to be available
- *       for the ledger to be writable. Default is 3.</li>
- *   <li><b>dfs.namenode.bookkeeperjournal.quorum-size</b>
- *       Number of bookkeeper servers in the write quorum. This is the
- *       number of bookkeeper servers which must have acknowledged the
- *       write of an entry before it is considered written.
- *       Default is 2.</li>
- *   <li><b>dfs.namenode.bookkeeperjournal.digestPw</b>
- *       Password to use when creating ledgers. </li>
- *   <li><b>dfs.namenode.bookkeeperjournal.zk.session.timeout</b>
- *       Session timeout for Zookeeper client from BookKeeper Journal Manager.
- *       Hadoop recommends that this value be less than the ZKFC
- *       session timeout. Default value is 3000.</li>
- * </ul>
- */
-public class BookKeeperJournalManager implements JournalManager {
-  static final Log LOG = LogFactory.getLog(BookKeeperJournalManager.class);
-
-  public static final String BKJM_OUTPUT_BUFFER_SIZE
-    = "dfs.namenode.bookkeeperjournal.output-buffer-size";
-  public static final int BKJM_OUTPUT_BUFFER_SIZE_DEFAULT = 1024;
-
-  public static final String BKJM_BOOKKEEPER_ENSEMBLE_SIZE
-    = "dfs.namenode.bookkeeperjournal.ensemble-size";
-  public static final int BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT = 3;
-
-  public static final String BKJM_BOOKKEEPER_QUORUM_SIZE
-    = "dfs.namenode.bookkeeperjournal.quorum-size";
-  public static final int BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT = 2;
-
-  public static final String BKJM_BOOKKEEPER_DIGEST_PW
-    = "dfs.namenode.bookkeeperjournal.digestPw";
-  public static final String BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT = "";
-
-  private static final int BKJM_LAYOUT_VERSION = -1;
-  
-  public static final String BKJM_ZK_SESSION_TIMEOUT 
-    = "dfs.namenode.bookkeeperjournal.zk.session.timeout";
-  public static final int BKJM_ZK_SESSION_TIMEOUT_DEFAULT = 3000;
-
-  private static final String BKJM_EDIT_INPROGRESS = "inprogress_";
-
-  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH
-    = "dfs.namenode.bookkeeperjournal.zk.availablebookies";
-
-  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT
-    = "/ledgers/available";
-
-  public static final String BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS
-    = "dfs.namenode.bookkeeperjournal.speculativeReadTimeoutMs";
-  public static final int BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT
-    = 2000;
-
-  public static final String BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC
-    = "dfs.namenode.bookkeeperjournal.readEntryTimeoutSec";
-  public static final int BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT = 5;
-
-  public static final String BKJM_BOOKKEEPER_ACK_QUORUM_SIZE 
-    = "dfs.namenode.bookkeeperjournal.ack.quorum-size";
-
-  public static final String BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC
-    = "dfs.namenode.bookkeeperjournal.addEntryTimeoutSec";
-  public static final int BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT = 5;
-
-  private ZooKeeper zkc;
-  private final Configuration conf;
-  private final BookKeeper bkc;
-  private final CurrentInprogress ci;
-  private final String basePath;
-  private final String ledgerPath;
-  private final String versionPath;
-  private final MaxTxId maxTxId;
-  private final int ensembleSize;
-  private final int quorumSize;
-  private final int ackQuorumSize;
-  private final int addEntryTimeout;
-  private final String digestpw;
-  private final int speculativeReadTimeout;
-  private final int readEntryTimeout;
-  private final CountDownLatch zkConnectLatch;
-  private final NamespaceInfo nsInfo;
-  private boolean initialized = false;
-  private LedgerHandle currentLedger = null;
-
-  /**
-   * Construct a Bookkeeper journal manager.
-   */
-  public BookKeeperJournalManager(Configuration conf, URI uri,
-      NamespaceInfo nsInfo) throws IOException {
-    this.conf = conf;
-    this.nsInfo = nsInfo;
-
-    String zkConnect = uri.getAuthority().replace(";", ",");
-    basePath = uri.getPath();
-    ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
-                               BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
-    quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
-                             BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
-    ackQuorumSize = conf.getInt(BKJM_BOOKKEEPER_ACK_QUORUM_SIZE, quorumSize);
-    addEntryTimeout = conf.getInt(BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_SEC,
-                             BKJM_BOOKKEEPER_ADD_ENTRY_TIMEOUT_DEFAULT);
-    speculativeReadTimeout = conf.getInt(
-                             BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS,
-                             BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT);
-    readEntryTimeout = conf.getInt(BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC,
-                             BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT);
-
-    ledgerPath = basePath + "/ledgers";
-    String maxTxIdPath = basePath + "/maxtxid";
-    String currentInprogressNodePath = basePath + "/CurrentInprogress";
-    versionPath = basePath + "/version";
-    digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
-                        BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
-
-    try {
-      zkConnectLatch = new CountDownLatch(1);
-      int bkjmZKSessionTimeout = conf.getInt(BKJM_ZK_SESSION_TIMEOUT,
-          BKJM_ZK_SESSION_TIMEOUT_DEFAULT);
-      zkc = new ZooKeeper(zkConnect, bkjmZKSessionTimeout,
-          new ZkConnectionWatcher());
-      // Configured zk session timeout + some extra grace period (here
-      // BKJM_ZK_SESSION_TIMEOUT_DEFAULT used as grace period)
-      int zkConnectionLatchTimeout = bkjmZKSessionTimeout
-          + BKJM_ZK_SESSION_TIMEOUT_DEFAULT;
-      if (!zkConnectLatch
-          .await(zkConnectionLatchTimeout, TimeUnit.MILLISECONDS)) {
-        throw new IOException("Error connecting to zookeeper");
-      }
-
-      prepareBookKeeperEnv();
-      ClientConfiguration clientConf = new ClientConfiguration();
-      clientConf.setSpeculativeReadTimeout(speculativeReadTimeout);
-      clientConf.setReadEntryTimeout(readEntryTimeout);
-      clientConf.setAddEntryTimeout(addEntryTimeout);
-      bkc = new BookKeeper(clientConf, zkc);
-    } catch (KeeperException e) {
-      throw new IOException("Error initializing zk", e);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted while initializing bk journal manager",
-                            ie);
-    }
-
-    ci = new CurrentInprogress(zkc, currentInprogressNodePath);
-    maxTxId = new MaxTxId(zkc, maxTxIdPath);
-  }
-
-  /**
-   * Pre-creating bookkeeper metadata path in zookeeper.
-   */
-  private void prepareBookKeeperEnv() throws IOException {
-    // create the bookie available path in zookeeper if it doesn't exist
-    final String zkAvailablePath = conf.get(BKJM_ZK_LEDGERS_AVAILABLE_PATH,
-        BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
-    final CountDownLatch zkPathLatch = new CountDownLatch(1);
-
-    final AtomicBoolean success = new AtomicBoolean(false);
-    StringCallback callback = new StringCallback() {
-      @Override
-      public void processResult(int rc, String path, Object ctx, String name) {
-        if (KeeperException.Code.OK.intValue() == rc
-            || KeeperException.Code.NODEEXISTS.intValue() == rc) {
-          LOG.info("Successfully created bookie available path : "
-              + zkAvailablePath);
-          success.set(true);
-        } else {
-          KeeperException.Code code = KeeperException.Code.get(rc);
-          LOG.error("Error : "
-                  + KeeperException.create(code, path).getMessage()
-                  + ", failed to create bookie available path : "
-                  + zkAvailablePath);
-        }
-        zkPathLatch.countDown();
-      }
-    };
-    ZkUtils.asyncCreateFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
-        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
-
-    try {
-      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)
-          || !success.get()) {
-        throw new IOException("Couldn't create bookie available path :"
-            + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
-            + " millis");
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new IOException(
-          "Interrupted when creating the bookie available path : "
-              + zkAvailablePath, e);
-    }
-  }
-
-  @Override
-  public void format(NamespaceInfo ns) throws IOException {
-    try {
-      // delete old info
-      Stat baseStat = null;
-      Stat ledgerStat = null;
-      if ((baseStat = zkc.exists(basePath, false)) != null) {
-        if ((ledgerStat = zkc.exists(ledgerPath, false)) != null) {
-          for (EditLogLedgerMetadata l : getLedgerList(true)) {
-            try {
-              bkc.deleteLedger(l.getLedgerId());
-            } catch (BKException.BKNoSuchLedgerExistsException bke) {
-              LOG.warn("Ledger " + l.getLedgerId() + " does not exist;"
-                       + " Cannot delete.");
-            }
-          }
-        }
-        ZKUtil.deleteRecursive(zkc, basePath);
-      }
-
-      // should be clean now.
-      zkc.create(basePath, new byte[] {'0'},
-          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-
-      VersionProto.Builder builder = VersionProto.newBuilder();
-      builder.setNamespaceInfo(PBHelper.convert(ns))
-        .setLayoutVersion(BKJM_LAYOUT_VERSION);
-
-      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
-      zkc.create(versionPath, data,
-                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-
-      zkc.create(ledgerPath, new byte[] {'0'},
-                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-    } catch (KeeperException ke) {
-      LOG.error("Error accessing zookeeper to format", ke);
-      throw new IOException("Error accessing zookeeper to format", ke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted during format", ie);
-    } catch (BKException bke) {
-      throw new IOException("Error cleaning up ledgers during format", bke);
-    }
-  }
-  
-  @Override
-  public boolean hasSomeData() throws IOException {
-    try {
-      return zkc.exists(basePath, false) != null;
-    } catch (KeeperException ke) {
-      throw new IOException("Couldn't contact zookeeper", ke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted while checking for data", ie);
-    }
-  }
-
-  synchronized private void checkEnv() throws IOException {
-    if (!initialized) {
-      try {
-        Stat versionStat = zkc.exists(versionPath, false);
-        if (versionStat == null) {
-          throw new IOException("Environment not initialized. "
-                                +"Have you forgotten to format?");
-        }
-        byte[] d = zkc.getData(versionPath, false, versionStat);
-
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        TextFormat.merge(new String(d, UTF_8), builder);
-        if (!builder.isInitialized()) {
-          throw new IOException("Invalid/Incomplete data in znode");
-        }
-        VersionProto vp = builder.build();
-
-        // There's only one version at the moment
-        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
-
-        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
-
-        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
-            !nsInfo.clusterID.equals(readns.getClusterID()) ||
-            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
-          String err = String.format("Environment mismatch. Running process %s"
-                                     +", stored in ZK %s", nsInfo, readns);
-          LOG.error(err);
-          throw new IOException(err);
-        }
-
-        ci.init();
-        initialized = true;
-      } catch (KeeperException ke) {
-        throw new IOException("Cannot access ZooKeeper", ke);
-      } catch (InterruptedException ie) {
-        Thread.currentThread().interrupt();
-        throw new IOException("Interrupted while checking environment", ie);
-      }
-    }
-  }
-
-  /**
-   * Start a new log segment in a BookKeeper ledger.
-   * First ensure that we have the write lock for this journal.
-   * Then create a ledger and stream based on that ledger.
-   * The ledger id is written to the inprogress znode, so that in the
-   * case of a crash, a recovery process can find the ledger we were writing
-   * to when we crashed.
-   * @param txId First transaction id to be written to the stream
-   */
-  @Override
-  public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
-      throws IOException {
-    checkEnv();
-
-    if (txId <= maxTxId.get()) {
-      throw new IOException("We've already seen " + txId
-          + ". A new stream cannot be created with it");
-    }
-
-    try {
-      String existingInprogressNode = ci.read();
-      if (null != existingInprogressNode
-          && zkc.exists(existingInprogressNode, false) != null) {
-        throw new IOException("Inprogress node already exists");
-      }
-      if (currentLedger != null) {
-        // bookkeeper errored on last stream, clean up ledger
-        currentLedger.close();
-      }
-      currentLedger = bkc.createLedger(ensembleSize, quorumSize, ackQuorumSize,
-                                       BookKeeper.DigestType.MAC,
-                                       digestpw.getBytes(Charsets.UTF_8));
-    } catch (BKException bke) {
-      throw new IOException("Error creating ledger", bke);
-    } catch (KeeperException ke) {
-      throw new IOException("Error in zookeeper while creating ledger", ke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted creating ledger", ie);
-    }
-
-    try {
-      String znodePath = inprogressZNode(txId);
-      EditLogLedgerMetadata l = new EditLogLedgerMetadata(znodePath,
-          layoutVersion, currentLedger.getId(), txId);
-      /* Write the ledger metadata out to the inprogress ledger znode
-       * This can fail if for some reason our write lock has
-       * expired (@see WriteLock) and another process has managed to
-       * create the inprogress znode.
-       * In this case, throw an exception. We don't want to continue
-       * as this would lead to a split brain situation.
-       */
-      l.write(zkc, znodePath);
-
-      maxTxId.store(txId);
-      ci.update(znodePath);
-      return new BookKeeperEditLogOutputStream(conf, currentLedger);
-    } catch (KeeperException ke) {
-      cleanupLedger(currentLedger);
-      throw new IOException("Error storing ledger metadata", ke);
-    }
-  }
-
-  private void cleanupLedger(LedgerHandle lh) {
-    try {
-      long id = currentLedger.getId();
-      currentLedger.close();
-      bkc.deleteLedger(id);
-    } catch (BKException bke) {
-      //log & ignore, an IOException will be thrown soon
-      LOG.error("Error closing ledger", bke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      LOG.warn("Interrupted while closing ledger", ie);
-    }
-  }
-
-
-
-  /**
-   * Finalize a log segment. If the journal manager is currently
-   * writing to a ledger, ensure that this is the ledger of the log segment
-   * being finalized.
-   *
-   * Otherwise this is the recovery case. In the recovery case, ensure that
-   * the firstTxId of the ledger matches firstTxId for the segment we are
-   * trying to finalize.
-   */
-  @Override
-  public void finalizeLogSegment(long firstTxId, long lastTxId)
-      throws IOException {
-    checkEnv();
-
-    String inprogressPath = inprogressZNode(firstTxId);
-    try {
-      Stat inprogressStat = zkc.exists(inprogressPath, false);
-      if (inprogressStat == null) {
-        throw new IOException("Inprogress znode " + inprogressPath
-                              + " doesn't exist");
-      }
-
-      EditLogLedgerMetadata l
-        =  EditLogLedgerMetadata.read(zkc, inprogressPath);
-
-      if (currentLedger != null) { // normal, non-recovery case
-        if (l.getLedgerId() == currentLedger.getId()) {
-          try {
-            currentLedger.close();
-          } catch (BKException bke) {
-            LOG.error("Error closing current ledger", bke);
-          }
-          currentLedger = null;
-        } else {
-          throw new IOException(
-              "Active ledger has different ID to inprogress. "
-              + l.getLedgerId() + " found, "
-              + currentLedger.getId() + " expected");
-        }
-      }
-
-      if (l.getFirstTxId() != firstTxId) {
-        throw new IOException("Transaction id not as expected, "
-            + l.getFirstTxId() + " found, " + firstTxId + " expected");
-      }
-
-      l.finalizeLedger(lastTxId);
-      String finalisedPath = finalizedLedgerZNode(firstTxId, lastTxId);
-      try {
-        l.write(zkc, finalisedPath);
-      } catch (KeeperException.NodeExistsException nee) {
-        if (!l.verify(zkc, finalisedPath)) {
-          throw new IOException("Node " + finalisedPath + " already exists"
-                                + " but data doesn't match");
-        }
-      }
-      maxTxId.store(lastTxId);
-      zkc.delete(inprogressPath, inprogressStat.getVersion());
-      String inprogressPathFromCI = ci.read();
-      if (inprogressPath.equals(inprogressPathFromCI)) {
-        ci.clear();
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Error finalising ledger", e);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Error finalising ledger", ie);
-    }
-  }
-
-  public void selectInputStreams(
-      Collection<EditLogInputStream> streams,
-      long fromTxnId, boolean inProgressOk) throws IOException {
-    selectInputStreams(streams, fromTxnId, inProgressOk, false);
-  }
-
-  @Override
-  public void selectInputStreams(Collection<EditLogInputStream> streams,
-      long fromTxId, boolean inProgressOk, boolean onlyDurableTxns)
-      throws IOException {
-    List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(fromTxId,
-        inProgressOk);
-    try {
-      BookKeeperEditLogInputStream elis = null;
-      for (EditLogLedgerMetadata l : currentLedgerList) {
-        long lastTxId = l.getLastTxId();
-        if (l.isInProgress()) {
-          lastTxId = recoverLastTxId(l, false);
-        }
-        // Check once again, required in case of InProgress and is case of any
-        // gap.
-        if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
-          LedgerHandle h;
-          if (l.isInProgress()) { // we don't want to fence the current journal
-            h = bkc.openLedgerNoRecovery(l.getLedgerId(),
-                BookKeeper.DigestType.MAC, digestpw.getBytes(Charsets.UTF_8));
-          } else {
-            h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
-                digestpw.getBytes(Charsets.UTF_8));
-          }
-          elis = new BookKeeperEditLogInputStream(h, l);
-          elis.skipTo(fromTxId);
-        } else {
-          // If mismatches then there might be some gap, so we should not check
-          // further.
-          return;
-        }
-        streams.add(elis);
-        if (elis.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
-          return;
-        }
-        fromTxId = elis.getLastTxId() + 1;
-      }
-    } catch (BKException e) {
-      throw new IOException("Could not open ledger for " + fromTxId, e);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted opening ledger for " + fromTxId, ie);
-    }
-  }
-
-  long getNumberOfTransactions(long fromTxId, boolean inProgressOk)
-      throws IOException {
-    long count = 0;
-    long expectedStart = 0;
-    for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
-      long lastTxId = l.getLastTxId();
-      if (l.isInProgress()) {
-        lastTxId = recoverLastTxId(l, false);
-        if (lastTxId == HdfsServerConstants.INVALID_TXID) {
-          break;
-        }
-      }
-
-      assert lastTxId >= l.getFirstTxId();
-
-      if (lastTxId < fromTxId) {
-        continue;
-      } else if (l.getFirstTxId() <= fromTxId && lastTxId >= fromTxId) {
-        // we can start in the middle of a segment
-        count = (lastTxId - l.getFirstTxId()) + 1;
-        expectedStart = lastTxId + 1;
-      } else {
-        if (expectedStart != l.getFirstTxId()) {
-          if (count == 0) {
-            throw new CorruptionException("StartTxId " + l.getFirstTxId()
-                + " is not as expected " + expectedStart
-                + ". Gap in transaction log?");
-          } else {
-            break;
-          }
-        }
-        count += (lastTxId - l.getFirstTxId()) + 1;
-        expectedStart = lastTxId + 1;
-      }
-    }
-    return count;
-  }
-
-  @Override
-  public void recoverUnfinalizedSegments() throws IOException {
-    checkEnv();
-
-    synchronized (this) {
-      try {
-        List<String> children = zkc.getChildren(ledgerPath, false);
-        for (String child : children) {
-          if (!child.startsWith(BKJM_EDIT_INPROGRESS)) {
-            continue;
-          }
-          String znode = ledgerPath + "/" + child;
-          EditLogLedgerMetadata l = EditLogLedgerMetadata.read(zkc, znode);
-          try {
-            long endTxId = recoverLastTxId(l, true);
-            if (endTxId == HdfsServerConstants.INVALID_TXID) {
-              LOG.error("Unrecoverable corruption has occurred in segment "
-                  + l.toString() + " at path " + znode
-                  + ". Unable to continue recovery.");
-              throw new IOException("Unrecoverable corruption,"
-                  + " please check logs.");
-            }
-            finalizeLogSegment(l.getFirstTxId(), endTxId);
-          } catch (SegmentEmptyException see) {
-            LOG.warn("Inprogress znode " + child
-                + " refers to a ledger which is empty. This occurs when the NN"
-                + " crashes after opening a segment, but before writing the"
-                + " OP_START_LOG_SEGMENT op. It is safe to delete."
-                + " MetaData [" + l.toString() + "]");
-
-            // If the max seen transaction is the same as what would
-            // have been the first transaction of the failed ledger,
-            // decrement it, as that transaction never happened and as
-            // such, is _not_ the last seen
-            if (maxTxId.get() == l.getFirstTxId()) {
-              maxTxId.reset(maxTxId.get() - 1);
-            }
-
-            zkc.delete(znode, -1);
-          }
-        }
-      } catch (KeeperException.NoNodeException nne) {
-          // nothing to recover, ignore
-      } catch (KeeperException ke) {
-        throw new IOException("Couldn't get list of inprogress segments", ke);
-      } catch (InterruptedException ie) {
-        Thread.currentThread().interrupt();
-        throw new IOException("Interrupted getting list of inprogress segments",
-                              ie);
-      }
-    }
-  }
-
-  @Override
-  public void purgeLogsOlderThan(long minTxIdToKeep)
-      throws IOException {
-    checkEnv();
-
-    for (EditLogLedgerMetadata l : getLedgerList(false)) {
-      if (l.getLastTxId() < minTxIdToKeep) {
-        try {
-          Stat stat = zkc.exists(l.getZkPath(), false);
-          zkc.delete(l.getZkPath(), stat.getVersion());
-          bkc.deleteLedger(l.getLedgerId());
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          LOG.error("Interrupted while purging " + l, ie);
-        } catch (BKException bke) {
-          LOG.error("Couldn't delete ledger from bookkeeper", bke);
-        } catch (KeeperException ke) {
-          LOG.error("Error deleting ledger entry in zookeeper", ke);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void doPreUpgrade() throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void doUpgrade(Storage storage) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public long getJournalCTime() throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void doFinalize() throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
-      int targetLayoutVersion) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void doRollback() throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void discardSegments(long startTxId) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      bkc.close();
-      zkc.close();
-    } catch (BKException bke) {
-      throw new IOException("Couldn't close bookkeeper client", bke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted while closing journal manager", ie);
-    }
-  }
-
-  /**
-   * Set the amount of memory that this stream should use to buffer edits.
-   * Setting this will only affect future output streams. Streams
-   * that have already been created won't be affected.
-   */
-  @Override
-  public void setOutputBufferCapacity(int size) {
-    conf.setInt(BKJM_OUTPUT_BUFFER_SIZE, size);
-  }
-
-  /**
-   * Find the id of the last edit log transaction written to an edit log
-   * ledger.
-   */
-  private long recoverLastTxId(EditLogLedgerMetadata l, boolean fence)
-      throws IOException, SegmentEmptyException {
-    LedgerHandle lh = null;
-    try {
-      if (fence) {
-        lh = bkc.openLedger(l.getLedgerId(),
-                            BookKeeper.DigestType.MAC,
-                            digestpw.getBytes(Charsets.UTF_8));
-      } else {
-        lh = bkc.openLedgerNoRecovery(l.getLedgerId(),
-                                      BookKeeper.DigestType.MAC,
-                                      digestpw.getBytes(Charsets.UTF_8));
-      }
-    } catch (BKException bke) {
-      throw new IOException("Exception opening ledger for " + l, bke);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted opening ledger for " + l, ie);
-    }
-
-    BookKeeperEditLogInputStream in = null;
-
-    try {
-      long lastAddConfirmed = lh.getLastAddConfirmed();
-      if (lastAddConfirmed == -1) {
-        throw new SegmentEmptyException();
-      }
-
-      in = new BookKeeperEditLogInputStream(lh, l, lastAddConfirmed);
-
-      long endTxId = HdfsServerConstants.INVALID_TXID;
-      FSEditLogOp op = in.readOp();
-      while (op != null) {
-        if (endTxId == HdfsServerConstants.INVALID_TXID
-            || op.getTransactionId() == endTxId+1) {
-          endTxId = op.getTransactionId();
-        }
-        op = in.readOp();
-      }
-      return endTxId;
-    } finally {
-      if (in != null) {
-        in.close();
-      }
-    }
-  }
-
-  /**
-   * Get a list of all segments in the journal.
-   */
-  List<EditLogLedgerMetadata> getLedgerList(boolean inProgressOk)
-      throws IOException {
-    return getLedgerList(-1, inProgressOk);
-  }
-
-  private List<EditLogLedgerMetadata> getLedgerList(long fromTxId,
-      boolean inProgressOk) throws IOException {
-    List<EditLogLedgerMetadata> ledgers
-      = new ArrayList<EditLogLedgerMetadata>();
-    try {
-      List<String> ledgerNames = zkc.getChildren(ledgerPath, false);
-      for (String ledgerName : ledgerNames) {
-        if (!inProgressOk && ledgerName.contains(BKJM_EDIT_INPROGRESS)) {
-          continue;
-        }
-        String ledgerMetadataPath = ledgerPath + "/" + ledgerName;
-        try {
-          EditLogLedgerMetadata editLogLedgerMetadata = EditLogLedgerMetadata
-              .read(zkc, ledgerMetadataPath);
-          if (editLogLedgerMetadata.getLastTxId() != HdfsServerConstants.INVALID_TXID
-              && editLogLedgerMetadata.getLastTxId() < fromTxId) {
-            // exclude closed edit segments that have already been read, but
-            // include inprogress edits, as these are handled by the caller
-            continue;
-          }
-          ledgers.add(editLogLedgerMetadata);
-        } catch (KeeperException.NoNodeException e) {
-          LOG.warn("ZNode: " + ledgerMetadataPath
-              + " might have been finalized and deleted."
-              + " So ignoring NoNodeException.");
-        }
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Exception reading ledger list from zk", e);
-    } catch (InterruptedException ie) {
-      Thread.currentThread().interrupt();
-      throw new IOException("Interrupted getting list of ledgers from zk", ie);
-    }
-
-    Collections.sort(ledgers, EditLogLedgerMetadata.COMPARATOR);
-    return ledgers;
-  }
-
-  /**
-   * Get the znode path for a finalized ledger.
-   */
-  String finalizedLedgerZNode(long startTxId, long endTxId) {
-    return String.format("%s/edits_%018d_%018d",
-                         ledgerPath, startTxId, endTxId);
-  }
-
-  /**
-   * Get the znode path for the inprogress node of the given start txid.
-   */
-  String inprogressZNode(long startTxid) {
-    return ledgerPath + "/inprogress_" + Long.toString(startTxid, 16);
-  }
-
-  @VisibleForTesting
-  void setZooKeeper(ZooKeeper zk) {
-    this.zkc = zk;
-  }
-
-  /**
-   * Simple watcher to notify when zookeeper has connected
-   */
-  private class ZkConnectionWatcher implements Watcher {
-    public void process(WatchedEvent event) {
-      if (Event.KeeperState.SyncConnected.equals(event.getState())) {
-        zkConnectLatch.countDown();
-      }
-    }
-  }
-  
-  private static class SegmentEmptyException extends IOException {
-  }
-}
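
A note on the recovery logic being removed above: recoverLastTxId() reads ops back from the ledger and only advances the end txid while the op ids stay consecutive. The following standalone sketch shows that scan in isolation; it is plain Java, not part of the removed code, and INVALID_TXID here is an illustrative stand-in for HdfsServerConstants.INVALID_TXID.

// Sketch only: given transaction ids in the order they were read from a
// segment, return the last id of the contiguous run, mirroring the loop in
// recoverLastTxId() above.
import java.util.Arrays;
import java.util.Iterator;

public class LastTxIdScan {
  static final long INVALID_TXID = -12345;  // illustrative placeholder

  static long lastContiguousTxId(Iterator<Long> txIds) {
    long endTxId = INVALID_TXID;
    while (txIds.hasNext()) {
      long txId = txIds.next();
      // accept the first op unconditionally, then only the next consecutive id
      if (endTxId == INVALID_TXID || txId == endTxId + 1) {
        endTxId = txId;
      }
    }
    return endTxId;
  }

  public static void main(String[] args) {
    // 1..4 are contiguous; 7 is ignored because of the gap after 4
    System.out.println(lastContiguousTxId(
        Arrays.asList(1L, 2L, 3L, 4L, 7L).iterator()));  // prints 4
  }
}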

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
deleted file mode 100644
index 32d65cb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.IOException;
-import java.net.InetAddress;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
-
-import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.CurrentInprogressProto;
-import com.google.protobuf.TextFormat;
-import static com.google.common.base.Charsets.UTF_8;
-
-/**
- * Distributed write-permission lock, using ZooKeeper. A caller first reads the
- * version number and the current inprogress node path stored under the
- * CurrentInprogress path. If a path is present, some other client is already
- * operating on the journal and the caller can act accordingly. If no
- * inprogress path is present, the caller can assume no other client is active
- * and should then record its own newly created inprogress node path. If other
- * activity has touched the node in the meantime, the version number will have
- * changed and the update will fail. This read-then-update protocol ensures
- * that only one node can continue after checking with CurrentInprogress.
- */
-
-class CurrentInprogress {
-  static final Log LOG = LogFactory.getLog(CurrentInprogress.class);
-
-  private final ZooKeeper zkc;
-  private final String currentInprogressNode;
-  private volatile int versionNumberForPermission = -1;
-  private final String hostName = InetAddress.getLocalHost().toString();
-
-  CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
-    this.currentInprogressNode = lockpath;
-    this.zkc = zkc;
-  }
-
-  void init() throws IOException {
-    try {
-      Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode,
-                                                      false);
-      if (isCurrentInprogressNodeExists == null) {
-        try {
-          zkc.create(currentInprogressNode, null, Ids.OPEN_ACL_UNSAFE,
-                     CreateMode.PERSISTENT);
-        } catch (NodeExistsException e) {
-          // Node might have been created by another process at the same
-          // time. Ignore it.
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(currentInprogressNode + " already created by another process.",
-                      e);
-          }
-        }
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Exception accessing Zookeeper", e);
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted accessing Zookeeper", ie);
-    }
-  }
-
-  /**
-   * Update the CurrentInprogress node with the given path and the local
-   * hostname, using the version number read earlier.
-   *
-   * @param path
-   *          - inprogress node path to record in zookeeper
-   * @throws IOException
-   */
-  void update(String path) throws IOException {
-    CurrentInprogressProto.Builder builder = CurrentInprogressProto.newBuilder();
-    builder.setPath(path).setHostname(hostName);
-
-    String content = TextFormat.printToString(builder.build());
-
-    try {
-      zkc.setData(this.currentInprogressNode, content.getBytes(UTF_8),
-          this.versionNumberForPermission);
-    } catch (KeeperException e) {
-      throw new IOException("Exception when setting the data "
-          + "[" + content + "] to CurrentInprogress. ", e);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted while setting the data "
-          + "[" + content + "] to CurrentInprogress", e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Updated data[" + content + "] to CurrentInprogress");
-    }
-  }
-
-  /**
-   * Read the CurrentInprogress node data from ZooKeeper and remember its znode
-   * version number. Returns the inprogress node path that was saved via the
-   * #update api.
-   *
-   * @return the available inprogress node path, or null if none is available.
-   * @throws IOException
-   */
-  String read() throws IOException {
-    Stat stat = new Stat();
-    byte[] data = null;
-    try {
-      data = zkc.getData(this.currentInprogressNode, false, stat);
-    } catch (KeeperException e) {
-      throw new IOException("Exception while reading the data from "
-          + currentInprogressNode, e);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted while reading data from "
-          + currentInprogressNode, e);
-    }
-    this.versionNumberForPermission = stat.getVersion();
-    if (data != null) {
-      CurrentInprogressProto.Builder builder = CurrentInprogressProto.newBuilder();
-      TextFormat.merge(new String(data, UTF_8), builder);
-      if (!builder.isInitialized()) {
-        throw new IOException("Invalid/Incomplete data in znode");
-      }
-      return builder.build().getPath();
-    } else {
-      LOG.debug("No data available in CurrentInprogress");
-    }
-    return null;
-  }
-
-  /** Clear the CurrentInprogress node data */
-  void clear() throws IOException {
-    try {
-      zkc.setData(this.currentInprogressNode, null, versionNumberForPermission);
-    } catch (KeeperException e) {
-      throw new IOException(
-          "Exception when setting the data to CurrentInprogress node", e);
-    } catch (InterruptedException e) {
-      throw new IOException(
-          "Interrupted when setting the data to CurrentInprogress node", e);
-    }
-    LOG.debug("Cleared the data from CurrentInprogress");
-  }
-
-}
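
CurrentInprogress builds its write-permission check on ZooKeeper's conditional setData: read the data together with its Stat version, then update only if the version is unchanged. The sketch below shows that pattern with the plain ZooKeeper client; the ensemble address and znode path are illustrative assumptions and the znode is assumed to already exist (cf. init() above).

// Minimal sketch of the versioned read-then-update pattern, not the removed
// CurrentInprogress code itself.
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class VersionedUpdateSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, null);
    String node = "/demo/CurrentInprogress";  // illustrative path

    Stat stat = new Stat();
    byte[] current = zk.getData(node, false, stat);  // read data + version
    if (current != null && current.length > 0) {
      System.out.println("another writer already recorded an inprogress path");
    } else {
      try {
        // only succeeds if nobody has touched the node since we read it
        zk.setData(node, "inprogress_1".getBytes("UTF-8"), stat.getVersion());
      } catch (KeeperException.BadVersionException e) {
        System.out.println("lost the race: version changed, back off");
      }
    }
    zk.close();
  }
}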

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
deleted file mode 100644
index 2d1f8b9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.IOException;
-import java.util.Comparator;
-
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.KeeperException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.EditLogLedgerProto;
-import com.google.protobuf.TextFormat;
-import static com.google.common.base.Charsets.UTF_8;
-
-/**
- * Utility class for storing the metadata associated 
- * with a single edit log segment, stored in a single ledger
- */
-public class EditLogLedgerMetadata {
-  static final Log LOG = LogFactory.getLog(EditLogLedgerMetadata.class);
-
-  private String zkPath;
-  private final int dataLayoutVersion;
-  private final long ledgerId;
-  private final long firstTxId;
-  private long lastTxId;
-  private boolean inprogress;
-  
-  public static final Comparator<EditLogLedgerMetadata> COMPARATOR
-    = new Comparator<EditLogLedgerMetadata>() {
-    public int compare(EditLogLedgerMetadata o1,
-        EditLogLedgerMetadata o2) {
-      if (o1.firstTxId < o2.firstTxId) {
-        return -1;
-      } else if (o1.firstTxId == o2.firstTxId) {
-        return 0;
-      } else {
-        return 1;
-      }
-    }
-  };
-
-  EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
-                        long ledgerId, long firstTxId) {
-    this.zkPath = zkPath;
-    this.dataLayoutVersion = dataLayoutVersion;
-    this.ledgerId = ledgerId;
-    this.firstTxId = firstTxId;
-    this.lastTxId = HdfsServerConstants.INVALID_TXID;
-    this.inprogress = true;
-  }
-  
-  EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
-                        long ledgerId, long firstTxId,
-                        long lastTxId) {
-    this.zkPath = zkPath;
-    this.dataLayoutVersion = dataLayoutVersion;
-    this.ledgerId = ledgerId;
-    this.firstTxId = firstTxId;
-    this.lastTxId = lastTxId;
-    this.inprogress = false;
-  }
-
-  String getZkPath() {
-    return zkPath;
-  }
-
-  long getFirstTxId() {
-    return firstTxId;
-  }
-  
-  long getLastTxId() {
-    return lastTxId;
-  }
-  
-  long getLedgerId() {
-    return ledgerId;
-  }
-  
-  boolean isInProgress() {
-    return this.inprogress;
-  }
-
-  int getDataLayoutVersion() {
-    return this.dataLayoutVersion;
-  }
-
-  void finalizeLedger(long newLastTxId) {
-    assert this.lastTxId == HdfsServerConstants.INVALID_TXID;
-    this.lastTxId = newLastTxId;
-    this.inprogress = false;      
-  }
-  
-  static EditLogLedgerMetadata read(ZooKeeper zkc, String path)
-      throws IOException, KeeperException.NoNodeException  {
-    try {
-      byte[] data = zkc.getData(path, false, null);
-
-      EditLogLedgerProto.Builder builder = EditLogLedgerProto.newBuilder();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Reading " + path + " data: " + new String(data, UTF_8));
-      }
-      TextFormat.merge(new String(data, UTF_8), builder);
-      if (!builder.isInitialized()) {
-        throw new IOException("Invalid/Incomplete data in znode");
-      }
-      EditLogLedgerProto ledger = builder.build();
-
-      int dataLayoutVersion = ledger.getDataLayoutVersion();
-      long ledgerId = ledger.getLedgerId();
-      long firstTxId = ledger.getFirstTxId();
-      if (ledger.hasLastTxId()) {
-        long lastTxId = ledger.getLastTxId();
-        return new EditLogLedgerMetadata(path, dataLayoutVersion,
-                                         ledgerId, firstTxId, lastTxId);
-      } else {
-        return new EditLogLedgerMetadata(path, dataLayoutVersion,
-                                         ledgerId, firstTxId);
-      }
-    } catch(KeeperException.NoNodeException nne) {
-      throw nne;
-    } catch(KeeperException ke) {
-      throw new IOException("Error reading from zookeeper", ke);
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted reading from zookeeper", ie);
-    }
-  }
-    
-  void write(ZooKeeper zkc, String path)
-      throws IOException, KeeperException.NodeExistsException {
-    this.zkPath = path;
-
-    EditLogLedgerProto.Builder builder = EditLogLedgerProto.newBuilder();
-    builder.setDataLayoutVersion(dataLayoutVersion)
-      .setLedgerId(ledgerId).setFirstTxId(firstTxId);
-
-    if (!inprogress) {
-      builder.setLastTxId(lastTxId);
-    }
-    try {
-      zkc.create(path, TextFormat.printToString(builder.build()).getBytes(UTF_8),
-                 Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-    } catch (KeeperException.NodeExistsException nee) {
-      throw nee;
-    } catch (KeeperException e) {
-      throw new IOException("Error creating ledger znode", e);
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted creating ledger znode", ie);
-    }
-  }
-  
-  boolean verify(ZooKeeper zkc, String path) {
-    try {
-      EditLogLedgerMetadata other = read(zkc, path);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Verifying " + this.toString() 
-                  + " against " + other);
-      }
-      return other.equals(this);
-    } catch (KeeperException e) {
-      LOG.error("Couldn't verify data in " + path, e);
-      return false;
-    } catch (IOException ie) {
-      LOG.error("Couldn't verify data in " + path, ie);
-      return false;
-    }
-  }
-  
-  public boolean equals(Object o) {
-    if (!(o instanceof EditLogLedgerMetadata)) {
-      return false;
-    }
-    EditLogLedgerMetadata ol = (EditLogLedgerMetadata)o;
-    return ledgerId == ol.ledgerId
-      && dataLayoutVersion == ol.dataLayoutVersion
-      && firstTxId == ol.firstTxId
-      && lastTxId == ol.lastTxId;
-  }
-
-  public int hashCode() {
-    int hash = 1;
-    hash = hash * 31 + (int) ledgerId;
-    hash = hash * 31 + (int) firstTxId;
-    hash = hash * 31 + (int) lastTxId;
-    hash = hash * 31 + dataLayoutVersion;
-    return hash;
-  }
-    
-  public String toString() {
-    return "[LedgerId:"+ledgerId +
-      ", firstTxId:" + firstTxId +
-      ", lastTxId:" + lastTxId +
-      ", dataLayoutVersion:" + dataLayoutVersion + "]";
-  }
-
-}
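
getLedgerList() in the journal manager above sorts these segment descriptors with COMPARATOR (ordering by firstTxId) before getNumberOfTransactions() walks them looking for gaps. The standalone sketch below shows that sort-and-walk; Segment is a small stand-in class for illustration, not the removed EditLogLedgerMetadata.

// Sketch only: sort segments by firstTxId, then count transactions while
// checking that each segment starts where the previous one ended.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SegmentGapCheck {
  static class Segment {
    final long firstTxId, lastTxId;
    Segment(long first, long last) { firstTxId = first; lastTxId = last; }
  }

  public static void main(String[] args) {
    List<Segment> segments = new ArrayList<>();
    segments.add(new Segment(101, 200));
    segments.add(new Segment(1, 100));
    segments.add(new Segment(301, 400));  // gap: 201..300 missing

    segments.sort(Comparator.comparingLong(s -> s.firstTxId));

    long expectedStart = segments.get(0).firstTxId;
    long count = 0;
    for (Segment s : segments) {
      if (s.firstTxId != expectedStart) {
        System.out.println("gap before txid " + s.firstTxId
            + ", expected " + expectedStart);
        break;
      }
      count += (s.lastTxId - s.firstTxId) + 1;
      expectedStart = s.lastTxId + 1;
    }
    System.out.println("contiguous transactions counted: " + count);  // 200
  }
}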

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
deleted file mode 100644
index 5a2eefa..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/MaxTxId.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.contrib.bkjournal;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
-
-import org.apache.hadoop.contrib.bkjournal.BKJournalProtos.MaxTxIdProto;
-import com.google.protobuf.TextFormat;
-import static com.google.common.base.Charsets.UTF_8;
-
-/**
- * Utility class for storing and reading
- * the max seen txid in zookeeper
- */
-class MaxTxId {
-  static final Log LOG = LogFactory.getLog(MaxTxId.class);
-  
-  private final ZooKeeper zkc;
-  private final String path;
-
-  private Stat currentStat;
-
-  MaxTxId(ZooKeeper zkc, String path) {
-    this.zkc = zkc;
-    this.path = path;
-  }
-
-  synchronized void store(long maxTxId) throws IOException {
-    long currentMax = get();
-    if (currentMax < maxTxId) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Setting maxTxId to " + maxTxId);
-      }
-      reset(maxTxId);
-    }
-  }
-
-  synchronized void reset(long maxTxId) throws IOException {
-    try {
-      MaxTxIdProto.Builder builder = MaxTxIdProto.newBuilder().setTxId(maxTxId);
-
-      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
-      if (currentStat != null) {
-        currentStat = zkc.setData(path, data, currentStat
-            .getVersion());
-      } else {
-        zkc.create(path, data, Ids.OPEN_ACL_UNSAFE,
-                   CreateMode.PERSISTENT);
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Error writing max tx id", e);
-    } catch (InterruptedException e) {
-      throw new IOException("Interrupted while writing max tx id", e);
-    }
-  }
-
-  synchronized long get() throws IOException {
-    try {
-      currentStat = zkc.exists(path, false);
-      if (currentStat == null) {
-        return 0;
-      } else {
-
-        byte[] bytes = zkc.getData(path, false, currentStat);
-
-        MaxTxIdProto.Builder builder = MaxTxIdProto.newBuilder();
-        TextFormat.merge(new String(bytes, UTF_8), builder);
-        if (!builder.isInitialized()) {
-          throw new IOException("Invalid/Incomplete data in znode");
-        }
-
-        return builder.build().getTxId();
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Error reading the max tx id from zk", e);
-    } catch (InterruptedException ie) {
-      throw new IOException("Interrupted while reading thr max tx id", ie);
-    }
-  }
-}
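
MaxTxId.reset() uses the related create-or-conditionally-update pattern: create the znode on first use, otherwise overwrite it guarded by the version from the last exists() call. A minimal sketch under the same assumptions as before (illustrative ensemble address and path, parent znode already present):

// Sketch only: create the znode if absent, else update it with a version
// check so a concurrent writer cannot be silently overwritten.
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MaxValueZNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, null);
    String path = "/demo/maxtxid";  // illustrative; parent must already exist
    byte[] data = Long.toString(42L).getBytes("UTF-8");

    Stat stat = zk.exists(path, false);  // null if the znode is absent
    if (stat == null) {
      zk.create(path, data, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } else {
      // fails with BadVersionException if someone else wrote in between
      zk.setData(path, data, stat.getVersion());
    }
    zk.close();
  }
}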

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31195488/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto
deleted file mode 100644
index 15fa479..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// This file contains protocol buffers that are used by bkjournal
-// mostly for storing data in zookeeper
-
-option java_package = "org.apache.hadoop.contrib.bkjournal";
-option java_outer_classname = "BKJournalProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "hdfs.proto";
-import "HdfsServer.proto";
-
-message VersionProto {
-  required int32 layoutVersion = 1;
-  optional NamespaceInfoProto namespaceInfo = 2;
-}
-
-message EditLogLedgerProto {
-  required int32 dataLayoutVersion = 1;
-  required int64 ledgerId = 2;
-  required int64 firstTxId = 3;
-  optional int64 lastTxId = 4;
-}
-
-message MaxTxIdProto {
-  required int64 txId = 1;
-}
-
-message CurrentInprogressProto {
-  required string path = 1;
-  optional string hostname = 2;
-}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/52] [abbrv] hadoop git commit: HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text from public constants. Contributed by Xiao Chen.

Posted by cn...@apache.org.
HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text from public constants. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e853be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e853be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e853be6

Branch: refs/heads/HADOOP-13037
Commit: 2e853be6577a5b98fd860e6d64f89ca6d160514a
Parents: 3565c9a
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Oct 7 13:46:27 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Oct 7 13:46:27 2016 -0700

----------------------------------------------------------------------
 .../hadoop/security/KerberosAuthException.java  | 118 +++++++++++++++++++
 .../hadoop/security/UGIExceptionMessages.java   |  46 ++++++++
 .../hadoop/security/UserGroupInformation.java   |  74 +++++++-----
 3 files changed, 209 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
new file mode 100644
index 0000000..811c7c9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import static org.apache.hadoop.security.UGIExceptionMessages.*;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Thrown when {@link UserGroupInformation} failed with an unrecoverable error,
+ * such as failure in kerberos login/logout, invalid subject etc.
+ *
+ * Caller should not retry when catching this exception.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class KerberosAuthException extends IOException {
+  static final long serialVersionUID = 31L;
+
+  private String user;
+  private String principal;
+  private String keytabFile;
+  private String ticketCacheFile;
+  private String initialMessage;
+
+  public KerberosAuthException(String msg) {
+    super(msg);
+  }
+
+  public KerberosAuthException(Throwable cause) {
+    super(cause);
+  }
+
+  public KerberosAuthException(String initialMsg, Throwable cause) {
+    this(cause);
+    initialMessage = initialMsg;
+  }
+
+  public void setUser(final String u) {
+    user = u;
+  }
+
+  public void setPrincipal(final String p) {
+    principal = p;
+  }
+
+  public void setKeytabFile(final String k) {
+    keytabFile = k;
+  }
+
+  public void setTicketCacheFile(final String t) {
+    ticketCacheFile = t;
+  }
+
+  /** @return The initial message, or null if not set. */
+  public String getInitialMessage() {
+    return initialMessage;
+  }
+
+  /** @return The keytab file path, or null if not set. */
+  public String getKeytabFile() {
+    return keytabFile;
+  }
+
+  /** @return The principal, or null if not set. */
+  public String getPrincipal() {
+    return principal;
+  }
+
+  /** @return The ticket cache file path, or null if not set. */
+  public String getTicketCacheFile() {
+    return ticketCacheFile;
+  }
+
+  /** @return The user, or null if not set. */
+  public String getUser() {
+    return user;
+  }
+
+  @Override
+  public String getMessage() {
+    final StringBuilder sb = new StringBuilder();
+    if (initialMessage != null) {
+      sb.append(initialMessage);
+    }
+    if (user != null) {
+      sb.append(FOR_USER + user);
+    }
+    if (principal != null) {
+      sb.append(FOR_PRINCIPAL + principal);
+    }
+    if (keytabFile != null) {
+      sb.append(FROM_KEYTAB + keytabFile);
+    }
+    if (ticketCacheFile != null) {
+      sb.append(USING_TICKET_CACHE_FILE + ticketCacheFile);
+    }
+    sb.append(" " + super.getMessage());
+    return sb.toString();
+  }
+}
\ No newline at end of file
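
The point of the new exception type is that callers get structured context (user, principal, keytab, ticket cache) instead of parsing message strings. A hedged usage sketch follows; the principal and keytab path are made-up illustrative values.

// Sketch only: catching KerberosAuthException from a UGI keytab login and
// reading its structured fields.
import java.io.IOException;
import org.apache.hadoop.security.KerberosAuthException;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiLoginExample {
  public static void main(String[] args) {
    try {
      UserGroupInformation.loginUserFromKeytab(
          "hdfs/namenode.example.com@EXAMPLE.COM",   // illustrative principal
          "/etc/security/keytabs/nn.keytab");        // illustrative keytab path
    } catch (KerberosAuthException e) {
      // structured context, no string parsing of the message required
      System.err.println("login failed for user " + e.getUser()
          + " using keytab " + e.getKeytabFile());
    } catch (IOException e) {
      System.err.println("other I/O failure: " + e);
    }
  }
}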

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
new file mode 100644
index 0000000..c4d30e5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+/**
+ * Standard strings to use in exception messages
+ * in {@link KerberosAuthException} when throwing.
+ */
+final class UGIExceptionMessages {
+
+  public static final String FAILURE_TO_LOGIN = "failure to login:";
+  public static final String FOR_USER = " for user: ";
+  public static final String FOR_PRINCIPAL = " for principal: ";
+  public static final String FROM_KEYTAB = " from keytab ";
+  public static final String LOGIN_FAILURE = "Login failure";
+  public static final String LOGOUT_FAILURE = "Logout failure";
+  public static final String MUST_FIRST_LOGIN =
+      "login must be done first";
+  public static final String MUST_FIRST_LOGIN_FROM_KEYTAB =
+      "loginUserFromKeyTab must be done first";
+  public static final String SUBJECT_MUST_CONTAIN_PRINCIPAL =
+      "Provided Subject must contain a KerberosPrincipal";
+  public static final String SUBJECT_MUST_NOT_BE_NULL =
+      "Subject must not be null";
+  public static final String USING_TICKET_CACHE_FILE =
+      " using ticket cache file: ";
+
+  //checkstyle: Utility classes should not have a public or default constructor.
+  private UGIExceptionMessages() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 637e3fa..329859d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_MET
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
+import static org.apache.hadoop.security.UGIExceptionMessages.*;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import java.io.File;
@@ -755,8 +756,11 @@ public class UserGroupInformation {
       ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
       return ugi;
     } catch (LoginException le) {
-      throw new IOException("failure to login using ticket cache file " +
-          ticketCache, le);
+      KerberosAuthException kae =
+          new KerberosAuthException(FAILURE_TO_LOGIN, le);
+      kae.setUser(user);
+      kae.setTicketCacheFile(ticketCache);
+      throw kae;
     }
   }
 
@@ -765,16 +769,17 @@ public class UserGroupInformation {
    *
    * @param subject             The KerberosPrincipal to use in UGI
    *
-   * @throws IOException        if the kerberos login fails
+   * @throws IOException
+   * @throws KerberosAuthException if the kerberos login fails
    */
   public static UserGroupInformation getUGIFromSubject(Subject subject)
       throws IOException {
     if (subject == null) {
-      throw new IOException("Subject must not be null");
+      throw new KerberosAuthException(SUBJECT_MUST_NOT_BE_NULL);
     }
 
     if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) {
-      throw new IOException("Provided Subject must contain a KerberosPrincipal");
+      throw new KerberosAuthException(SUBJECT_MUST_CONTAIN_PRINCIPAL);
     }
 
     KerberosPrincipal principal =
@@ -894,7 +899,7 @@ public class UserGroupInformation {
       loginUser.spawnAutoRenewalThreadForUserCreds();
     } catch (LoginException le) {
       LOG.debug("failure to login", le);
-      throw new IOException("failure to login: " + le, le);
+      throw new KerberosAuthException(FAILURE_TO_LOGIN, le);
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("UGI loginUser:"+loginUser);
@@ -1001,7 +1006,8 @@ public class UserGroupInformation {
    * file and logs them in. They become the currently logged-in user.
    * @param user the principal name to load from the keytab
    * @param path the path to the keytab file
-   * @throws IOException if the keytab file can't be read
+   * @throws IOException
+   * @throws KerberosAuthException if the Kerberos login fails.
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
@@ -1030,8 +1036,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + user + " from keytab " + 
-                            path+ ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(user);
+      kae.setKeytabFile(path);
+      throw kae;
     }
     LOG.info("Login successful for user " + keytabPrincipal
         + " using keytab file " + keytabFile);
@@ -1042,8 +1050,9 @@ public class UserGroupInformation {
    * This method assumes that the user logged in by calling
    * {@link #loginUserFromKeytab(String, String)}.
    *
-   * @throws IOException if a failure occurred in logout, or if the user did
-   * not log in by invoking loginUserFromKeyTab() before.
+   * @throws IOException
+   * @throws KerberosAuthException if a failure occurred in logout,
+   * or if the user did not log in by invoking loginUserFromKeyTab() before.
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
@@ -1054,7 +1063,7 @@ public class UserGroupInformation {
     }
     LoginContext login = getLogin();
     if (login == null || keytabFile == null) {
-      throw new IOException("loginUserFromKeytab must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
 
     try {
@@ -1065,9 +1074,10 @@ public class UserGroupInformation {
         login.logout();
       }
     } catch (LoginException le) {
-      throw new IOException("Logout failure for " + user + " from keytab " +
-          keytabFile + ": " + le,
-          le);
+      KerberosAuthException kae = new KerberosAuthException(LOGOUT_FAILURE, le);
+      kae.setUser(user.toString());
+      kae.setKeytabFile(keytabFile);
+      throw kae;
     }
 
     LOG.info("Logout successful for user " + keytabPrincipal
@@ -1078,6 +1088,7 @@ public class UserGroupInformation {
    * Re-login a user from keytab if TGT is expired or is close to expiry.
    * 
    * @throws IOException
+   * @throws KerberosAuthException if the Kerberos re-login fails.
    */
   public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
     if (!isSecurityEnabled()
@@ -1099,12 +1110,12 @@ public class UserGroupInformation {
    * happened already.
    * The Subject field of this UserGroupInformation object is updated to have
    * the new credentials.
-   * @throws IOException on a failure
+   * @throws IOException
+   * @throws KerberosAuthException on a failure
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromKeytab()
-  throws IOException {
+  public synchronized void reloginFromKeytab() throws IOException {
     if (!isSecurityEnabled() ||
          user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
          !isKeytab)
@@ -1124,7 +1135,7 @@ public class UserGroupInformation {
     
     LoginContext login = getLogin();
     if (login == null || keytabFile == null) {
-      throw new IOException("loginUserFromKeyTab must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
     
     long start = 0;
@@ -1156,8 +1167,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + keytabPrincipal + 
-          " from keytab " + keytabFile + ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setPrincipal(keytabPrincipal);
+      kae.setKeytabFile(keytabFile);
+      throw kae;
     } 
   }
 
@@ -1166,19 +1179,19 @@ public class UserGroupInformation {
    * method assumes that login had happened already.
    * The Subject field of this UserGroupInformation object is updated to have
    * the new credentials.
-   * @throws IOException on a failure
+   * @throws IOException
+   * @throws KerberosAuthException on a failure
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromTicketCache()
-  throws IOException {
+  public synchronized void reloginFromTicketCache() throws IOException {
     if (!isSecurityEnabled() || 
         user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
         !isKrbTkt)
       return;
     LoginContext login = getLogin();
     if (login == null) {
-      throw new IOException("login must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN);
     }
     long now = Time.now();
     if (!hasSufficientTimeElapsed(now)) {
@@ -1205,8 +1218,9 @@ public class UserGroupInformation {
       login.login();
       setLogin(login);
     } catch (LoginException le) {
-      throw new IOException("Login failure for " + getUserName() + ": " + le,
-          le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(getUserName());
+      throw kae;
     } 
   }
 
@@ -1252,8 +1266,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + user + " from keytab " + 
-                            path + ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(user);
+      kae.setKeytabFile(path);
+      throw kae;
     } finally {
       if(oldKeytabFile != null) keytabFile = oldKeytabFile;
       if(oldKeytabPrincipal != null) keytabPrincipal = oldKeytabPrincipal;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/52] [abbrv] hadoop git commit: HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.

Posted by cn...@apache.org.
HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b32b142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b32b142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b32b142

Branch: refs/heads/HADOOP-13037
Commit: 4b32b1420d98ea23460d05ae94f2698109b3d6f7
Parents: 2fb392a
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Oct 11 13:21:33 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Oct 11 13:21:33 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/compress/SnappyCodec.java  | 30 +++++++++++---------
 1 file changed, 16 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b32b142/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 2a9c5d0..20a4cd6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,20 +60,22 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    * Are the native snappy libraries loaded & initialized?
    */
   public static void checkNativeCodeLoaded() {
-      if (!NativeCodeLoader.isNativeCodeLoaded() ||
-          !NativeCodeLoader.buildSupportsSnappy()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "this version of libhadoop was built without " +
-            "snappy support.");
-      }
-      if (!SnappyCompressor.isNativeCodeLoaded()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "SnappyCompressor has not been loaded.");
-      }
-      if (!SnappyDecompressor.isNativeCodeLoaded()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "SnappyDecompressor has not been loaded.");
-      }
+    if (!NativeCodeLoader.buildSupportsSnappy()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "this version of libhadoop was built without " +
+          "snappy support.");
+    }
+    if (!NativeCodeLoader.isNativeCodeLoaded()) {
+      throw new RuntimeException("Failed to load libhadoop.");
+    }
+    if (!SnappyCompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "SnappyCompressor has not been loaded.");
+    }
+    if (!SnappyDecompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "SnappyDecompressor has not been loaded.");
+    }
   }
   
   public static boolean isNativeCodeLoaded() {
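
With this change the RuntimeException messages distinguish "libhadoop was built without snappy support" from "libhadoop could not be loaded at all". Callers that want to degrade gracefully can still guard on the static availability check before constructing the codec; a small sketch:

// Sketch only: guard SnappyCodec use behind the availability check so the
// more specific error messages above are only hit when snappy is required.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.SnappyCodec;

public class SnappyGuardExample {
  public static void main(String[] args) {
    if (SnappyCodec.isNativeCodeLoaded()) {
      SnappyCodec codec = new SnappyCodec();
      codec.setConf(new Configuration());
      System.out.println("snappy available, default extension: "
          + codec.getDefaultExtension());
    } else {
      System.out.println("snappy native libraries not loaded, falling back");
    }
  }
}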


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/52] [abbrv] hadoop git commit: HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang

Posted by cn...@apache.org.
HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c9a0106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c9a0106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c9a0106

Branch: refs/heads/HADOOP-13037
Commit: 3c9a01062e9097c2ed1db75318482543db2e382f
Parents: 61f0490
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Oct 11 16:29:30 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 16:29:30 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/http/server/FSOperations.java | 9 +++++++--
 .../hadoop/lib/service/hadoop/FileSystemAccessService.java  | 6 ++++--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml         | 8 ++++++++
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java   | 3 ++-
 .../hdfs/server/blockmanagement/TestBlockTokenWithDFS.java  | 3 ++-
 6 files changed, 26 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 46948f9..001bc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -48,6 +48,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
+
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
  */
@@ -462,7 +465,8 @@ public class FSOperations {
         blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = new FsPermission(permission);
-      int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+          HTTP_BUFFER_SIZE_DEFAULT);
       OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
       IOUtils.copyBytes(is, os, bufferSize, true);
       os.close();
@@ -752,7 +756,8 @@ public class FSOperations {
      */
     @Override
     public InputStream execute(FileSystem fs) throws IOException {
-      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+          HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
       return fs.open(path, bufferSize);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 0b767be..61d3b45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -50,6 +50,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
 @InterfaceAudience.Private
 public class FileSystemAccessService extends BaseService implements FileSystemAccess {
   private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
         throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
       }
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "kerberos");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
       UserGroupInformation.setConfiguration(conf);
       try {
         UserGroupInformation.loginUserFromKeytab(principal, keytab);
@@ -169,7 +171,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
       LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
     } else if (security.equals("simple")) {
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "simple");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
       UserGroupInformation.setConfiguration(conf);
       LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df45e2a..18209ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -992,6 +992,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.disk.balancer.plan.threshold.percent";
   public static final int DFS_DISK_BALANCER_PLAN_THRESHOLD_DEFAULT = 10;
 
+  public static final String HTTPFS_BUFFER_SIZE_KEY =
+      "httpfs.buffer.size";
+  public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
 
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 672b597..db4035d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4273,4 +4273,12 @@
       consecutive warnings within this interval.</description>
   </property>
 
+  <property>
+    <name>httpfs.buffer.size</name>
+    <value>4096</value>
+    <description>
+      The size of the buffer to be used when creating or opening HttpFS filesystem IO streams.
+    </description>
+  </property>
+
 </configuration>
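
For reference, a minimal sketch of how the new key is consumed on an open() call, mirroring the FSOperations hunk at the top of this patch. The constants come from the DFSConfigKeys hunk above; the Configuration setup and the /tmp/example path are illustrative only, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;

    public class HttpFSBufferSizeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Override the 4096-byte default, e.g. via httpfs.buffer.size in a
        // site file or programmatically as here.
        conf.setInt(HTTPFS_BUFFER_SIZE_KEY, 8192);
        // Resolve the buffer size once and hand it to open(), as the
        // FSOperations change above does.
        int bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY,
            HTTP_BUFFER_SIZE_DEFAULT);
        try (FileSystem fs = FileSystem.get(conf);
            FSDataInputStream in = fs.open(new Path("/tmp/example"), bufferSize)) {
          System.out.println("opened with buffer size " + bufferSize);
        }
      }
    }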

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 4147851..ae0f0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -89,7 +90,7 @@ public class TestFileAppend4 {
     
     // handle failures in the DFSClient pipeline quickly
     // (for cluster.shutdown(); fs.close() idiom)
-    conf.setInt("ipc.client.connect.max.retries", 1);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
   }
   
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 9374ae8..5a8a39a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -218,7 +219,7 @@ public class TestBlockTokenWithDFS {
     conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
-    conf.setInt("ipc.client.connect.max.retries", 0);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;




[24/52] [abbrv] hadoop git commit: HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. Contributed by Jing Zhao.

Posted by cn...@apache.org.
HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d106213
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d106213
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d106213

Branch: refs/heads/HADOOP-13037
Commit: 4d106213c0f4835b723c9a50bd8080a9017122d7
Parents: 6a38d11
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Oct 7 22:44:54 2016 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Oct 7 22:44:54 2016 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   6 +-
 ...constructStripedBlocksWithRackAwareness.java | 158 +++++++++++++++----
 2 files changed, 130 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8b74609..7949439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1781,8 +1781,12 @@ public class BlockManager implements BlockStatsMXBean {
 
   private boolean isInNewRack(DatanodeDescriptor[] srcs,
       DatanodeDescriptor target) {
+    LOG.debug("check if target {} increases racks, srcs={}", target,
+        Arrays.asList(srcs));
     for (DatanodeDescriptor src : srcs) {
-      if (src.getNetworkLocation().equals(target.getNetworkLocation())) {
+      if (!src.isDecommissionInProgress() &&
+          src.getNetworkLocation().equals(target.getNetworkLocation())) {
+        LOG.debug("the target {} is in the same rack with src {}", target, src);
         return false;
       }
     }
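
The core of the fix is easiest to see in isolation: a candidate target only fails to add a new rack when a source that is not being decommissioned already sits on the target's rack. A self-contained sketch of that check, with a hypothetical Node type standing in for DatanodeDescriptor:

    import java.util.Arrays;
    import java.util.List;

    public class RackCheckSketch {
      // Hypothetical stand-in for DatanodeDescriptor.
      static final class Node {
        final String rack;
        final boolean decommissioning;
        Node(String rack, boolean decommissioning) {
          this.rack = rack;
          this.decommissioning = decommissioning;
        }
      }

      // Mirrors isInNewRack(): a decommissioning source is about to go away,
      // so a target sharing its rack still increases the number of racks.
      static boolean isInNewRack(List<Node> srcs, Node target) {
        for (Node src : srcs) {
          if (!src.decommissioning && src.rack.equals(target.rack)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        Node decommissioningSrc = new Node("/r1", true);
        Node liveSrc = new Node("/r2", false);
        List<Node> srcs = Arrays.asList(decommissioningSrc, liveSrc);
        // /r1 only holds a decommissioning replica, so a target there counts
        // as a new rack; /r2 already has a live replica, so it does not.
        System.out.println(isInNewRack(srcs, new Node("/r1", false))); // true
        System.out.println(isInNewRack(srcs, new Node("/r2", false))); // false
      }
    }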

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 152e153..3bc13a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -35,12 +35,14 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -58,57 +60,44 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
   }
 
-  private static final String[] hosts = getHosts();
-  private static final String[] racks = getRacks();
+  private static final String[] hosts =
+      getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1);
+  private static final String[] racks =
+      getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1, NUM_DATA_BLOCKS);
 
-  private static String[] getHosts() {
-    String[] hosts = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
+  private static String[] getHosts(int numHosts) {
+    String[] hosts = new String[numHosts];
     for (int i = 0; i < hosts.length; i++) {
       hosts[i] = "host" + (i + 1);
     }
     return hosts;
   }
 
-  private static String[] getRacks() {
-    String[] racks = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
-    int numHostEachRack = (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 1) /
-        (NUM_DATA_BLOCKS - 1) + 1;
+  private static String[] getRacks(int numHosts, int numRacks) {
+    String[] racks = new String[numHosts];
+    int numHostEachRack = numHosts / numRacks;
+    int residue = numHosts % numRacks;
     int j = 0;
-    // we have NUM_DATA_BLOCKS racks
-    for (int i = 1; i <= NUM_DATA_BLOCKS; i++) {
-      if (j == racks.length - 1) {
-        assert i == NUM_DATA_BLOCKS;
+    for (int i = 1; i <= numRacks; i++) {
+      int limit = i <= residue ? numHostEachRack + 1 : numHostEachRack;
+      for (int k = 0; k < limit; k++) {
         racks[j++] = "/r" + i;
-      } else {
-        for (int k = 0; k < numHostEachRack && j < racks.length - 1; k++) {
-          racks[j++] = "/r" + i;
-        }
       }
     }
+    assert j == numHosts;
     return racks;
   }
 
   private MiniDFSCluster cluster;
+  private static final HdfsConfiguration conf = new HdfsConfiguration();
   private DistributedFileSystem fs;
-  private FSNamesystem fsn;
-  private BlockManager bm;
 
-  @Before
-  public void setup() throws Exception {
-    final HdfsConfiguration conf = new HdfsConfiguration();
+  @BeforeClass
+  public static void setup() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
-
-    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
-        .numDataNodes(hosts.length).build();
-    cluster.waitActive();
-
-    fsn = cluster.getNamesystem();
-    bm = fsn.getBlockManager();
-
-    fs = cluster.getFileSystem();
-    fs.setErasureCodingPolicy(new Path("/"), null);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
   }
 
   @After
@@ -132,6 +121,15 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     return dnProp;
   }
 
+  private DataNode getDataNode(String host) {
+    for (DataNode dn : cluster.getDataNodes()) {
+      if (dn.getDatanodeId().getHostName().equals(host)) {
+        return dn;
+      }
+    }
+    return null;
+  }
+
   /**
    * When there are all the internal blocks available but they are not placed on
    * enough racks, NameNode should avoid normal decoding reconstruction but copy
@@ -143,9 +141,19 @@ public class TestReconstructStripedBlocksWithRackAwareness {
    */
   @Test
   public void testReconstructForNotEnoughRacks() throws Exception {
+    LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts),
+        Arrays.asList(racks));
+
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+    FSNamesystem fsn = cluster.getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
         hosts[hosts.length - 1]);
-
     final Path file = new Path("/foo");
     // the file's block is in 9 dn but 5 racks
     DFSTestUtil.createFile(fs, file,
@@ -206,6 +214,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
   @Test
   public void testChooseExcessReplicasToDelete() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
         hosts[hosts.length - 1]);
 
@@ -242,4 +256,82 @@ public class TestReconstructStripedBlocksWithRackAwareness {
       Assert.assertFalse(dn.getHostName().equals("host1"));
     }
   }
+
+  /**
+   * In the case where we have 10 internal blocks on 5 racks, with 9 blocks
+   * live and 1 decommissioning, make sure the reconstruction happens correctly.
+   */
+  @Test
+  public void testReconstructionWithDecommission() throws Exception {
+    final String[] racks = getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2,
+        NUM_DATA_BLOCKS);
+    final String[] hosts = getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2);
+    // we now have 11 hosts on 6 racks with distribution: 2-2-2-2-2-1
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+
+    final BlockManager bm = cluster.getNamesystem().getBlockManager();
+    final DatanodeManager dm = bm.getDatanodeManager();
+
+    // stop h9 and h10 and create a file with 6+3 internal blocks
+    MiniDFSCluster.DataNodeProperties h9 = stopDataNode(hosts[hosts.length - 3]);
+    MiniDFSCluster.DataNodeProperties h10 = stopDataNode(hosts[hosts.length - 2]);
+    final Path file = new Path("/foo");
+    DFSTestUtil.createFile(fs, file,
+        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+    final BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
+        .getINode(file.toString()).asFile().getLastBlock();
+
+    // bring h9 back
+    cluster.restartDataNode(h9);
+    cluster.waitActive();
+
+    // stop h11 so that the reconstruction happens
+    MiniDFSCluster.DataNodeProperties h11 = stopDataNode(hosts[hosts.length - 1]);
+    boolean recovered = bm.countNodes(blockInfo).liveReplicas() >=
+        NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    for (int i = 0; i < 10 && !recovered; i++) {
+      Thread.sleep(1000);
+      recovered = bm.countNodes(blockInfo).liveReplicas() >=
+          NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    }
+    Assert.assertTrue(recovered);
+
+    // mark h9 as decommissioning
+    DataNode datanode9 = getDataNode(hosts[hosts.length - 3]);
+    Assert.assertNotNull(datanode9);
+    final DatanodeDescriptor dn9 = dm.getDatanode(datanode9.getDatanodeId());
+    dn9.startDecommission();
+
+    // restart h10 and h11
+    cluster.restartDataNode(h10);
+    cluster.restartDataNode(h11);
+    cluster.waitActive();
+    DataNodeTestUtils.triggerBlockReport(getDataNode(hosts[hosts.length - 1]));
+
+    // start decommissioning h9
+    boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo);
+    Assert.assertFalse(satisfied);
+    final DecommissionManager decomManager =
+        (DecommissionManager) Whitebox.getInternalState(dm, "decomManager");
+    cluster.getNamesystem().writeLock();
+    try {
+      dn9.stopDecommission();
+      decomManager.startDecommission(dn9);
+    } finally {
+      cluster.getNamesystem().writeUnlock();
+    }
+
+    // make sure the decommission finishes and the block is on 6 racks
+    boolean decommissioned = dn9.isDecommissioned();
+    for (int i = 0; i < 10 && !decommissioned; i++) {
+      Thread.sleep(1000);
+      decommissioned = dn9.isDecommissioned();
+    }
+    Assert.assertTrue(decommissioned);
+    Assert.assertTrue(bm.isPlacementPolicySatisfied(blockInfo));
+  }
 }
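
The "11 hosts on 6 racks with distribution: 2-2-2-2-2-1" comment in testReconstructionWithDecommission falls directly out of the new getRacks() helper. A standalone sketch of the same even-split-plus-residue calculation, reproduced here only to show the resulting layout for the 11-host, 6-rack case:

    public class RackDistributionSketch {
      // Same scheme as the test's getRacks(): each rack gets
      // numHosts / numRacks hosts, and the first numHosts % numRacks racks
      // get one extra host.
      static String[] getRacks(int numHosts, int numRacks) {
        String[] racks = new String[numHosts];
        int numHostEachRack = numHosts / numRacks;
        int residue = numHosts % numRacks;
        int j = 0;
        for (int i = 1; i <= numRacks; i++) {
          int limit = i <= residue ? numHostEachRack + 1 : numHostEachRack;
          for (int k = 0; k < limit; k++) {
            racks[j++] = "/r" + i;
          }
        }
        return racks;
      }

      public static void main(String[] args) {
        // 6 data + 3 parity + 2 extra hosts = 11 hosts on 6 racks.
        // Prints: /r1 /r1 /r2 /r2 /r3 /r3 /r4 /r4 /r5 /r5 /r6
        System.out.println(String.join(" ", getRacks(11, 6)));
      }
    }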




[47/52] [abbrv] hadoop git commit: HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.

Posted by cn...@apache.org.
HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61f0490a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61f0490a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61f0490a

Branch: refs/heads/HADOOP-13037
Commit: 61f0490a73085bbaf6639d9234277e59dc1145db
Parents: dacd3ec
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Oct 11 15:55:02 2016 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Oct 11 15:55:02 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSNamesystem.java      |  6 ++
 .../server/namenode/top/metrics/TopMetrics.java | 67 ++++++++++++++++++--
 .../server/namenode/metrics/TestTopMetrics.java | 63 ++++++++++++++++++
 3 files changed, 129 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2471dc8..b9b02ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,6 +89,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -989,6 +990,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // Add audit logger to calculate top users
     if (topConf.isEnabled) {
       topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+      if (DefaultMetricsSystem.instance().getSource(
+          TOPMETRICS_METRICS_SOURCE_NAME) == null) {
+        DefaultMetricsSystem.instance().register(TOPMETRICS_METRICS_SOURCE_NAME,
+            "Top N operations by user", topMetrics);
+      }
       auditLoggers.add(new TopAuditLogger(topMetrics));
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index ab55392..2719c88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.metrics;
 
-import java.net.InetAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.InetAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 
 /**
@@ -58,8 +66,11 @@ import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowMan
  * Thread-safe: relies on thread-safety of RollingWindowManager
  */
 @InterfaceAudience.Private
-public class TopMetrics {
+public class TopMetrics implements MetricsSource {
   public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
+  public static final String TOPMETRICS_METRICS_SOURCE_NAME =
+      "NNTopUserOpCounts";
+  private final boolean isMetricsSourceEnabled;
 
   private static void logConf(Configuration conf) {
     LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY +
@@ -83,6 +94,8 @@ public class TopMetrics {
       rollingWindowManagers.put(reportingPeriods[i], new RollingWindowManager(
           conf, reportingPeriods[i]));
     }
+    isMetricsSourceEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
+        DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
   }
 
   /**
@@ -128,4 +141,44 @@ public class TopMetrics {
           TopConf.ALL_CMDS, userName, 1);
     }
   }
+
+  /**
+   * Flatten out the top window metrics into
+   * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
+   * external metrics systems. Each metrics record added corresponds to the
+   * reporting period a.k.a window length of the configured rolling windows.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    if (!isMetricsSourceEnabled) {
+      return;
+    }
+
+    for (final TopWindow window : getTopWindows()) {
+      MetricsRecordBuilder rb = collector.addRecord(buildOpRecordName(window))
+          .setContext("dfs");
+      for (final Op op: window.getOps()) {
+        rb.addCounter(buildOpTotalCountMetricsInfo(op), op.getTotalCount());
+        for (User user : op.getTopUsers()) {
+          rb.addCounter(buildOpRecordMetricsInfo(op, user), user.getCount());
+        }
+      }
+    }
+  }
+
+  private String buildOpRecordName(TopWindow window) {
+    return TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs="
+      + window.getWindowLenMs();
+  }
+
+  private MetricsInfo buildOpTotalCountMetricsInfo(Op op) {
+    return Interns.info("op=" + StringUtils.deleteWhitespace(op.getOpType())
+      + ".TotalCount", "Total operation count");
+  }
+
+  private MetricsInfo buildOpRecordMetricsInfo(Op op, User user) {
+    return Interns.info("op=" + StringUtils.deleteWhitespace(op.getOpType())
+      + ".user=" + user.getUser()
+      + ".count", "Total operations performed by user");
+  }
 }
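
Taken together with the FSNamesystem hunk, the pattern is: implement MetricsSource, register it once with DefaultMetricsSystem under a stable source name, and emit one record per rolling window whose counters encode the op and user in the metric name. A condensed sketch of that pattern; the record and counter names reuse the ones built above, but the class, the fixed 60000 ms window, and the counter values are illustrative only:

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.Interns;

    public class TopMetricsSourceSketch implements MetricsSource {
      static final String SOURCE_NAME = "NNTopUserOpCounts";

      @Override
      public void getMetrics(MetricsCollector collector, boolean all) {
        // One record per reporting window: NNTopUserOpCounts.windowMs=<len>
        collector.addRecord(SOURCE_NAME + ".windowMs=" + 60000)
            .setContext("dfs")
            // op=<opType>.TotalCount, plus op=<opType>.user=<user>.count
            // per top user (the values of 3L are made up).
            .addCounter(Interns.info("op=listStatus.TotalCount",
                "Total operation count"), 3L)
            .addCounter(Interns.info("op=listStatus.user=test.count",
                "Total operations performed by user"), 3L);
      }

      public static void main(String[] args) {
        // Register once, guarded the same way FSNamesystem does when nntop
        // is enabled.
        if (DefaultMetricsSystem.instance().getSource(SOURCE_NAME) == null) {
          DefaultMetricsSystem.instance().register(SOURCE_NAME,
              "Top N operations by user", new TopMetricsSourceSketch());
        }
      }
    }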

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
new file mode 100644
index 0000000..4d3a4f0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
+import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test for MetricsSource part of the {@link TopMetrics} impl.
+ */
+public class TestTopMetrics {
+  @Test
+  public void testPresence() {
+    Configuration conf = new Configuration();
+    TopConf topConf = new TopConf(conf);
+    TopMetrics topMetrics = new TopMetrics(conf,
+        topConf.nntopReportingPeriodsMs);
+    // Dummy command
+    topMetrics.report("test", "listStatus");
+    topMetrics.report("test", "listStatus");
+    topMetrics.report("test", "listStatus");
+
+    MetricsRecordBuilder rb = getMetrics(topMetrics);
+    MetricsCollector mc = rb.parent();
+
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=60000");
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=300000");
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=1500000");
+
+    verify(rb, times(3)).addCounter(Interns.info("op=listStatus.TotalCount",
+        "Total operation count"), 3L);
+    verify(rb, times(3)).addCounter(Interns.info("op=*.TotalCount",
+        "Total operation count"), 3L);
+
+    verify(rb, times(3)).addCounter(Interns.info("op=listStatus." +
+        "user=test.count", "Total operations performed by user"), 3L);
+  }
+}




[31/52] [abbrv] hadoop git commit: HADOOP-13696. change hadoop-common dependency scope of jsch to provided. Contributed by Yuanbo Liu.

Posted by cn...@apache.org.
HADOOP-13696. change hadoop-common dependency scope of jsch to provided. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef61d50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef61d50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef61d50

Branch: refs/heads/HADOOP-13037
Commit: cef61d505e289f074130cc3981c20f7692437cee
Parents: af50da3
Author: Steve Loughran <st...@apache.org>
Authored: Mon Oct 10 12:32:39 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Oct 10 12:32:39 2016 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/pom.xml | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef61d50/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 54d1cdd..92582ae 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -235,6 +235,7 @@
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>




[21/52] [abbrv] hadoop git commit: MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szegedi@cloudera.com via rkanter)

Posted by cn...@apache.org.
MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szegedi@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3f37e6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3f37e6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3f37e6f

Branch: refs/heads/HADOOP-13037
Commit: f3f37e6fb8172f6434e06eb9a137c0c155b3952e
Parents: 2e853be
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Oct 7 14:47:06 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Oct 7 14:47:06 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  2 +-
 .../src/main/resources/mapred-default.xml       | 10 +++---
 .../apache/hadoop/mapred/JobClientUnitTest.java | 34 ++++++++++++++++----
 3 files changed, 34 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 5716404..1325b74 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -505,7 +505,7 @@ public interface MRJobConfig {
    */
   public static final String MR_CLIENT_JOB_MAX_RETRIES =
       MR_PREFIX + "client.job.max-retries";
-  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
+  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 3;
 
   /**
    * How long to wait between jobclient retries on failure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 73aaa7a..fe29212 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1505,12 +1505,12 @@
 
 <property>
   <name>yarn.app.mapreduce.client.job.max-retries</name>
-  <value>0</value>
+  <value>3</value>
   <description>The number of retries the client will make for getJob and
-  dependent calls.  The default is 0 as this is generally only needed for
-  non-HDFS DFS where additional, high level retries are required to avoid
-  spurious failures during the getJob call.  30 is a good value for
-  WASB</description>
+    dependent calls.
+    This is needed for non-HDFS DFS where additional, high level
+    retries are required to avoid spurious failures during the getJob call.
+    30 is a good value for WASB</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index 4895a5b..e02232d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -225,10 +225,10 @@ public class JobClientUnitTest {
 
     //To prevent the test from running for a very long time, lower the retry
     JobConf conf = new JobConf();
-    conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
+    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 2);
 
     TestJobClientGetJob client = new TestJobClientGetJob(conf);
-    JobID id = new JobID("ajob",1);
+    JobID id = new JobID("ajob", 1);
     RunningJob rj = mock(RunningJob.class);
     client.setRunningJob(rj);
 
@@ -236,13 +236,35 @@ public class JobClientUnitTest {
     assertNotNull(client.getJob(id));
     assertEquals(client.getLastGetJobRetriesCounter(), 0);
 
-    //3 retry
+    //2 retries
+    client.setGetJobRetries(2);
+    assertNotNull(client.getJob(id));
+    assertEquals(client.getLastGetJobRetriesCounter(), 2);
+
+    //beyond yarn.app.mapreduce.client.job.max-retries, will get null
     client.setGetJobRetries(3);
+    assertNull(client.getJob(id));
+  }
+
+  @Test
+  public void testGetJobRetryDefault() throws Exception {
+
+    //To prevent the test from running for a very long time, lower the retry
+    JobConf conf = new JobConf();
+
+    TestJobClientGetJob client = new TestJobClientGetJob(conf);
+    JobID id = new JobID("ajob", 1);
+    RunningJob rj = mock(RunningJob.class);
+    client.setRunningJob(rj);
+
+    //3 retries (default)
+    client.setGetJobRetries(MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
     assertNotNull(client.getJob(id));
-    assertEquals(client.getLastGetJobRetriesCounter(), 3);
+    assertEquals(client.getLastGetJobRetriesCounter(),
+        MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
 
-    //beyond MAPREDUCE_JOBCLIENT_GETJOB_MAX_RETRY_KEY, will get null
-    client.setGetJobRetries(5);
+    //beyond yarn.app.mapreduce.client.job.max-retries, will get null
+    client.setGetJobRetries(MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES + 1);
     assertNull(client.getJob(id));
   }
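
The new default of 3 can still be raised per job for stores where getJob is flakier. A small sketch of overriding it through JobConf, using the same MRJobConfig constants as the test above; the value 30 is just the WASB suggestion from the mapred-default.xml description, and the bare JobConf here is illustrative:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class JobClientRetrySketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Default is now DEFAULT_MR_CLIENT_JOB_MAX_RETRIES (3); raise it for
        // filesystems such as WASB where getJob can fail transiently.
        conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 30);
        System.out.println("job max retries = "
            + conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
                MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES));
      }
    }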
 




[18/52] [abbrv] hadoop git commit: HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars to prevent classpath conflicts. Contributed by Chris Nauroth.

Posted by cn...@apache.org.
HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars to prevent classpath conflicts. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69620f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69620f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69620f95

Branch: refs/heads/HADOOP-13037
Commit: 69620f955997250d1b543d86d4907ee50218152a
Parents: 3059b25
Author: Chris Nauroth <cn...@apache.org>
Authored: Fri Oct 7 11:41:19 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Fri Oct 7 11:41:19 2016 -0700

----------------------------------------------------------------------
 hadoop-tools/hadoop-aws/pom.xml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69620f95/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 49b0379..1c1bb02 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -286,6 +286,18 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>




[06/52] [abbrv] hadoop git commit: HADOOP-13323. Downgrade stack trace on FS load from Warn to debug. Contributed by Steve Loughran.

Posted by cn...@apache.org.
HADOOP-13323. Downgrade stack trace on FS load from Warn to debug. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d46c3f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d46c3f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d46c3f6

Branch: refs/heads/HADOOP-13037
Commit: 2d46c3f6b7d55b6a2f124d07fe26d37359615df4
Parents: 2cc841f
Author: Chris Nauroth <cn...@apache.org>
Authored: Thu Oct 6 10:57:01 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Thu Oct 6 10:57:01 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java  | 10 +++++++++-
 .../apache/hadoop/fs/TestFileSystemInitialization.java  | 12 ++++--------
 2 files changed, 13 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d46c3f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index c36598f..cc062c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2858,7 +2858,15 @@ public abstract class FileSystem extends Configured implements Closeable {
                   ClassUtil.findContainingJar(fs.getClass()), e);
             }
           } catch (ServiceConfigurationError ee) {
-            LOG.warn("Cannot load filesystem", ee);
+            LOG.warn("Cannot load filesystem: " + ee);
+            Throwable cause = ee.getCause();
+            // print all the nested exception messages
+            while (cause != null) {
+              LOG.warn(cause.toString());
+              cause = cause.getCause();
+            }
+            // and at debug: the full stack
+            LOG.debug("Stack Trace", ee);
           }
         }
         FILE_SYSTEMS_LOADED = true;
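
The change reduces noise with a small, reusable pattern: log a one-line summary and every nested cause message at WARN, and keep the full stack trace at DEBUG. A standalone sketch of the same idea using SLF4J; the method name and the fabricated exception are illustrative only:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CauseChainLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CauseChainLoggingSketch.class);

      static void logLoadFailure(Throwable error) {
        // One-line summary at WARN instead of a full stack trace.
        LOG.warn("Cannot load filesystem: " + error);
        // Walk the cause chain and print each nested message at WARN...
        Throwable cause = error.getCause();
        while (cause != null) {
          LOG.warn(cause.toString());
          cause = cause.getCause();
        }
        // ...and keep the complete stack available at DEBUG.
        LOG.debug("Stack Trace", error);
      }

      public static void main(String[] args) {
        logLoadFailure(new RuntimeException("service failed",
            new ClassNotFoundException("com.example.MissingDependency")));
      }
    }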

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d46c3f6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
index 18e8b01..4d627a5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
@@ -47,16 +47,12 @@ public class TestFileSystemInitialization {
 
   @Test
   public void testMissingLibraries() {
-    boolean catched = false;
     try {
       Configuration conf = new Configuration();
-      FileSystem.getFileSystemClass("s3a", conf);
-    } catch (Exception e) {
-      catched = true;
-    } catch (ServiceConfigurationError e) {
-      // S3A shouldn't find AWS SDK and fail
-      catched = true;
+      Class<? extends FileSystem> fs = FileSystem.getFileSystemClass("s3a",
+          conf);
+      fail("Expected an exception, got a filesystem: " + fs);
+    } catch (Exception | ServiceConfigurationError expected) {
     }
-    assertTrue(catched);
   }
 }




[14/52] [abbrv] hadoop git commit: HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail (ebadger via rkanter)

Posted by cn...@apache.org.
HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail (ebadger via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c183b9de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c183b9de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c183b9de

Branch: refs/heads/HADOOP-13037
Commit: c183b9de8d072a35dcde96a20b1550981f886e86
Parents: 459a483
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Oct 7 09:33:24 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Oct 7 09:33:31 2016 -0700

----------------------------------------------------------------------
 .../util/RolloverSignerSecretProvider.java      |   2 +-
 .../util/TestZKSignerSecretProvider.java        | 221 +++++++++----------
 2 files changed, 100 insertions(+), 123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c183b9de/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
index fda5572..66b2fde 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
 public abstract class RolloverSignerSecretProvider
     extends SignerSecretProvider {
 
-  private static Logger LOG = LoggerFactory.getLogger(
+  static Logger LOG = LoggerFactory.getLogger(
     RolloverSignerSecretProvider.class);
   /**
    * Stores the currently valid secrets.  The current secret is the 0th element

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c183b9de/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 8211314..5e640bb 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -17,7 +17,12 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.Random;
 import javax.servlet.ServletContext;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.curator.test.TestingServer;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -25,7 +30,6 @@ import org.junit.Test;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -34,9 +38,14 @@ public class TestZKSignerSecretProvider {
   private TestingServer zkServer;
 
   // rollover every 2 sec
-  private final int timeout = 4000;
+  private final int timeout = 100;
   private final long rolloverFrequency = timeout / 2;
 
+  static final Log LOG = LogFactory.getLog(TestZKSignerSecretProvider.class);
+  {
+    LogManager.getLogger( RolloverSignerSecretProvider.LOG.getName() ).setLevel(Level.DEBUG);
+  }
+
   @Before
   public void setup() throws Exception {
     zkServer = new TestingServer();
@@ -60,8 +69,8 @@ public class TestZKSignerSecretProvider {
     byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
     byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
     byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
-    ZKSignerSecretProvider secretProvider =
-        spy(new ZKSignerSecretProvider(seed));
+    MockZKSignerSecretProvider secretProvider =
+        spy(new MockZKSignerSecretProvider(seed));
     Properties config = new Properties();
     config.setProperty(
         ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
@@ -77,7 +86,8 @@ public class TestZKSignerSecretProvider {
       Assert.assertEquals(2, allSecrets.length);
       Assert.assertArrayEquals(secret1, allSecrets[0]);
       Assert.assertNull(allSecrets[1]);
-      verify(secretProvider, timeout(timeout).times(1)).rollSecret();
+      verify(secretProvider, timeout(timeout).atLeastOnce()).rollSecret();
+      secretProvider.realRollSecret();
 
       currentSecret = secretProvider.getCurrentSecret();
       allSecrets = secretProvider.getAllSecrets();
@@ -85,7 +95,8 @@ public class TestZKSignerSecretProvider {
       Assert.assertEquals(2, allSecrets.length);
       Assert.assertArrayEquals(secret2, allSecrets[0]);
       Assert.assertArrayEquals(secret1, allSecrets[1]);
-      verify(secretProvider, timeout(timeout).times(2)).rollSecret();
+      verify(secretProvider, timeout(timeout).atLeast(2)).rollSecret();
+      secretProvider.realRollSecret();
 
       currentSecret = secretProvider.getCurrentSecret();
       allSecrets = secretProvider.getAllSecrets();
@@ -93,128 +104,70 @@ public class TestZKSignerSecretProvider {
       Assert.assertEquals(2, allSecrets.length);
       Assert.assertArrayEquals(secret3, allSecrets[0]);
       Assert.assertArrayEquals(secret2, allSecrets[1]);
-      verify(secretProvider, timeout(timeout).times(3)).rollSecret();
+      verify(secretProvider, timeout(timeout).atLeast(3)).rollSecret();
+      secretProvider.realRollSecret();
     } finally {
       secretProvider.destroy();
     }
   }
 
-  @Test
-  public void testMultipleInit() throws Exception {
-    // use the same seed so we can predict the RNG
-    long seedA = System.currentTimeMillis();
-    Random rand = new Random(seedA);
-    byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
-    byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
-    // use the same seed so we can predict the RNG
-    long seedB = System.currentTimeMillis() + rand.nextLong();
-    rand = new Random(seedB);
-    byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
-    byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
-    // use the same seed so we can predict the RNG
-    long seedC = System.currentTimeMillis() + rand.nextLong();
-    rand = new Random(seedC);
-    byte[] secretC2 = Long.toString(rand.nextLong()).getBytes();
-    byte[] secretC1 = Long.toString(rand.nextLong()).getBytes();
-    ZKSignerSecretProvider secretProviderA =
-        spy(new ZKSignerSecretProvider(seedA));
-    ZKSignerSecretProvider secretProviderB =
-        spy(new ZKSignerSecretProvider(seedB));
-    ZKSignerSecretProvider secretProviderC =
-        spy(new ZKSignerSecretProvider(seedC));
-    Properties config = new Properties();
-    config.setProperty(
-        ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
-        zkServer.getConnectString());
-    config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
-        "/secret");
-    try {
-      secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
-      secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
-      secretProviderC.init(config, getDummyServletContext(), rolloverFrequency);
-
-      byte[] currentSecretA = secretProviderA.getCurrentSecret();
-      byte[][] allSecretsA = secretProviderA.getAllSecrets();
-      byte[] currentSecretB = secretProviderB.getCurrentSecret();
-      byte[][] allSecretsB = secretProviderB.getAllSecrets();
-      byte[] currentSecretC = secretProviderC.getCurrentSecret();
-      byte[][] allSecretsC = secretProviderC.getAllSecrets();
-      Assert.assertArrayEquals(currentSecretA, currentSecretB);
-      Assert.assertArrayEquals(currentSecretB, currentSecretC);
-      Assert.assertEquals(2, allSecretsA.length);
-      Assert.assertEquals(2, allSecretsB.length);
-      Assert.assertEquals(2, allSecretsC.length);
-      Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
-      Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
-      Assert.assertNull(allSecretsA[1]);
-      Assert.assertNull(allSecretsB[1]);
-      Assert.assertNull(allSecretsC[1]);
-      char secretChosen = 'z';
-      if (Arrays.equals(secretA1, currentSecretA)) {
-        Assert.assertArrayEquals(secretA1, allSecretsA[0]);
-        secretChosen = 'A';
-      } else if (Arrays.equals(secretB1, currentSecretB)) {
-        Assert.assertArrayEquals(secretB1, allSecretsA[0]);
-        secretChosen = 'B';
-      }else if (Arrays.equals(secretC1, currentSecretC)) {
-        Assert.assertArrayEquals(secretC1, allSecretsA[0]);
-        secretChosen = 'C';
-      } else {
-        Assert.fail("It appears that they all agreed on the same secret, but "
-                + "not one of the secrets they were supposed to");
-      }
-      verify(secretProviderA, timeout(timeout).times(1)).rollSecret();
-      verify(secretProviderB, timeout(timeout).times(1)).rollSecret();
-      verify(secretProviderC, timeout(timeout).times(1)).rollSecret();
+  /**
+   * A hack to test ZKSignerSecretProvider.
+   * We want to test that ZKSignerSecretProvider.rollSecret() is periodically
+   * called at the expected frequency, but we want to exclude the
+   * race-condition.
+   */
+  private class MockZKSignerSecretProvider extends ZKSignerSecretProvider {
+    MockZKSignerSecretProvider(long seed) {
+      super(seed);
+    }
+    @Override
+    protected synchronized void rollSecret() {
+      // this is a no-op: simply used for Mockito to verify that rollSecret()
+      // is periodically called at the expected frequency
+    }
 
-      currentSecretA = secretProviderA.getCurrentSecret();
-      allSecretsA = secretProviderA.getAllSecrets();
-      currentSecretB = secretProviderB.getCurrentSecret();
-      allSecretsB = secretProviderB.getAllSecrets();
-      currentSecretC = secretProviderC.getCurrentSecret();
-      allSecretsC = secretProviderC.getAllSecrets();
-      Assert.assertArrayEquals(currentSecretA, currentSecretB);
-      Assert.assertArrayEquals(currentSecretB, currentSecretC);
-      Assert.assertEquals(2, allSecretsA.length);
-      Assert.assertEquals(2, allSecretsB.length);
-      Assert.assertEquals(2, allSecretsC.length);
-      Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
-      Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
-      Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
-      Assert.assertArrayEquals(allSecretsB[1], allSecretsC[1]);
-      // The second secret used is prechosen by whoever won the init; so it
-      // should match with whichever we saw before
-      if (secretChosen == 'A') {
-        Assert.assertArrayEquals(secretA2, currentSecretA);
-      } else if (secretChosen == 'B') {
-        Assert.assertArrayEquals(secretB2, currentSecretA);
-      } else if (secretChosen == 'C') {
-        Assert.assertArrayEquals(secretC2, currentSecretA);
-      }
-    } finally {
-      secretProviderC.destroy();
-      secretProviderB.destroy();
-      secretProviderA.destroy();
+    public void realRollSecret() {
+      // the test code manually calls ZKSignerSecretProvider.rollSecret()
+      // to update the state
+      super.rollSecret();
     }
   }
 
   @Test
-  public void testMultipleUnsychnronized() throws Exception {
+  public void testMultiple1() throws Exception {
+    testMultiple(1);
+  }
+
+  @Test
+  public void testMultiple2() throws Exception {
+    testMultiple(2);
+  }
+
+  /**
+   * @param order:
+   *            1: secretProviderA wins both realRollSecret races
+   *            2: secretProviderA wins 1st race, B wins 2nd
+   * @throws Exception
+   */
+  public void testMultiple(int order) throws Exception {
     long seedA = System.currentTimeMillis();
     Random rand = new Random(seedA);
     byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
     byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
     byte[] secretA3 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secretA4 = Long.toString(rand.nextLong()).getBytes();
     // use the same seed so we can predict the RNG
     long seedB = System.currentTimeMillis() + rand.nextLong();
     rand = new Random(seedB);
     byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
     byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
     byte[] secretB3 = Long.toString(rand.nextLong()).getBytes();
-    ZKSignerSecretProvider secretProviderA =
-        spy(new ZKSignerSecretProvider(seedA));
-    ZKSignerSecretProvider secretProviderB =
-        spy(new ZKSignerSecretProvider(seedB));
+    byte[] secretB4 = Long.toString(rand.nextLong()).getBytes();
+    MockZKSignerSecretProvider secretProviderA =
+        spy(new MockZKSignerSecretProvider(seedA));
+    MockZKSignerSecretProvider secretProviderB =
+        spy(new MockZKSignerSecretProvider(seedB));
     Properties config = new Properties();
     config.setProperty(
         ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
@@ -223,14 +176,24 @@ public class TestZKSignerSecretProvider {
         "/secret");
     try {
       secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
+      secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
 
       byte[] currentSecretA = secretProviderA.getCurrentSecret();
       byte[][] allSecretsA = secretProviderA.getAllSecrets();
+      byte[] currentSecretB = secretProviderB.getCurrentSecret();
+      byte[][] allSecretsB = secretProviderB.getAllSecrets();
       Assert.assertArrayEquals(secretA1, currentSecretA);
+      Assert.assertArrayEquals(secretA1, currentSecretB);
       Assert.assertEquals(2, allSecretsA.length);
+      Assert.assertEquals(2, allSecretsB.length);
       Assert.assertArrayEquals(secretA1, allSecretsA[0]);
+      Assert.assertArrayEquals(secretA1, allSecretsB[0]);
       Assert.assertNull(allSecretsA[1]);
-      verify(secretProviderA, timeout(timeout).times(1)).rollSecret();
+      Assert.assertNull(allSecretsB[1]);
+      verify(secretProviderA, timeout(timeout).atLeastOnce()).rollSecret();
+      verify(secretProviderB, timeout(timeout).atLeastOnce()).rollSecret();
+      secretProviderA.realRollSecret();
+      secretProviderB.realRollSecret();
 
       currentSecretA = secretProviderA.getCurrentSecret();
       allSecretsA = secretProviderA.getAllSecrets();
@@ -238,18 +201,32 @@ public class TestZKSignerSecretProvider {
       Assert.assertEquals(2, allSecretsA.length);
       Assert.assertArrayEquals(secretA2, allSecretsA[0]);
       Assert.assertArrayEquals(secretA1, allSecretsA[1]);
-      Thread.sleep((rolloverFrequency / 5));
 
-      secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
-
-      byte[] currentSecretB = secretProviderB.getCurrentSecret();
-      byte[][] allSecretsB = secretProviderB.getAllSecrets();
+      currentSecretB = secretProviderB.getCurrentSecret();
+      allSecretsB = secretProviderB.getAllSecrets();
       Assert.assertArrayEquals(secretA2, currentSecretB);
       Assert.assertEquals(2, allSecretsA.length);
       Assert.assertArrayEquals(secretA2, allSecretsB[0]);
       Assert.assertArrayEquals(secretA1, allSecretsB[1]);
-      verify(secretProviderA, timeout(timeout).times(2)).rollSecret();
-      verify(secretProviderB, timeout(timeout).times(1)).rollSecret();
+      verify(secretProviderA, timeout(timeout).atLeast(2)).rollSecret();
+      verify(secretProviderB, timeout(timeout).atLeastOnce()).rollSecret();
+
+      switch (order) {
+        case 1:
+          secretProviderA.realRollSecret();
+          secretProviderB.realRollSecret();
+          secretProviderA.realRollSecret();
+          secretProviderB.realRollSecret();
+          break;
+        case 2:
+          secretProviderB.realRollSecret();
+          secretProviderA.realRollSecret();
+          secretProviderB.realRollSecret();
+          secretProviderA.realRollSecret();
+          break;
+        default:
+          throw new Exception("Invalid order selected");
+      }
 
       currentSecretA = secretProviderA.getCurrentSecret();
       allSecretsA = secretProviderA.getAllSecrets();
@@ -260,13 +237,13 @@ public class TestZKSignerSecretProvider {
       Assert.assertEquals(2, allSecretsB.length);
       Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
       Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
-      if (Arrays.equals(secretA3, currentSecretA)) {
-        Assert.assertArrayEquals(secretA3, allSecretsA[0]);
-      } else if (Arrays.equals(secretB3, currentSecretB)) {
-        Assert.assertArrayEquals(secretB3, allSecretsA[0]);
-      } else {
-        Assert.fail("It appears that they all agreed on the same secret, but "
-                + "not one of the secrets they were supposed to");
+      switch (order) {
+        case 1:
+          Assert.assertArrayEquals(secretA4, allSecretsA[0]);
+          break;
+        case 2:
+          Assert.assertArrayEquals(secretB4, allSecretsA[0]);
+          break;
       }
     } finally {
       secretProviderB.destroy();


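For reference, the test refactoring above makes secret rollover deterministic: instead of sleeping and hoping the background rollover fires in a particular order, the test wraps each provider in a MockZKSignerSecretProvider whose scheduled rollSecret() no longer changes state on its own (Mockito's atLeastOnce() verifications still confirm the schedule fires) and whose realRollSecret() applies one rollover exactly when the test asks for it. A condensed sketch of that pattern follows; it assumes the subclass lives in the same package as ZKSignerSecretProvider (as the real inner test class does) so the seed constructor and rollSecret() are visible, and the class name here is illustrative.

    class DeterministicSecretProvider extends ZKSignerSecretProvider {
      DeterministicSecretProvider(long seed) {
        super(seed);
      }

      @Override
      protected synchronized void rollSecret() {
        // no-op: the scheduled rollover never mutates state on its own,
        // so the test has no timing races while the Mockito spy can still
        // verify that the schedule fired
      }

      public void realRollSecret() {
        // called by the test to apply exactly one rollover at a chosen
        // point, which is how testMultiple(order) controls which provider
        // wins each race
        super.rollSecret();
      }
    }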

[43/52] [abbrv] hadoop git commit: HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu

Posted by cn...@apache.org.
HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fb392a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fb392a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fb392a5

Branch: refs/heads/HADOOP-13037
Commit: 2fb392a587d288b628936ca6d18fabad04afc585
Parents: 809cfd2
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Oct 7 14:05:40 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 10:57:08 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/log/LogLevel.java       | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fb392a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4fa839f..79eae12 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -47,15 +47,17 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * Change log level in runtime.
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUsage: General options are:\n"
+  public static final String USAGES = "\nUsage: Command options are:\n"
       + "\t[-getlevel <host:port> <classname> [-protocol (http|https)]\n"
       + "\t[-setlevel <host:port> <classname> <level> "
       + "[-protocol (http|https)]\n";
@@ -67,7 +69,7 @@ public class LogLevel {
    */
   public static void main(String[] args) throws Exception {
     CLI cli = new CLI(new Configuration());
-    System.exit(cli.run(args));
+    System.exit(ToolRunner.run(cli, args));
   }
 
   /**
@@ -81,6 +83,7 @@ public class LogLevel {
 
   private static void printUsage() {
     System.err.println(USAGES);
+    GenericOptionsParser.printGenericCommandUsage(System.err);
   }
 
   public static boolean isValidProtocol(String protocol) {
@@ -107,7 +110,7 @@ public class LogLevel {
         sendLogLevelRequest();
       } catch (HadoopIllegalArgumentException e) {
         printUsage();
-        throw e;
+        return -1;
       }
       return 0;
     }


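The fix above routes LogLevel's main() through ToolRunner.run(), has printUsage() also emit the generic command usage, and turns a bad-arguments exception into a usage message plus a -1 return instead of a stack trace. Below is a minimal sketch of the Tool/ToolRunner contract this relies on; ExampleCli and its single argument are hypothetical stand-ins, not the LogLevel code itself.

    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class ExampleCli extends Configured implements Tool {

      @Override
      public int run(String[] args) {
        if (args.length == 0) {
          // report bad usage through the exit code rather than throwing
          System.err.println("Usage: examplecli <argument>");
          return -1;
        }
        System.out.println("argument = " + args[0]);
        return 0;
      }

      public static void main(String[] args) throws Exception {
        // ToolRunner parses the generic options (-D, -conf, -fs, ...) into
        // the tool's Configuration before delegating to run()
        System.exit(ToolRunner.run(new ExampleCli(), args));
      }
    }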

[26/52] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-12756

Posted by cn...@apache.org.
Merge branch 'trunk' into HADOOP-12756


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a57bba47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a57bba47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a57bba47

Branch: refs/heads/HADOOP-13037
Commit: a57bba470b396c163baef7ac9447c063180ec15b
Parents: 26d5df3 6a38d11
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 10:29:40 2016 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 10:29:40 2016 +0800

----------------------------------------------------------------------
 .../IncludePublicAnnotationsJDiffDoclet.java    |    64 +
 .../util/RolloverSignerSecretProvider.java      |     2 +-
 .../util/TestZKSignerSecretProvider.java        |   221 +-
 .../dev-support/findbugsExcludeFile.xml         |     5 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml        | 41149 ++++++-----------
 .../org/apache/hadoop/conf/ConfServlet.java     |    19 +-
 .../org/apache/hadoop/conf/Configuration.java   |   284 +-
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java |    48 +
 .../src/main/java/org/apache/hadoop/fs/DU.java  |     8 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java    |    21 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |    13 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |     6 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |    24 +-
 .../hadoop/fs/permission/AclEntryScope.java     |     2 +-
 .../hadoop/fs/permission/AclEntryType.java      |    23 +-
 .../apache/hadoop/fs/permission/AclStatus.java  |     2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |     6 +-
 .../hadoop/fs/shell/CommandWithDestination.java |     5 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java     |     2 +-
 .../java/org/apache/hadoop/io/BloomMapFile.java |    11 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |     9 +-
 .../main/java/org/apache/hadoop/io/MapFile.java |    10 +-
 .../java/org/apache/hadoop/io/SequenceFile.java |    16 +-
 .../apache/hadoop/io/compress/BZip2Codec.java   |     9 +-
 .../apache/hadoop/io/compress/DefaultCodec.java |     9 +-
 .../apache/hadoop/io/compress/GzipCodec.java    |     9 +-
 .../hadoop/io/file/tfile/Compression.java       |    14 +-
 .../org/apache/hadoop/ipc/ExternalCall.java     |    91 +
 .../main/java/org/apache/hadoop/ipc/Server.java |    88 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |     2 +-
 .../apache/hadoop/net/SocksSocketFactory.java   |     4 +-
 .../org/apache/hadoop/security/Credentials.java |     8 +-
 .../hadoop/security/KerberosAuthException.java  |   118 +
 .../hadoop/security/UGIExceptionMessages.java   |    46 +
 .../hadoop/security/UserGroupInformation.java   |   105 +-
 .../org/apache/hadoop/security/token/Token.java |    60 +-
 .../java/org/apache/hadoop/util/LineReader.java |     6 +-
 .../org/apache/hadoop/util/SysInfoWindows.java  |    58 +-
 .../java/org/apache/hadoop/util/hash/Hash.java  |     6 +-
 .../src/main/resources/core-default.xml         |     6 +-
 .../src/site/markdown/FileSystemShell.md        |     3 +-
 .../src/site/markdown/filesystem/filesystem.md  |    77 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |   122 +-
 .../apache/hadoop/conf/TestConfiguration.java   |   140 +-
 .../apache/hadoop/fs/FileContextURIBase.java    |     4 +-
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java    |    75 +
 .../hadoop/fs/TestDelegationTokenRenewer.java   |     3 +-
 .../hadoop/fs/TestFileSystemInitialization.java |    12 +-
 .../AbstractContractRootDirectoryTest.java      |    34 +-
 .../fs/contract/AbstractFSContractTestBase.java |     2 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |    48 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |    85 +
 .../org/apache/hadoop/net/ServerSocketUtil.java |    23 +
 .../security/TestUserGroupInformation.java      |    33 +-
 .../apache/hadoop/util/TestSysInfoWindows.java  |     7 +-
 .../hadoop/crypto/key/kms/server/KMS.java       |    76 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java |     2 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   |    76 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |     4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |     9 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |   146 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |    30 +
 .../hdfs/client/CreateEncryptionZoneFlag.java   |    70 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   536 +
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |    86 +
 .../apache/hadoop/hdfs/client/package-info.java |    27 +
 .../server/datanode/DiskBalancerWorkItem.java   |     2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |    88 +-
 .../hdfs/web/resources/AclPermissionParam.java  |    23 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |     1 -
 .../jdiff/Apache_Hadoop_HDFS_2.7.2.xml          | 21704 +--------
 .../src/contrib/bkjournal/README.txt            |    66 -
 .../dev-support/findbugsExcludeFile.xml         |     5 -
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |   175 -
 .../bkjournal/BookKeeperEditLogInputStream.java |   264 -
 .../BookKeeperEditLogOutputStream.java          |   188 -
 .../bkjournal/BookKeeperJournalManager.java     |   893 -
 .../contrib/bkjournal/CurrentInprogress.java    |   160 -
 .../bkjournal/EditLogLedgerMetadata.java        |   217 -
 .../hadoop/contrib/bkjournal/MaxTxId.java       |   103 -
 .../bkjournal/src/main/proto/bkjournal.proto    |    49 -
 .../hadoop/contrib/bkjournal/BKJMUtil.java      |   184 -
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |   414 -
 .../bkjournal/TestBookKeeperConfiguration.java  |   174 -
 .../bkjournal/TestBookKeeperEditLogStreams.java |    92 -
 .../bkjournal/TestBookKeeperHACheckpoints.java  |   109 -
 .../bkjournal/TestBookKeeperJournalManager.java |   984 -
 .../TestBookKeeperSpeculativeRead.java          |   167 -
 .../bkjournal/TestBootstrapStandbyWithBKJM.java |   170 -
 .../bkjournal/TestCurrentInprogress.java        |   160 -
 .../hdfs/server/namenode/FSEditLogTestUtil.java |    40 -
 .../src/test/resources/log4j.properties         |    55 -
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |     5 +-
 .../hdfs/client/CreateEncryptionZoneFlag.java   |    71 -
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   524 -
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |    86 -
 .../apache/hadoop/hdfs/client/package-info.java |    27 -
 .../block/BlockPoolTokenSecretManager.java      |     3 +-
 .../token/block/BlockTokenSecretManager.java    |     6 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |     2 +-
 .../server/blockmanagement/BlockManager.java    |    70 +-
 .../BlockPlacementPolicyDefault.java            |     5 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |     5 +
 .../hdfs/server/datanode/DiskBalancer.java      |   210 +-
 .../erasurecode/StripedBlockReader.java         |     2 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |     3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |    11 +-
 .../datanode/fsdataset/impl/ReplicaMap.java     |    41 +-
 .../server/diskbalancer/planner/MoveStep.java   |     6 +-
 .../hdfs/server/namenode/CacheManager.java      |    12 +-
 .../ContentSummaryComputationContext.java       |    94 +-
 .../namenode/EncryptionFaultInjector.java       |     6 +
 .../server/namenode/EncryptionZoneManager.java  |    25 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |     4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |   122 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java     |    67 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   148 +-
 .../server/namenode/FSDirErasureCodingOp.java   |     2 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |     2 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |   200 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |    22 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   281 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |     2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   163 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |    27 +-
 .../hdfs/server/namenode/FSDirectory.java       |    94 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |    94 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   244 +-
 .../hdfs/server/namenode/FSNamesystemLock.java  |   187 +-
 .../hadoop/hdfs/server/namenode/INode.java      |     1 +
 .../server/namenode/INodeAttributeProvider.java |    10 +-
 .../hdfs/server/namenode/INodeDirectory.java    |    11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |     1 +
 .../hadoop/hdfs/server/namenode/INodeId.java    |    16 -
 .../hdfs/server/namenode/INodeReference.java    |     2 +
 .../hdfs/server/namenode/INodeSymlink.java      |     1 +
 .../hdfs/server/namenode/INodesInPath.java      |     7 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |    23 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |     2 +
 .../hdfs/server/namenode/XAttrStorage.java      |     7 +-
 .../ha/RequestHedgingProxyProvider.java         |    25 +-
 .../snapshot/DirectorySnapshottableFeature.java |     9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |    14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |     1 +
 .../server/protocol/RemoteEditLogManifest.java  |     7 +-
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   |    51 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |     2 +-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |     2 +-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |     2 +-
 .../src/main/resources/hdfs-default.xml         |     6 +-
 .../src/site/markdown/FaultInjectFramework.md   |   254 -
 .../src/site/markdown/HDFSCommands.md           |     8 +-
 .../src/site/markdown/HDFSDiskbalancer.md       |     5 +-
 .../src/site/markdown/HDFSErasureCoding.md      |    11 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   114 -
 .../src/site/markdown/TransparentEncryption.md  |    16 +
 .../hadoop-hdfs/src/site/markdown/ViewFs.md     |     2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md    |     2 +-
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |    17 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   103 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |     4 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java    |  2021 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java      |   742 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   109 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |    61 +
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |     1 +
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  |   119 +
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |    30 +-
 .../TestPendingInvalidateBlock.java             |    37 +-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |     2 +-
 .../fsdataset/impl/TestDatanodeRestart.java     |    13 +-
 .../impl/TestInterDatanodeProtocol.java         |     3 +-
 .../datanode/fsdataset/impl/TestReplicaMap.java |     3 +-
 .../fsdataset/impl/TestWriteToReplica.java      |     3 +-
 .../server/diskbalancer/TestDiskBalancer.java   |   156 +
 .../server/namenode/TestCacheDirectives.java    |    10 +
 .../namenode/TestDiskspaceQuotaUpdate.java      |   301 +-
 .../hdfs/server/namenode/TestFSDirectory.java   |    48 +
 .../hdfs/server/namenode/TestFSNamesystem.java  |   251 -
 .../server/namenode/TestFSNamesystemLock.java   |   317 +
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  2403 +-
 .../server/namenode/TestNameNodeMXBean.java     |    34 +-
 .../server/namenode/TestSnapshotPathINodes.java |    22 +
 .../server/namenode/ha/TestEditLogTailer.java   |    42 +-
 .../ha/TestRequestHedgingProxyProvider.java     |    18 +-
 .../snapshot/TestRenameWithSnapshots.java       |   199 +
 .../shortcircuit/TestShortCircuitCache.java     |     9 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      |    27 +-
 .../apache/hadoop/security/TestPermission.java  |   131 +-
 .../src/test/resources/testCryptoConf.xml       |    90 +
 hadoop-hdfs-project/pom.xml                     |     1 -
 .../Apache_Hadoop_MapReduce_Common_2.7.2.xml    |   727 +-
 .../Apache_Hadoop_MapReduce_Core_2.7.2.xml      |  7059 +--
 .../Apache_Hadoop_MapReduce_JobClient_2.7.2.xml |   962 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |    90 +-
 .../mapreduce/v2/app/TaskHeartbeatHandler.java  |    24 +-
 .../v2/app/rm/RMContainerAllocator.java         |    51 +-
 .../hadoop/mapreduce/v2/app/TestRecovery.java   |    66 +
 .../v2/app/TestTaskHeartbeatHandler.java        |    67 +
 .../v2/app/launcher/TestContainerLauncher.java  |    30 +
 .../app/launcher/TestContainerLauncherImpl.java |    30 +
 .../v2/app/rm/TestRMContainerAllocator.java     |    46 +
 .../mapreduce/v2/app/webapp/TestAMWebApp.java   |     8 +-
 .../hadoop-mapreduce-client-core/pom.xml        |     6 +-
 .../java/org/apache/hadoop/mapred/Master.java   |    70 +-
 .../java/org/apache/hadoop/mapred/Task.java     |     8 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java    |    11 +-
 .../hadoop/mapreduce/util/MRJobConfUtil.java    |    16 +
 .../src/main/resources/mapred-default.xml       |    10 +-
 .../org/apache/hadoop/mapred/TestMaster.java    |    56 +-
 .../mapreduce/v2/hs/HistoryFileManager.java     |    21 +
 .../hadoop/mapred/ResourceMgrDelegate.java      |     6 +
 .../apache/hadoop/mapred/JobClientUnitTest.java |    34 +-
 .../hadoop-mapreduce-client/pom.xml             |     4 +-
 .../maven/plugin/cmakebuilder/CompileMojo.java  |     4 +-
 .../maven/plugin/cmakebuilder/TestMojo.java     |     4 +-
 .../hadoop/maven/plugin/protoc/ProtocMojo.java  |     4 +
 .../apache/hadoop/maven/plugin/util/Exec.java   |     6 +-
 .../plugin/versioninfo/VersionInfoMojo.java     |    55 +-
 hadoop-project-dist/pom.xml                     |    21 +-
 hadoop-project/pom.xml                          |     8 +-
 hadoop-tools/hadoop-aws/pom.xml                 |    12 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |   171 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java       |    10 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |    17 +
 .../org/apache/hadoop/fs/s3a/Statistic.java     |     4 +
 .../src/site/markdown/tools/hadoop-aws/index.md |    42 +-
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    |     8 +
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |    55 -
 .../fs/s3a/ITestS3AFileOperationCost.java       |    85 +
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   |    63 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |    13 +-
 .../fs/s3a/TestS3AExceptionTranslation.java     |   127 +
 hadoop-tools/hadoop-azure-datalake/pom.xml      |     4 +
 ...ClientCredentialBasedAccesTokenProvider.java |     5 +-
 hadoop-tools/hadoop-azure/pom.xml               |     6 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |    16 +-
 .../tools/mapred/CopyMapper_Counter.properties  |    24 +
 .../contract/AbstractContractDistCpTest.java    |     2 +-
 hadoop-tools/hadoop-kafka/pom.xml               |     2 +-
 hadoop-tools/hadoop-openstack/pom.xml           |    18 +-
 .../swift/auth/ApiKeyAuthenticationRequest.java |     2 +-
 .../fs/swift/auth/entities/AccessToken.java     |     2 +-
 .../hadoop/fs/swift/auth/entities/Catalog.java  |     2 +-
 .../hadoop/fs/swift/auth/entities/Endpoint.java |     2 +-
 .../hadoop/fs/swift/auth/entities/Tenant.java   |     2 +-
 .../hadoop/fs/swift/auth/entities/User.java     |     2 +-
 .../snative/SwiftNativeFileSystemStore.java     |     3 +-
 .../apache/hadoop/fs/swift/util/JSONUtil.java   |    24 +-
 hadoop-tools/hadoop-rumen/pom.xml               |     9 +
 .../apache/hadoop/tools/rumen/Anonymizer.java   |    23 +-
 .../hadoop/tools/rumen/HadoopLogsAnalyzer.java  |     3 +-
 .../tools/rumen/JsonObjectMapperParser.java     |    17 +-
 .../tools/rumen/JsonObjectMapperWriter.java     |    21 +-
 .../apache/hadoop/tools/rumen/LoggedJob.java    |     2 +-
 .../hadoop/tools/rumen/LoggedLocation.java      |     2 +-
 .../tools/rumen/LoggedNetworkTopology.java      |     2 +-
 .../rumen/LoggedSingleRelativeRanking.java      |     4 +-
 .../apache/hadoop/tools/rumen/LoggedTask.java   |     2 +-
 .../hadoop/tools/rumen/LoggedTaskAttempt.java   |     2 +-
 .../hadoop/tools/rumen/datatypes/NodeName.java  |     2 +-
 .../rumen/serializers/BlockingSerializer.java   |    10 +-
 .../DefaultAnonymizingRumenSerializer.java      |     8 +-
 .../serializers/DefaultRumenSerializer.java     |     9 +-
 .../serializers/ObjectStringSerializer.java     |    10 +-
 .../apache/hadoop/tools/rumen/state/State.java  |     2 +-
 .../tools/rumen/state/StateDeserializer.java    |    14 +-
 .../hadoop/tools/rumen/state/StatePool.java     |    36 +-
 .../hadoop/tools/rumen/TestHistograms.java      |    13 +-
 hadoop-tools/hadoop-sls/pom.xml                 |     4 +
 .../hadoop/yarn/sls/RumenToSLSConverter.java    |     8 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |     7 +-
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |    10 +-
 .../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml      |   530 +-
 .../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml   |   613 +-
 .../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml   |  1185 +-
 .../Apache_Hadoop_YARN_Server_Common_2.7.2.xml  |  1056 +-
 .../yarn/api/ContainerManagementProtocol.java   |    54 +
 .../api/protocolrecords/CommitResponse.java     |    42 +
 .../protocolrecords/KillApplicationRequest.java |    18 +
 .../ReInitializeContainerRequest.java           |   110 +
 .../ReInitializeContainerResponse.java          |    38 +
 .../RestartContainerResponse.java               |    38 +
 .../api/protocolrecords/RollbackResponse.java   |    42 +
 .../records/ApplicationSubmissionContext.java   |    21 +
 .../api/records/ApplicationTimeoutType.java     |    41 +
 .../yarn/api/records/ReservationDefinition.java |    44 +-
 .../org/apache/hadoop/yarn/api/records/URL.java |    58 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |    24 +-
 .../ReplaceLabelsOnNodeRequest.java             |     8 +
 .../proto/containermanagement_protocol.proto    |     6 +
 ..._server_resourcemanager_service_protos.proto |     2 +-
 .../src/main/proto/yarn_protos.proto            |    11 +
 .../src/main/proto/yarn_service_protos.proto    |    19 +
 .../apache/hadoop/yarn/api/records/TestURL.java |    99 +
 .../hadoop/yarn/client/api/YarnClient.java      |    14 +
 .../yarn/client/api/impl/AMRMClientImpl.java    |    10 +-
 .../impl/ContainerManagementProtocolProxy.java  |     7 +-
 .../yarn/client/api/impl/YarnClientImpl.java    |    35 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |    39 +-
 .../TestOpportunisticContainerAllocation.java   |   398 +
 .../yarn/client/api/impl/TestYarnClient.java    |    52 +
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |     3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     |    10 +-
 ...ContainerManagementProtocolPBClientImpl.java |    73 +
 ...ontainerManagementProtocolPBServiceImpl.java |    86 +-
 .../impl/pb/CommitResponsePBImpl.java           |    67 +
 .../impl/pb/KillApplicationRequestPBImpl.java   |    18 +
 .../pb/ReInitializeContainerRequestPBImpl.java  |   173 +
 .../pb/ReInitializeContainerResponsePBImpl.java |    68 +
 .../impl/pb/RestartContainerResponsePBImpl.java |    67 +
 .../impl/pb/RollbackResponsePBImpl.java         |    67 +
 .../pb/ApplicationSubmissionContextPBImpl.java  |    83 +
 .../records/impl/pb/ContainerStatusPBImpl.java  |     2 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |    19 +
 .../impl/pb/ReservationDefinitionPBImpl.java    |    31 +
 .../client/api/impl/TimelineClientImpl.java     |     3 +-
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java    |    14 +-
 .../yarn/util/AbstractLivelinessMonitor.java    |    32 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |     3 +-
 .../src/main/resources/yarn-default.xml         |    28 +-
 .../hadoop/yarn/TestContainerLaunchRPC.java     |    30 +
 .../yarn/TestContainerResourceIncreaseRPC.java  |    30 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |   264 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |   269 +-
 ...pplicationHistoryManagerOnTimelineStore.java |    14 +-
 ...pplicationHistoryManagerOnTimelineStore.java |    14 +-
 .../metrics/ApplicationMetricsConstants.java    |     3 +
 .../OpportunisticContainerAllocator.java        |    22 +-
 .../OpportunisticContainerContext.java          |    49 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java    |    30 +
 .../yarn/server/nodemanager/NMAuditLogger.java  |     4 +
 .../yarn/server/nodemanager/NodeManager.java    |     3 +-
 .../amrmproxy/DefaultRequestInterceptor.java    |     4 +-
 .../containermanager/ContainerManagerImpl.java  |    53 +-
 .../container/ContainerImpl.java                |    92 +-
 .../monitor/ContainersMonitorImpl.java          |    31 +-
 .../nodemanager/metrics/NodeManagerMetrics.java |    26 +
 .../scheduler/DistributedScheduler.java         |    59 +-
 .../impl/container-executor.c                   |    46 +-
 .../impl/container-executor.h                   |    10 +-
 .../main/native/container-executor/impl/main.c  |   264 +-
 .../TestContainerManagerWithLCE.java            |    12 +
 .../containermanager/TestContainerManager.java  |   103 +-
 .../monitor/TestContainersMonitor.java          |    39 +
 .../server/resourcemanager/AdminService.java    |    46 +
 .../server/resourcemanager/ClientRMService.java |    20 +-
 ...pportunisticContainerAllocatorAMService.java |   215 +-
 .../resourcemanager/RMActiveServiceContext.java |    16 +
 .../server/resourcemanager/RMAppManager.java    |     4 +
 .../server/resourcemanager/RMAuditLogger.java   |     3 +
 .../yarn/server/resourcemanager/RMContext.java  |     5 +
 .../server/resourcemanager/RMContextImpl.java   |    12 +
 .../server/resourcemanager/RMServerUtils.java   |    21 +-
 .../yarn/server/resourcemanager/RMZKUtils.java  |    19 +-
 .../server/resourcemanager/ResourceManager.java |    23 +-
 .../resourcemanager/amlauncher/AMLauncher.java  |    21 -
 .../metrics/TimelineServiceV1Publisher.java     |     6 +
 .../metrics/TimelineServiceV2Publisher.java     |     5 +
 .../recovery/ZKRMStateStore.java                |   269 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |    49 +-
 .../rmapp/monitor/RMAppLifetimeMonitor.java     |   130 +
 .../rmapp/monitor/RMAppToMonitor.java           |    77 +
 .../rmapp/monitor/package-info.java             |    28 +
 .../scheduler/AbstractYarnScheduler.java        |   416 +-
 .../scheduler/AppSchedulingInfo.java            |   619 +-
 .../scheduler/SchedulerApplicationAttempt.java  |    66 +-
 .../scheduler/capacity/CapacityScheduler.java   |  1729 +-
 .../scheduler/capacity/LeafQueue.java           |    16 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |     4 +
 .../distributed/NodeQueueLoadMonitor.java       |    45 +-
 .../scheduler/fair/FSLeafQueue.java             |     2 +-
 .../scheduler/fair/FSParentQueue.java           |     6 +-
 .../scheduler/fair/FairScheduler.java           |  1047 +-
 .../resourcemanager/webapp/ErrorBlock.java      |    39 +
 .../server/resourcemanager/webapp/RMWebApp.java |     1 +
 .../resourcemanager/webapp/RMWebServices.java   |    24 +-
 .../webapp/RedirectionErrorPage.java            |    47 +
 .../resourcemanager/webapp/RmController.java    |     4 +
 .../resourcemanager/webapp/dao/AppState.java    |     8 +
 .../webapp/dao/ReservationDefinitionInfo.java   |    11 +
 .../yarn/server/resourcemanager/MockRM.java     |    22 +-
 .../server/resourcemanager/NodeManager.java     |    29 +
 .../resourcemanager/TestAMAuthorization.java    |    31 +
 .../TestApplicationMasterLauncher.java          |    30 +
 .../resourcemanager/TestClientRMService.java    |     7 +-
 ...pportunisticContainerAllocatorAMService.java |    10 +-
 .../resourcemanager/TestRMAdminService.java     |   103 +-
 .../metrics/TestSystemMetricsPublisher.java     |    18 +
 .../TestSystemMetricsPublisherForV2.java        |     9 +
 .../reservation/ReservationSystemTestUtil.java  |    10 +-
 .../rmapp/TestApplicationLifetimeMonitor.java   |   165 +
 .../TestRMWebServicesAppsModification.java      |     4 +
 .../webapp/TestRedirectionErrorPage.java        |    68 +
 .../yarn/server/webproxy/ProxyUriUtils.java     |    53 +-
 .../server/webproxy/WebAppProxyServlet.java     |   274 +-
 .../server/webproxy/amfilter/AmIpFilter.java    |    64 +-
 .../server/webproxy/TestWebAppProxyServlet.java |    24 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |    29 +-
 .../src/site/markdown/CapacityScheduler.md      |     2 +-
 .../src/site/markdown/NodeLabel.md              |     2 +-
 .../src/site/markdown/ReservationSystem.md      |     2 +-
 .../src/site/markdown/ResourceManagerRest.md    |     3 +
 .../site/markdown/WritingYarnApplications.md    |     6 +-
 .../hadoop-yarn-site/src/site/markdown/YARN.md  |     6 +-
 hadoop-yarn-project/hadoop-yarn/pom.xml         |     4 +-
 pom.xml                                         |     1 -
 407 files changed, 32657 insertions(+), 72369 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a57bba47/hadoop-project/pom.xml
----------------------------------------------------------------------



[32/52] [abbrv] hadoop git commit: HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. Contributed by Xiaobing Zhou

Posted by cn...@apache.org.
HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3441c746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3441c746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3441c746

Branch: refs/heads/HADOOP-13037
Commit: 3441c746b5f35c46fca5a0f252c86c8357fe932e
Parents: cef61d5
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Oct 10 11:33:37 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 11:33:37 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 124 +++++++++++++++++--
 1 file changed, 113 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3441c746/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index e71c5cc..94ecb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -30,12 +30,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -68,6 +70,10 @@ public class TestDFSAdmin {
   private DFSAdmin admin;
   private DataNode datanode;
   private NameNode namenode;
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
 
   @Before
   public void setUp() throws Exception {
@@ -77,12 +83,32 @@ public class TestDFSAdmin {
     admin = new DFSAdmin();
   }
 
+  private void redirectStream() {
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+  }
+
+  private void resetStream() {
+    out.reset();
+    err.reset();
+  }
+
   @After
   public void tearDown() throws Exception {
+    try {
+      System.out.flush();
+      System.err.flush();
+    } finally {
+      System.setOut(OLD_OUT);
+      System.setErr(OLD_ERR);
+    }
+
     if (cluster != null) {
       cluster.shutdown();
       cluster = null;
     }
+
+    resetStream();
   }
 
   private void restartCluster() throws IOException {
@@ -111,28 +137,104 @@ public class TestDFSAdmin {
       String nodeType, String address, final List<String> outs,
       final List<String> errs) throws IOException {
     ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
-    PrintStream out = new PrintStream(bufOut);
+    PrintStream outStream = new PrintStream(bufOut);
     ByteArrayOutputStream bufErr = new ByteArrayOutputStream();
-    PrintStream err = new PrintStream(bufErr);
+    PrintStream errStream = new PrintStream(bufErr);
 
     if (methodName.equals("getReconfigurableProperties")) {
-      admin.getReconfigurableProperties(nodeType, address, out, err);
+      admin.getReconfigurableProperties(
+          nodeType,
+          address,
+          outStream,
+          errStream);
     } else if (methodName.equals("getReconfigurationStatus")) {
-      admin.getReconfigurationStatus(nodeType, address, out, err);
+      admin.getReconfigurationStatus(nodeType, address, outStream, errStream);
     } else if (methodName.equals("startReconfiguration")) {
-      admin.startReconfiguration(nodeType, address, out, err);
+      admin.startReconfiguration(nodeType, address, outStream, errStream);
     }
 
-    Scanner scanner = new Scanner(bufOut.toString());
+    scanIntoList(bufOut, outs);
+    scanIntoList(bufErr, errs);
+  }
+
+  private static void scanIntoList(
+      final ByteArrayOutputStream baos,
+      final List<String> list) {
+    final Scanner scanner = new Scanner(baos.toString());
     while (scanner.hasNextLine()) {
-      outs.add(scanner.nextLine());
+      list.add(scanner.nextLine());
     }
     scanner.close();
-    scanner = new Scanner(bufErr.toString());
-    while (scanner.hasNextLine()) {
-      errs.add(scanner.nextLine());
+  }
+
+  @Test(timeout = 30000)
+  public void testGetDatanodeInfo() throws Exception {
+    redirectStream();
+    final Configuration dfsConf = new HdfsConfiguration();
+    final int numDn = 2;
+
+    /* init cluster */
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+        .numDataNodes(numDn).build()) {
+
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      /* init reused vars */
+      List<String> outs = null;
+      int ret;
+
+      /**
+       * test erroneous run
+       */
+      resetStream();
+      outs = Lists.newArrayList();
+
+      /* invoke getDatanodeInfo */
+      ret = ToolRunner.run(
+          dfsAdmin,
+          new String[] {"-getDatanodeInfo", "128.0.0.1:1234"});
+
+      /* collect outputs */
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(-1, ret);
+      assertTrue("Unexpected getDatanodeInfo stdout", outs.isEmpty());
+
+      /**
+       * test normal run
+       */
+      for (int i = 0; i < numDn; i++) {
+        resetStream();
+        final DataNode dn = miniCluster.getDataNodes().get(i);
+
+        /* invoke getDatanodeInfo */
+        final String addr = String.format(
+            "%s:%d",
+            dn.getXferAddress().getHostString(),
+            dn.getIpcPort());
+        ret = ToolRunner.run(
+            dfsAdmin,
+            new String[] {"-getDatanodeInfo", addr});
+
+        /* collect outputs */
+        outs = Lists.newArrayList();
+        scanIntoList(out, outs);
+
+        /* verify results */
+        assertEquals(0, ret);
+        assertEquals(
+            "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
+                + " Config version: core-x.y.z,hdfs-x",
+            1, outs.size());
+        assertThat(outs.get(0),
+            is(allOf(containsString("Uptime:"),
+                containsString("Software version"),
+                containsString("Config version"))));
+      }
     }
-    scanner.close();
   }
 
   @Test(timeout = 30000)


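The new testGetDatanodeInfo drives DFSAdmin the same way the shell command `hdfs dfsadmin -getDatanodeInfo <datanode_host:ipc_port>` does: ToolRunner returns -1 with nothing on stdout for an unreachable address, and returns 0 with a single line containing "Uptime:", "Software version" and "Config version" for a live DataNode. A standalone sketch of the same invocation outside the test harness is below; the address is a placeholder, not a value from the patch.

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class GetDatanodeInfoExample {
      public static void main(String[] args) throws Exception {
        DFSAdmin dfsAdmin = new DFSAdmin(new HdfsConfiguration());
        // same arguments the test passes; per the test, expect 0 on success
        // and -1 when the DataNode's IPC address cannot be reached
        int ret = ToolRunner.run(dfsAdmin,
            new String[] {"-getDatanodeInfo", "dn1.example.com:9867"});
        System.exit(ret);
      }
    }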

[10/52] [abbrv] hadoop git commit: HDFS-10955. Pass IIP for FSDirAttr methods. Contributed by Daryn Sharp.

Posted by cn...@apache.org.
HDFS-10955. Pass IIP for FSDirAttr methods. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48b9d5fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48b9d5fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48b9d5fd

Branch: refs/heads/HADOOP-13037
Commit: 48b9d5fd2a96728b1118be217ca597c4098e99ca
Parents: 1d330fb
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Oct 6 16:33:46 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Oct 6 16:33:46 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirAttrOp.java       | 110 ++++++++-----------
 .../hdfs/server/namenode/FSEditLogLoader.java   |  62 +++++++----
 .../hdfs/server/namenode/FSNamesystem.java      |   3 +-
 3 files changed, 83 insertions(+), 92 deletions(-)
----------------------------------------------------------------------

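The patch below applies one pattern throughout FSDirAttrOp and FSEditLogLoader: the caller resolves the path to an INodesInPath once (via resolvePathForWrite or getINodesInPath4Write) and hands that IIP to the unprotected* helpers, which use iip.getPath(), getLastINode() and getLatestSnapshotId() instead of re-resolving a String src. The self-contained sketch below illustrates the same resolve-once idea with plain JDK types only; it is an analogy, not HDFS code.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.attribute.FileTime;

    public class ResolveOnceExample {

      // Before: each helper takes the raw string and resolves it again,
      // paying for repeated lookups that can also disagree with each other.
      static void touchByString(String src, long mtime) throws IOException {
        Files.setLastModifiedTime(Paths.get(src).toRealPath(),
            FileTime.fromMillis(mtime));
      }

      // After: the caller resolves once and passes the resolved handle down,
      // mirroring how the patch passes an INodesInPath instead of a path.
      static void touchByHandle(Path resolved, long mtime) throws IOException {
        Files.setLastModifiedTime(resolved, FileTime.fromMillis(mtime));
      }

      public static void main(String[] args) throws IOException {
        Path resolved = Paths.get(args[0]).toRealPath();  // resolve once
        touchByHandle(resolved, System.currentTimeMillis());
      }
    }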

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b9d5fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 4c5ecb1d..91d9bce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -50,9 +50,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 
 public class FSDirAttrOp {
   static HdfsFileStatus setPermission(
-      FSDirectory fsd, final String srcArg, FsPermission permission)
+      FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
-    String src = srcArg;
     if (FSDirectory.isExactReservedName(src)) {
       throw new InvalidPathException(src);
     }
@@ -61,13 +60,12 @@ public class FSDirAttrOp {
     fsd.writeLock();
     try {
       iip = fsd.resolvePathForWrite(pc, src);
-      src = iip.getPath();
       fsd.checkOwner(pc, iip);
-      unprotectedSetPermission(fsd, src, permission);
+      unprotectedSetPermission(fsd, iip, permission);
     } finally {
       fsd.writeUnlock();
     }
-    fsd.getEditLog().logSetPermissions(src, permission);
+    fsd.getEditLog().logSetPermissions(iip.getPath(), permission);
     return fsd.getAuditFileInfo(iip);
   }
 
@@ -82,7 +80,6 @@ public class FSDirAttrOp {
     fsd.writeLock();
     try {
       iip = fsd.resolvePathForWrite(pc, src);
-      src = iip.getPath();
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -92,11 +89,11 @@ public class FSDirAttrOp {
           throw new AccessControlException("User does not belong to " + group);
         }
       }
-      unprotectedSetOwner(fsd, src, username, group);
+      unprotectedSetOwner(fsd, iip, username, group);
     } finally {
       fsd.writeUnlock();
     }
-    fsd.getEditLog().logSetOwner(src, username, group);
+    fsd.getEditLog().logSetOwner(iip.getPath(), username, group);
     return fsd.getAuditFileInfo(iip);
   }
 
@@ -109,20 +106,18 @@ public class FSDirAttrOp {
     fsd.writeLock();
     try {
       iip = fsd.resolvePathForWrite(pc, src);
-      src = iip.getPath();
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
       final INode inode = iip.getLastINode();
       if (inode == null) {
-        throw new FileNotFoundException("File/Directory " + src +
+        throw new FileNotFoundException("File/Directory " + iip.getPath() +
                                             " does not exist.");
       }
-      boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
-          iip.getLatestSnapshotId());
+      boolean changed = unprotectedSetTimes(fsd, iip, mtime, atime, true);
       if (changed) {
-        fsd.getEditLog().logTimes(src, mtime, atime);
+        fsd.getEditLog().logTimes(iip.getPath(), mtime, atime);
       }
     } finally {
       fsd.writeUnlock();
@@ -139,16 +134,15 @@ public class FSDirAttrOp {
     fsd.writeLock();
     try {
       final INodesInPath iip = fsd.resolvePathForWrite(pc, src);
-      src = iip.getPath();
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
 
-      final BlockInfo[] blocks = unprotectedSetReplication(fsd, src,
+      final BlockInfo[] blocks = unprotectedSetReplication(fsd, iip,
                                                            replication);
       isFile = blocks != null;
       if (isFile) {
-        fsd.getEditLog().logSetReplication(src, replication);
+        fsd.getEditLog().logSetReplication(iip.getPath(), replication);
       }
     } finally {
       fsd.writeUnlock();
@@ -186,15 +180,14 @@ public class FSDirAttrOp {
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = FSDirectory.resolvePath(src, fsd);
-      iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.resolvePathForWrite(pc, src);
 
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
 
       unprotectedSetStoragePolicy(fsd, bm, iip, policyId);
-      fsd.getEditLog().logSetStoragePolicy(src, policyId);
+      fsd.getEditLog().logSetStoragePolicy(iip.getPath(), policyId);
     } finally {
       fsd.writeUnlock();
     }
@@ -232,11 +225,10 @@ public class FSDirAttrOp {
     fsd.readLock();
     try {
       final INodesInPath iip = fsd.resolvePath(pc, src, false);
-      src = iip.getPath();
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);
       }
-      return INodeFile.valueOf(iip.getLastINode(), src)
+      return INodeFile.valueOf(iip.getLastINode(), iip.getPath())
           .getPreferredBlockSize();
     } finally {
       fsd.readUnlock();
@@ -250,14 +242,16 @@ public class FSDirAttrOp {
    */
   static void setQuota(FSDirectory fsd, String src, long nsQuota, long ssQuota,
       StorageType type) throws IOException {
+    FSPermissionChecker pc = fsd.getPermissionChecker();
     if (fsd.isPermissionEnabled()) {
-      FSPermissionChecker pc = fsd.getPermissionChecker();
       pc.checkSuperuserPrivilege();
     }
 
     fsd.writeLock();
     try {
-      INodeDirectory changed = unprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
+      INodesInPath iip = fsd.resolvePathForWrite(pc, src);
+      INodeDirectory changed =
+          unprotectedSetQuota(fsd, iip, nsQuota, ssQuota, type);
       if (changed != null) {
         final QuotaCounts q = changed.getQuotaCounts();
         if (type == null) {
@@ -273,58 +267,40 @@ public class FSDirAttrOp {
   }
 
   static void unprotectedSetPermission(
-      FSDirectory fsd, String src, FsPermission permissions)
+      FSDirectory fsd, INodesInPath iip, FsPermission permissions)
       throws FileNotFoundException, UnresolvedLinkException,
              QuotaExceededException, SnapshotAccessControlException {
     assert fsd.hasWriteLock();
-    final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
-    final INode inode = inodesInPath.getLastINode();
-    if (inode == null) {
-      throw new FileNotFoundException("File does not exist: " + src);
-    }
-    int snapshotId = inodesInPath.getLatestSnapshotId();
+    final INode inode = FSDirectory.resolveLastINode(iip);
+    int snapshotId = iip.getLatestSnapshotId();
     inode.setPermission(permissions, snapshotId);
   }
 
   static void unprotectedSetOwner(
-      FSDirectory fsd, String src, String username, String groupname)
+      FSDirectory fsd, INodesInPath iip, String username, String groupname)
       throws FileNotFoundException, UnresolvedLinkException,
       QuotaExceededException, SnapshotAccessControlException {
     assert fsd.hasWriteLock();
-    final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
-    INode inode = inodesInPath.getLastINode();
-    if (inode == null) {
-      throw new FileNotFoundException("File does not exist: " + src);
-    }
+    final INode inode = FSDirectory.resolveLastINode(iip);
     if (username != null) {
-      inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
+      inode.setUser(username, iip.getLatestSnapshotId());
     }
     if (groupname != null) {
-      inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
+      inode.setGroup(groupname, iip.getLatestSnapshotId());
     }
   }
 
   static boolean setTimes(
-      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
-      int latestSnapshotId) throws QuotaExceededException {
+      FSDirectory fsd, INodesInPath iip, long mtime, long atime, boolean force)
+          throws QuotaExceededException {
     fsd.writeLock();
     try {
-      return unprotectedSetTimes(fsd, inode, mtime, atime, force,
-                                 latestSnapshotId);
+      return unprotectedSetTimes(fsd, iip, mtime, atime, force);
     } finally {
       fsd.writeUnlock();
     }
   }
 
-  static boolean unprotectedSetTimes(
-      FSDirectory fsd, String src, long mtime, long atime, boolean force)
-      throws UnresolvedLinkException, QuotaExceededException {
-    assert fsd.hasWriteLock();
-    final INodesInPath i = fsd.getINodesInPath(src, true);
-    return unprotectedSetTimes(fsd, i.getLastINode(), mtime, atime,
-                               force, i.getLatestSnapshotId());
-  }
-
   /**
    * See {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
    *     long, long, StorageType)}
@@ -339,7 +315,8 @@ public class FSDirAttrOp {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
   static INodeDirectory unprotectedSetQuota(
-      FSDirectory fsd, String src, long nsQuota, long ssQuota, StorageType type)
+      FSDirectory fsd, INodesInPath iip, long nsQuota,
+      long ssQuota, StorageType type)
       throws FileNotFoundException, PathIsNotDirectoryException,
       QuotaExceededException, UnresolvedLinkException,
       SnapshotAccessControlException, UnsupportedActionException {
@@ -363,9 +340,8 @@ public class FSDirAttrOp {
           nsQuota);
     }
 
-    String srcs = FSDirectory.normalizePath(src);
-    final INodesInPath iip = fsd.getINodesInPath4Write(srcs, true);
-    INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), srcs);
+    INodeDirectory dirNode =
+        INodeDirectory.valueOf(iip.getLastINode(), iip.getPath());
     if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Cannot clear namespace quota on root.");
     } else { // a directory inode
@@ -401,13 +377,12 @@ public class FSDirAttrOp {
   }
 
   static BlockInfo[] unprotectedSetReplication(
-      FSDirectory fsd, String src, short replication)
+      FSDirectory fsd, INodesInPath iip, short replication)
       throws QuotaExceededException, UnresolvedLinkException,
       SnapshotAccessControlException, UnsupportedActionException {
     assert fsd.hasWriteLock();
 
     final BlockManager bm = fsd.getBlockManager();
-    final INodesInPath iip = fsd.getINodesInPath4Write(src, true);
     final INode inode = iip.getLastINode();
     if (inode == null || !inode.isFile() || inode.asFile().isStriped()) {
       // TODO we do not support replication on stripe layout files yet
@@ -438,10 +413,10 @@ public class FSDirAttrOp {
     if (oldBR != -1) {
       if (oldBR > targetReplication) {
         FSDirectory.LOG.info("Decreasing replication from {} to {} for {}",
-                             oldBR, targetReplication, src);
+                             oldBR, targetReplication, iip.getPath());
       } else {
         FSDirectory.LOG.info("Increasing replication from {} to {} for {}",
-                             oldBR, targetReplication, src);
+                             oldBR, targetReplication, iip.getPath());
       }
     }
     return file.getBlocks();
@@ -476,8 +451,7 @@ public class FSDirAttrOp {
       }
       inode.asFile().setStoragePolicyID(policyId, snapshotId);
     } else if (inode.isDirectory()) {
-      setDirStoragePolicy(fsd, inode.asDirectory(), policyId,
-          snapshotId);
+      setDirStoragePolicy(fsd, iip, policyId);
     } else {
       throw new FileNotFoundException(iip.getPath()
           + " is not a file or directory");
@@ -485,8 +459,8 @@ public class FSDirAttrOp {
   }
 
   private static void setDirStoragePolicy(
-      FSDirectory fsd, INodeDirectory inode, byte policyId,
-      int latestSnapshotId) throws IOException {
+      FSDirectory fsd, INodesInPath iip, byte policyId) throws IOException {
+    INode inode = FSDirectory.resolveLastINode(iip);
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
     XAttr xAttr = BlockStoragePolicySuite.buildXAttr(policyId);
     List<XAttr> newXAttrs = null;
@@ -501,14 +475,16 @@ public class FSDirAttrOp {
           Arrays.asList(xAttr),
           EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
     }
-    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
+    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, iip.getLatestSnapshotId());
   }
 
-  private static boolean unprotectedSetTimes(
-      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
-      int latest) throws QuotaExceededException {
+  static boolean unprotectedSetTimes(
+      FSDirectory fsd, INodesInPath iip, long mtime, long atime, boolean force)
+          throws QuotaExceededException {
     assert fsd.hasWriteLock();
     boolean status = false;
+    INode inode = iip.getLastINode();
+    int latest = iip.getLatestSnapshotId();
     if (mtime != -1) {
       inode = inode.setModificationTime(mtime, latest);
       status = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b9d5fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 9c72a86..09201cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -521,10 +521,12 @@ public class FSEditLogLoader {
     }
     case OP_SET_REPLICATION: {
       SetReplicationOp setReplicationOp = (SetReplicationOp)op;
+      String src = renameReservedPathsOnUpgrade(
+          setReplicationOp.path, logVersion);
+      INodesInPath iip = fsDir.getINodesInPath4Write(src);
       short replication = fsNamesys.getBlockManager().adjustReplication(
           setReplicationOp.replication);
-      FSDirAttrOp.unprotectedSetReplication(fsDir, renameReservedPathsOnUpgrade(
-          setReplicationOp.path, logVersion), replication);
+      FSDirAttrOp.unprotectedSetReplication(fsDir, iip, replication);
       break;
     }
     case OP_CONCAT_DELETE: {
@@ -589,52 +591,66 @@ public class FSEditLogLoader {
     }
     case OP_SET_PERMISSIONS: {
       SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op;
-      FSDirAttrOp.unprotectedSetPermission(fsDir, renameReservedPathsOnUpgrade(
-          setPermissionsOp.src, logVersion), setPermissionsOp.permissions);
+      final String src =
+          renameReservedPathsOnUpgrade(setPermissionsOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetPermission(fsDir, iip,
+          setPermissionsOp.permissions);
       break;
     }
     case OP_SET_OWNER: {
       SetOwnerOp setOwnerOp = (SetOwnerOp)op;
-      FSDirAttrOp.unprotectedSetOwner(
-          fsDir, renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          setOwnerOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetOwner(fsDir, iip,
           setOwnerOp.username, setOwnerOp.groupname);
       break;
     }
     case OP_SET_NS_QUOTA: {
       SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
-      FSDirAttrOp.unprotectedSetQuota(
-          fsDir, renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          setNSQuotaOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetQuota(fsDir, iip,
           setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET, null);
       break;
     }
     case OP_CLEAR_NS_QUOTA: {
       ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
-      FSDirAttrOp.unprotectedSetQuota(
-          fsDir, renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          clearNSQuotaOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetQuota(fsDir, iip,
           HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET, null);
       break;
     }
-
-    case OP_SET_QUOTA:
+    case OP_SET_QUOTA: {
       SetQuotaOp setQuotaOp = (SetQuotaOp) op;
-      FSDirAttrOp.unprotectedSetQuota(fsDir,
-          renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          setQuotaOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetQuota(fsDir, iip,
           setQuotaOp.nsQuota, setQuotaOp.dsQuota, null);
       break;
-
-    case OP_SET_QUOTA_BY_STORAGETYPE:
-        FSEditLogOp.SetQuotaByStorageTypeOp setQuotaByStorageTypeOp =
+    }
+    case OP_SET_QUOTA_BY_STORAGETYPE: {
+      FSEditLogOp.SetQuotaByStorageTypeOp setQuotaByStorageTypeOp =
           (FSEditLogOp.SetQuotaByStorageTypeOp) op;
-        FSDirAttrOp.unprotectedSetQuota(fsDir,
-          renameReservedPathsOnUpgrade(setQuotaByStorageTypeOp.src, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          setQuotaByStorageTypeOp.src, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetQuota(fsDir, iip,
           HdfsConstants.QUOTA_DONT_SET, setQuotaByStorageTypeOp.dsQuota,
           setQuotaByStorageTypeOp.type);
-        break;
-
+      break;
+    }
     case OP_TIMES: {
       TimesOp timesOp = (TimesOp)op;
-      FSDirAttrOp.unprotectedSetTimes(
-          fsDir, renameReservedPathsOnUpgrade(timesOp.path, logVersion),
+      final String src = renameReservedPathsOnUpgrade(
+          timesOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src);
+      FSDirAttrOp.unprotectedSetTimes(fsDir, iip,
           timesOp.mtime, timesOp.atime, true);
       break;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b9d5fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fb3b375..34fb8b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1798,8 +1798,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         boolean updateAccessTime = inode != null &&
             now > inode.getAccessTime() + dir.getAccessTimePrecision();
         if (!isInSafeMode() && updateAccessTime) {
-          boolean changed = FSDirAttrOp.setTimes(dir,
-              inode, -1, now, false, iip.getLatestSnapshotId());
+          boolean changed = FSDirAttrOp.setTimes(dir, iip, -1, now, false);
           if (changed) {
             getEditLog().logTimes(src, -1, now);
           }
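
The changes above all follow one pattern: the edit-log replay code resolves the (possibly upgrade-renamed) path into an INodesInPath once, and FSDirAttrOp then derives the target inode and the latest snapshot id from that resolved object instead of taking them as separate arguments. Below is a minimal, self-contained sketch of that shape; Inode, ResolvedPath, AttrOps and ReplayTimesOpSketch are illustrative stand-ins and not the HDFS classes.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for illustration only; not HDFS types.
class Inode {
    String name;
    long mtime;
    long atime;
    Inode(String name) { this.name = name; }
}

class ResolvedPath {                       // plays the role of INodesInPath
    private final Inode last;
    private final int latestSnapshotId;
    ResolvedPath(Inode last, int latestSnapshotId) {
        this.last = last;
        this.latestSnapshotId = latestSnapshotId;
    }
    Inode getLastInode() { return last; }
    int getLatestSnapshotId() { return latestSnapshotId; }
}

class AttrOps {
    /** Mirrors the reshaped setTimes: inode and snapshot id come from the
     *  resolved path rather than being passed as separate parameters. */
    static boolean setTimes(ResolvedPath rp, long mtime, long atime) {
        Inode inode = rp.getLastInode();
        int latest = rp.getLatestSnapshotId();   // would pin the change to a snapshot
        boolean changed = false;
        if (mtime != -1) { inode.mtime = mtime; changed = true; }
        if (atime != -1) { inode.atime = atime; changed = true; }
        return changed && latest >= 0;
    }
}

public class ReplayTimesOpSketch {
    public static void main(String[] args) {
        // Resolve once, then hand the resolved object to the attribute op.
        Map<String, Inode> namespace = new HashMap<>();
        namespace.put("/d/f", new Inode("f"));
        ResolvedPath iip = new ResolvedPath(namespace.get("/d/f"), 7);
        System.out.println("changed = " + AttrOps.setTimes(iip, 1000L, 2000L));
    }
}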




[07/52] [abbrv] hadoop git commit: HADOOP-13688. Stop bundling HTML source code in javadoc JARs.

Posted by cn...@apache.org.
HADOOP-13688. Stop bundling HTML source code in javadoc JARs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72a2ae64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72a2ae64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72a2ae64

Branch: refs/heads/HADOOP-13037
Commit: 72a2ae6452e615c66d10829da38737896814e02b
Parents: 2d46c3f
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Oct 6 11:19:38 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Oct 6 11:19:38 2016 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 1 -
 hadoop-project-dist/pom.xml                    | 1 -
 pom.xml                                        | 1 -
 3 files changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72a2ae64/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index df1d63b..0aa5fc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -299,7 +299,6 @@
             </goals>
             <phase>site</phase>
             <configuration>
-              <linksource>true</linksource>
               <quiet>true</quiet>
               <verbose>false</verbose>
               <source>${maven.compile.source}</source>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72a2ae64/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index bf4fac7..e64f173 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -116,7 +116,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
         <configuration>
-          <linksource>true</linksource>
           <maxmemory>512m</maxmemory>
           <quiet>true</quiet>
           <verbose>false</verbose>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72a2ae64/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 250f5a1..1a3cd28 100644
--- a/pom.xml
+++ b/pom.xml
@@ -429,7 +429,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
             <id>aggregate</id>
             <configuration>
               <maxmemory>1024m</maxmemory>
-              <linksource>true</linksource>
               <quiet>true</quiet>
               <verbose>false</verbose>
               <source>${maven.compile.source}</source>




[11/52] [abbrv] hadoop git commit: HADOOP-13689. Do not attach javadoc and sources jars during non-dist build.

Posted by cn...@apache.org.
HADOOP-13689. Do not attach javadoc and sources jars during non-dist build.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf372173
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf372173
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf372173

Branch: refs/heads/HADOOP-13037
Commit: bf372173d0f7cb97b62556cbd199a075254b96e6
Parents: 48b9d5f
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Oct 6 15:08:24 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Oct 6 15:08:24 2016 -0700

----------------------------------------------------------------------
 hadoop-project-dist/pom.xml | 16 ----------------
 1 file changed, 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf372173/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index e64f173..4423d94 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -88,22 +88,6 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>jar</goal>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <attach>true</attach>
-        </configuration>
-      </plugin>
-      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
         <configuration>




[19/52] [abbrv] hadoop git commit: HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

Posted by cn...@apache.org.
HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3565c9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3565c9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3565c9af

Branch: refs/heads/HADOOP-13037
Commit: 3565c9af17ab05bf9e7f68b71b6c6850df772bb9
Parents: 69620f95
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Oct 7 14:14:47 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Oct 7 14:15:59 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirDeleteOp.java     | 63 ++++++++++----------
 .../hdfs/server/namenode/FSEditLogLoader.java   | 11 ++--
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ce..328ce79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
     FSNamesystem fsn = fsd.getFSNamesystem();
     fsd.writeLock();
     try {
-      if (deleteAllowed(iip, iip.getPath()) ) {
+      if (deleteAllowed(iip)) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
         FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-    src = iip.getPath();
-    if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
     }
+
+    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                           FsAction.ALL, true);
     }
-    if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, src);
+    if (fsd.isNonEmptyDirectory(iip)) {
+      if (!recursive) {
+        throw new PathIsNotEmptyDirectoryException(
+            iip.getPath() + " is non empty");
+      }
+      checkProtectedDescendants(fsd, iip);
     }
 
-    return deleteInternal(fsn, src, iip, logRetryCache);
+    return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
@@ -126,17 +130,14 @@ class FSDirDeleteOp {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    */
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
       throws IOException {
     assert fsd.hasWriteLock();
     FSNamesystem fsn = fsd.getFSNamesystem();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<Long> removedUCFiles = new ChunkedArrayList<>();
-
-    final INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), false);
-    if (!deleteAllowed(iip, src)) {
+    if (!deleteAllowed(iip)) {
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
    * <p>
    * For small directory or file the deletion is done in one shot.
    * @param fsn namespace
-   * @param src path name to be deleted
    * @param iip the INodesInPath instance containing all the INodes for the path
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
    * @throws IOException
    */
   static BlocksMapUpdateInfo deleteInternal(
-      FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+      FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     assert fsn.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -193,14 +189,14 @@ class FSDirDeleteOp {
     if (filesRemoved < 0) {
       return null;
     }
-    fsd.getEditLog().logDelete(src, mtime, logRetryCache);
+    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
     incrDeletedFileCount(filesRemoved);
 
     fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
 
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
-                                        + src +" is removed");
+      NameNode.stateChangeLog.debug(
+          "DIR* Namesystem.delete: " + iip.getPath() +" is removed");
     }
     return collectedBlocks;
   }
@@ -209,19 +205,18 @@ class FSDirDeleteOp {
     NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
 
-  private static boolean deleteAllowed(final INodesInPath iip,
-      final String src) {
+  private static boolean deleteAllowed(final INodesInPath iip) {
     if (iip.length() < 1 || iip.getLastINode() == null) {
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "DIR* FSDirectory.unprotectedDelete: failed to remove "
-                + src + " because it does not exist");
+                + iip.getPath() + " because it does not exist");
       }
       return false;
     } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn(
-          "DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
-              " because the root is not allowed to be deleted");
+          "DIR* FSDirectory.unprotectedDelete: failed to remove " +
+              iip.getPath() + " because the root is not allowed to be deleted");
       return false;
     }
     return true;
@@ -278,15 +273,19 @@ class FSDirDeleteOp {
    * Throw if the given directory has any non-empty protected descendants
    * (including itself).
    *
-   * @param src directory whose descendants are to be checked. The caller
-   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @param iip directory whose descendants are to be checked.
    * @throws AccessControlException if a non-empty protected descendant
    *                                was found.
    */
-  private static void checkProtectedDescendants(FSDirectory fsd, String src)
-      throws AccessControlException, UnresolvedLinkException {
+  private static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+          throws AccessControlException, UnresolvedLinkException {
     final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+    if (protectedDirs.isEmpty()) {
+      return;
+    }
 
+    String src = iip.getPath();
     // Is src protected? Caller has already checked it is non-empty.
     if (protectedDirs.contains(src)) {
       throw new AccessControlException(
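
The reordered delete() above rejects exact reserved names before resolving the path, resolves the path once, and only walks the protected-directory set for recursive deletes of non-empty directories, returning early when no protected directories are configured. A small self-contained sketch of that check ordering follows; the class name, exception choices and path strings are illustrative assumptions, not the HDFS types.

import java.util.Set;
import java.util.TreeSet;

public class DeletePreconditionSketch {

    static final Set<String> PROTECTED_DIRS = new TreeSet<>();

    /** Order of checks mirrored from the patch above (names are illustrative). */
    static void checkDelete(String path, boolean recursive, boolean nonEmpty) {
        if (".reserved".equals(path)) {           // reserved-name check done up front
            throw new IllegalArgumentException("invalid path: " + path);
        }
        // (permission checks on the resolved path would go here)
        if (nonEmpty) {
            if (!recursive) {
                throw new IllegalStateException(path + " is non empty");
            }
            checkProtectedDescendants(path);       // only reached for recursive deletes
        }
    }

    static void checkProtectedDescendants(String path) {
        if (PROTECTED_DIRS.isEmpty()) {
            return;                                // early exit added by the patch
        }
        if (PROTECTED_DIRS.contains(path)) {
            throw new IllegalStateException("Cannot delete protected dir " + path);
        }
    }

    public static void main(String[] args) {
        PROTECTED_DIRS.add("/important");
        checkDelete("/tmp/x", true, true);         // allowed
        try {
            checkDelete("/important", true, true);
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}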

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 09201cf..8abdba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -356,7 +356,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -565,10 +565,11 @@ public class FSEditLogLoader {
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      FSDirDeleteOp.deleteForEditLog(
-          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
-          deleteOp.timestamp);
-      
+      final String src = renameReservedPathsOnUpgrade(
+          deleteOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
+      FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
+
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 34fb8b6..0f4f14c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3805,7 +3805,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
           BlocksMapUpdateInfo toRemoveBlocks =
               FSDirDeleteOp.deleteInternal(
-                  FSNamesystem.this, bc.getName(),
+                  FSNamesystem.this,
                   INodesInPath.fromINode((INodeFile) bc), false);
           changed |= toRemoveBlocks != null;
           if (toRemoveBlocks != null) {




[25/52] [abbrv] hadoop git commit: HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete "/". Contributed by Genmao Yu

Posted by cn...@apache.org.
HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete "/". Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c31b5e61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c31b5e61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c31b5e61

Branch: refs/heads/HADOOP-13037
Commit: c31b5e61b1f09949548116309218a2b3e9c0beda
Parents: a57bba4
Author: Kai Zheng <ka...@intel.com>
Authored: Sat Oct 8 17:35:59 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sat Oct 8 17:35:59 2016 +0600

----------------------------------------------------------------------
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      | 39 +++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31b5e61/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 81e038d..3b266c8 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 
@@ -53,6 +54,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(AliyunOSSFileSystem.class);
   private URI uri;
+  private String bucket;
   private Path workingDir;
   private AliyunOSSFileSystemStore store;
   private int maxKeys;
@@ -124,11 +126,20 @@ public class AliyunOSSFileSystem extends FileSystem {
   private boolean innerDelete(FileStatus status, boolean recursive)
       throws IOException {
     Path f = status.getPath();
+    String p = f.toUri().getPath();
+    FileStatus[] statuses;
+    // indicating root directory "/".
+    if (p.equals("/")) {
+      statuses = listStatus(status.getPath());
+      boolean isEmptyDir = statuses.length <= 0;
+      return rejectRootDirectoryDelete(isEmptyDir, recursive);
+    }
+
     String key = pathToKey(f);
     if (status.isDirectory()) {
       if (!recursive) {
-        FileStatus[] statuses = listStatus(status.getPath());
         // Check whether it is an empty directory or not
+        statuses = listStatus(status.getPath());
         if (statuses.length > 0) {
           throw new IOException("Cannot remove directory " + f +
               ": It is not empty!");
@@ -148,6 +159,31 @@ public class AliyunOSSFileSystem extends FileSystem {
     return true;
   }
 
+  /**
+   * Implements the specific logic to reject root directory deletion.
+   * The caller must return the result of this call, rather than
+   * attempt to continue with the delete operation: deleting root
+   * directories is never allowed. This method simply implements
+   * the policy of when to return an exit code versus raise an exception.
+   * @param isEmptyDir empty directory or not
+   * @param recursive recursive flag from command
+   * @return a return code for the operation
+   * @throws PathIOException if the operation was explicitly rejected.
+   */
+  private boolean rejectRootDirectoryDelete(boolean isEmptyDir,
+      boolean recursive) throws IOException {
+    LOG.info("oss delete the {} root directory of {}", bucket, recursive);
+    if (isEmptyDir) {
+      return true;
+    }
+    if (recursive) {
+      return false;
+    } else {
+      // reject
+      throw new PathIOException(bucket, "Cannot delete root path");
+    }
+  }
+
   private void createFakeDirectoryIfNecessary(Path f) throws IOException {
     String key = pathToKey(f);
     if (StringUtils.isNotEmpty(key) && !exists(f)) {
@@ -226,6 +262,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
 
+    bucket = name.getHost();
     uri = java.net.URI.create(name.getScheme() + "://" + name.getAuthority());
     workingDir = new Path("/user",
         System.getProperty("user.name")).makeQualified(uri, null);
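
The rejectRootDirectoryDelete javadoc above describes a three-way policy: deleting an empty root is a harmless no-op that reports success, a recursive delete of a non-empty root returns false, and a non-recursive delete of a non-empty root is rejected with an exception. Here is a standalone sketch of that decision table, with a plain IOException standing in for PathIOException and a hypothetical bucket name.

import java.io.IOException;

public class RootDeletePolicySketch {

    /** Mirrors the policy described above; simplified types, not the OSS API. */
    static boolean rejectRootDirectoryDelete(String bucket, boolean isEmptyDir,
            boolean recursive) throws IOException {
        if (isEmptyDir) {
            return true;                 // empty root: succeed without doing anything
        }
        if (recursive) {
            return false;                // non-empty root, recursive: refuse via return code
        }
        // non-empty root, non-recursive: refuse via exception
        throw new IOException("Cannot delete root path of bucket " + bucket);
    }

    public static void main(String[] args) throws IOException {
        System.out.println(rejectRootDirectoryDelete("demo-bucket", true, false));   // true
        System.out.println(rejectRootDirectoryDelete("demo-bucket", false, true));   // false
        try {
            rejectRootDirectoryDelete("demo-bucket", false, false);
        } catch (IOException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}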




[40/52] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

Posted by cn...@apache.org.
HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96b12662
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96b12662
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96b12662

Branch: refs/heads/HADOOP-13037
Commit: 96b12662ea76e3ded4ef13944fc8df206cfb4613
Parents: 0773ffd
Author: Lei Xu <le...@apache.org>
Authored: Mon Oct 10 15:28:19 2016 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Mon Oct 10 15:30:03 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/common/Storage.java      |  22 ++
 .../server/datanode/BlockPoolSliceStorage.java  |  20 +-
 .../hdfs/server/datanode/BlockScanner.java      |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  34 +-
 .../hdfs/server/datanode/DataStorage.java       |  34 +-
 .../hdfs/server/datanode/DirectoryScanner.java  | 320 +------------------
 .../hdfs/server/datanode/DiskBalancer.java      |  25 +-
 .../hdfs/server/datanode/LocalReplica.java      |   2 +-
 .../hdfs/server/datanode/ReplicaInfo.java       |   2 +-
 .../hdfs/server/datanode/StorageLocation.java   |  32 +-
 .../hdfs/server/datanode/VolumeScanner.java     |  27 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   5 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 234 +++++++++++++-
 .../impl/FsDatasetAsyncDiskService.java         |  40 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 136 ++++----
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 233 ++++++++++++--
 .../fsdataset/impl/FsVolumeImplBuilder.java     |  65 ++++
 .../datanode/fsdataset/impl/FsVolumeList.java   |  44 +--
 .../impl/RamDiskAsyncLazyPersistService.java    |  79 +++--
 .../fsdataset/impl/VolumeFailureInfo.java       |  13 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../TestNameNodePrunesMissingStorages.java      |  15 +-
 .../server/datanode/SimulatedFSDataset.java     |  46 ++-
 .../hdfs/server/datanode/TestBlockScanner.java  |   3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  15 +-
 .../datanode/TestDataNodeVolumeFailure.java     |  12 +-
 .../TestDataNodeVolumeFailureReporting.java     |  10 +
 .../server/datanode/TestDirectoryScanner.java   |  76 +++--
 .../hdfs/server/datanode/TestDiskError.java     |   2 +-
 .../extdataset/ExternalDatasetImpl.java         |  10 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |  44 ++-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |   9 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  69 ++--
 .../fsdataset/impl/TestFsVolumeList.java        |  83 +++--
 .../TestDiskBalancerWithMockMover.java          |   4 +-
 35 files changed, 1062 insertions(+), 713 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 9218e9d..e55de35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.ToolRunner;
@@ -269,11 +270,17 @@ public abstract class Storage extends StorageInfo {
 
     private String storageUuid = null;      // Storage directory identifier.
     
+    private final StorageLocation location;
     public StorageDirectory(File dir) {
       // default dirType is null
       this(dir, null, false);
     }
     
+    public StorageDirectory(StorageLocation location) {
+      // default dirType is null
+      this(location.getFile(), null, false, location);
+    }
+
     public StorageDirectory(File dir, StorageDirType dirType) {
       this(dir, dirType, false);
     }
@@ -294,11 +301,22 @@ public abstract class Storage extends StorageInfo {
      *          disables locking on the storage directory, false enables locking
      */
     public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
+      this(dir, dirType, isShared, null);
+    }
+
+    public StorageDirectory(File dir, StorageDirType dirType,
+        boolean isShared, StorageLocation location) {
       this.root = dir;
       this.lock = null;
       this.dirType = dirType;
       this.isShared = isShared;
+      this.location = location;
+      assert location == null ||
+          dir.getAbsolutePath().startsWith(
+              location.getFile().getAbsolutePath()):
+            "The storage location and directory should be equal";
     }
+
     
     /**
      * Get root directory of this storage
@@ -861,6 +879,10 @@ public abstract class Storage extends StorageInfo {
       }
       return false;
     }
+
+    public StorageLocation getStorageLocation() {
+      return location;
+    }
   }
 
   /**
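
The Storage change above lets a StorageDirectory remember the StorageLocation it was created from, asserting that the directory's path lives under the location's path. A simplified sketch of that relationship, using stand-in Location and Dir classes rather than the real Storage types:

import java.io.File;

// Simplified stand-ins for illustration only; not the HDFS classes.
class Location {
    private final File root;
    Location(File root) { this.root = root; }
    File getFile() { return root; }
    @Override public String toString() { return root.getAbsolutePath(); }
}

class Dir {
    private final File root;
    private final Location location;

    Dir(File dir, Location location) {
        this.root = dir;
        this.location = location;
        // Same invariant as the new constructor above: if a location is
        // supplied, the directory must live under that location's path.
        assert location == null
            || dir.getAbsolutePath().startsWith(location.getFile().getAbsolutePath())
            : "The storage location and directory should be equal";
    }

    Location getStorageLocation() { return location; }
    File getRoot() { return root; }
}

public class StorageLocationSketch {
    public static void main(String[] args) {
        Location loc = new Location(new File("/data/dn1"));
        Dir current = new Dir(new File("/data/dn1/current/BP-1"), loc);
        System.out.println(current.getRoot() + " belongs to " + current.getStorageLocation());
    }
}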

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index fd89611..e3b6da1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -147,10 +147,10 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException
    */
   private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
-      File dataDir, StartupOption startOpt,
+      File dataDir, StorageLocation location, StartupOption startOpt,
       List<Callable<StorageDirectory>> callables, Configuration conf)
           throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, true);
+    StorageDirectory sd = new StorageDirectory(dataDir, null, true, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -208,9 +208,9 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> loadBpStorageDirectories(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables, Configuration conf)
-          throws IOException {
+      Collection<File> dataDirs, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
+      Configuration conf) throws IOException {
     List<StorageDirectory> succeedDirs = Lists.newArrayList();
     try {
       for (File dataDir : dataDirs) {
@@ -220,7 +220,7 @@ public class BlockPoolSliceStorage extends Storage {
                   "attempt to load an used block storage: " + dataDir);
         }
         final StorageDirectory sd = loadStorageDirectory(
-            nsInfo, dataDir, startOpt, callables, conf);
+            nsInfo, dataDir, location, startOpt, callables, conf);
         succeedDirs.add(sd);
       }
     } catch (IOException e) {
@@ -244,12 +244,12 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables, Configuration conf)
-          throws IOException {
+      Collection<File> dataDirs, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
+      Configuration conf) throws IOException {
     LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
     final List<StorageDirectory> loaded = loadBpStorageDirectories(
-        nsInfo, dataDirs, startOpt, callables, conf);
+        nsInfo, dataDirs, location, startOpt, callables, conf);
     for (StorageDirectory sd : loaded) {
       addStorageDir(sd);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 456dcc1..21484fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -201,17 +201,17 @@ public class BlockScanner {
       FsVolumeSpi volume = ref.getVolume();
       if (!isEnabled()) {
         LOG.debug("Not adding volume scanner for {}, because the block " +
-            "scanner is disabled.", volume.getBasePath());
+            "scanner is disabled.", volume);
         return;
       }
       VolumeScanner scanner = scanners.get(volume.getStorageID());
       if (scanner != null) {
         LOG.error("Already have a scanner for volume {}.",
-            volume.getBasePath());
+            volume);
         return;
       }
       LOG.debug("Adding scanner for volume {} (StorageID {})",
-          volume.getBasePath(), volume.getStorageID());
+          volume, volume.getStorageID());
       scanner = new VolumeScanner(conf, datanode, ref);
       scanner.start();
       scanners.put(volume.getStorageID(), scanner);
@@ -245,7 +245,7 @@ public class BlockScanner {
       return;
     }
     LOG.info("Removing scanner for volume {} (StorageID {})",
-        volume.getBasePath(), volume.getStorageID());
+        volume, volume.getStorageID());
     scanner.shutdown();
     scanners.remove(volume.getStorageID());
     Uninterruptibles.joinUninterruptibly(scanner, 5, TimeUnit.MINUTES);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index dd7e426..cb8e308 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -58,7 +58,6 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -78,7 +77,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -791,11 +789,7 @@ public class DataNode extends ReconfigurableBase
     if (locations.isEmpty()) {
       return;
     }
-    Set<File> volumesToRemove = new HashSet<>();
-    for (StorageLocation loc : locations) {
-      volumesToRemove.add(loc.getFile().getAbsoluteFile());
-    }
-    removeVolumes(volumesToRemove, true);
+    removeVolumes(locations, true);
   }
 
   /**
@@ -814,26 +808,22 @@ public class DataNode extends ReconfigurableBase
    * @throws IOException
    */
   private synchronized void removeVolumes(
-      final Set<File> absoluteVolumePaths, boolean clearFailure)
+      final Collection<StorageLocation> storageLocations, boolean clearFailure)
       throws IOException {
-    for (File vol : absoluteVolumePaths) {
-      Preconditions.checkArgument(vol.isAbsolute());
-    }
-
-    if (absoluteVolumePaths.isEmpty()) {
+    if (storageLocations.isEmpty()) {
       return;
     }
 
     LOG.info(String.format("Deactivating volumes (clear failure=%b): %s",
-        clearFailure, Joiner.on(",").join(absoluteVolumePaths)));
+        clearFailure, Joiner.on(",").join(storageLocations)));
 
     IOException ioe = null;
     // Remove volumes and block infos from FsDataset.
-    data.removeVolumes(absoluteVolumePaths, clearFailure);
+    data.removeVolumes(storageLocations, clearFailure);
 
     // Remove volumes from DataStorage.
     try {
-      storage.removeVolumes(absoluteVolumePaths);
+      storage.removeVolumes(storageLocations);
     } catch (IOException e) {
       ioe = e;
     }
@@ -841,7 +831,7 @@ public class DataNode extends ReconfigurableBase
     // Set configuration and dataDirs to reflect volume changes.
     for (Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext(); ) {
       StorageLocation loc = it.next();
-      if (absoluteVolumePaths.contains(loc.getFile().getAbsoluteFile())) {
+      if (storageLocations.contains(loc)) {
         it.remove();
       }
     }
@@ -3242,18 +3232,18 @@ public class DataNode extends ReconfigurableBase
    * Check the disk error
    */
   private void checkDiskError() {
-    Set<File> unhealthyDataDirs = data.checkDataDir();
-    if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
+    Set<StorageLocation> unhealthyLocations = data.checkDataDir();
+    if (unhealthyLocations != null && !unhealthyLocations.isEmpty()) {
       try {
         // Remove all unhealthy volumes from DataNode.
-        removeVolumes(unhealthyDataDirs, false);
+        removeVolumes(unhealthyLocations, false);
       } catch (IOException e) {
         LOG.warn("Error occurred when removing unhealthy storage dirs: "
             + e.getMessage(), e);
       }
       StringBuilder sb = new StringBuilder("DataNode failed volumes:");
-      for (File dataDir : unhealthyDataDirs) {
-        sb.append(dataDir.getAbsolutePath() + ";");
+      for (StorageLocation location : unhealthyLocations) {
+        sb.append(location + ";");
       }
       handleDiskError(sb.toString());
     }
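
With the File-based bookkeeping removed, DataNode.removeVolumes above matches the configured data directories against the StorageLocations being removed rather than against absolute File paths. A self-contained sketch of that matching step, with a minimal Loc class standing in for StorageLocation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;

// Illustrative stand-in for a storage location keyed by its URI string.
class Loc {
    final String uri;
    Loc(String uri) { this.uri = uri; }
    @Override public boolean equals(Object o) {
        return o instanceof Loc && ((Loc) o).uri.equals(uri);
    }
    @Override public int hashCode() { return Objects.hash(uri); }
    @Override public String toString() { return uri; }
}

public class RemoveVolumesSketch {

    /** Drop every configured data dir whose location is being removed,
     *  matching by location equality instead of by absolute File path. */
    static void removeVolumes(List<Loc> dataDirs, Collection<Loc> toRemove) {
        if (toRemove.isEmpty()) {
            return;
        }
        for (Iterator<Loc> it = dataDirs.iterator(); it.hasNext(); ) {
            if (toRemove.contains(it.next())) {
                it.remove();
            }
        }
    }

    public static void main(String[] args) {
        List<Loc> dataDirs = new ArrayList<>(Arrays.asList(
            new Loc("file:/data/dn1"), new Loc("file:/data/dn2")));
        removeVolumes(dataDirs, Arrays.asList(new Loc("file:/data/dn2")));
        System.out.println(dataDirs);   // [file:/data/dn1]
    }
}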

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 7e620c2..7c9bea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -263,9 +263,10 @@ public class DataStorage extends Storage {
   }
 
   private StorageDirectory loadStorageDirectory(DataNode datanode,
-      NamespaceInfo nsInfo, File dataDir, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables) throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, false);
+      NamespaceInfo nsInfo, File dataDir, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables)
+          throws IOException {
+    StorageDirectory sd = new StorageDirectory(dataDir, null, false, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -310,7 +311,7 @@ public class DataStorage extends Storage {
    * builder later.
    *
    * @param datanode DataNode object.
-   * @param volume the root path of a storage directory.
+   * @param location the StorageLocation for the storage directory.
    * @param nsInfos an array of namespace infos.
    * @return a VolumeBuilder that holds the metadata of this storage directory
    * and can be added to DataStorage later.
@@ -318,8 +319,10 @@ public class DataStorage extends Storage {
    *
    * Note that if there is IOException, the state of DataStorage is not modified.
    */
-  public VolumeBuilder prepareVolume(DataNode datanode, File volume,
-      List<NamespaceInfo> nsInfos) throws IOException {
+  public VolumeBuilder prepareVolume(DataNode datanode,
+      StorageLocation location, List<NamespaceInfo> nsInfos)
+          throws IOException {
+    File volume = location.getFile();
     if (containsStorageDir(volume)) {
       final String errorMessage = "Storage directory is in use";
       LOG.warn(errorMessage + ".");
@@ -327,7 +330,8 @@ public class DataStorage extends Storage {
     }
 
     StorageDirectory sd = loadStorageDirectory(
-        datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP, null);
+        datanode, nsInfos.get(0), volume, location,
+        StartupOption.HOTSWAP, null);
     VolumeBuilder builder =
         new VolumeBuilder(this, sd);
     for (NamespaceInfo nsInfo : nsInfos) {
@@ -338,7 +342,8 @@ public class DataStorage extends Storage {
 
       final BlockPoolSliceStorage bpStorage = getBlockPoolSliceStorage(nsInfo);
       final List<StorageDirectory> dirs = bpStorage.loadBpStorageDirectories(
-          nsInfo, bpDataDirs, StartupOption.HOTSWAP, null, datanode.getConf());
+          nsInfo, bpDataDirs, location, StartupOption.HOTSWAP,
+          null, datanode.getConf());
       builder.addBpStorageDirectories(nsInfo.getBlockPoolID(), dirs);
     }
     return builder;
@@ -407,7 +412,7 @@ public class DataStorage extends Storage {
           final List<Callable<StorageDirectory>> callables
               = Lists.newArrayList();
           final StorageDirectory sd = loadStorageDirectory(
-              datanode, nsInfo, root, startOpt, callables);
+              datanode, nsInfo, root, dataDir, startOpt, callables);
           if (callables.isEmpty()) {
             addStorageDir(sd);
             success.add(dataDir);
@@ -458,7 +463,8 @@ public class DataStorage extends Storage {
 
         final List<Callable<StorageDirectory>> callables = Lists.newArrayList();
         final List<StorageDirectory> dirs = bpStorage.recoverTransitionRead(
-            nsInfo, bpDataDirs, startOpt, callables, datanode.getConf());
+            nsInfo, bpDataDirs, dataDir, startOpt,
+            callables, datanode.getConf());
         if (callables.isEmpty()) {
           for(StorageDirectory sd : dirs) {
             success.add(sd);
@@ -498,9 +504,10 @@ public class DataStorage extends Storage {
    * @param dirsToRemove a set of storage directories to be removed.
    * @throws IOException if I/O error when unlocking storage directory.
    */
-  synchronized void removeVolumes(final Set<File> dirsToRemove)
+  synchronized void removeVolumes(
+      final Collection<StorageLocation> storageLocations)
       throws IOException {
-    if (dirsToRemove.isEmpty()) {
+    if (storageLocations.isEmpty()) {
       return;
     }
 
@@ -508,7 +515,8 @@ public class DataStorage extends Storage {
     for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
-      if (dirsToRemove.contains(sd.getRoot())) {
+      StorageLocation sdLocation = sd.getStorageLocation();
+      if (storageLocations.contains(sdLocation)) {
         // Remove the block pool level storage first.
         for (Map.Entry<String, BlockPoolSliceStorage> entry :
             this.bpStorageMap.entrySet()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index c50bfaf..58071dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -37,9 +36,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,10 +43,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
@@ -209,200 +204,6 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  /**
-   * Tracks the files and other information related to a block on the disk
-   * Missing file is indicated by setting the corresponding member
-   * to null.
-   * 
-   * Because millions of these structures may be created, we try to save
-   * memory here.  So instead of storing full paths, we store path suffixes.
-   * The block file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>
-   * So we don't need to store the volume path, since we already know what the
-   * volume is.
-   * 
-   * The metadata file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>_<genstamp>.meta
-   * So if we have a block file, there isn't any need to store the block path
-   * again.
-   * 
-   * The accessor functions take care of these manipulations.
-   */
-  static class ScanInfo implements Comparable<ScanInfo> {
-    private final long blockId;
-    
-    /**
-     * The block file path, relative to the volume's base directory.
-     * If there was no block file found, this may be null. If 'vol'
-     * is null, then this is the full path of the block file.
-     */
-    private final String blockSuffix;
-    
-    /**
-     * The suffix of the meta file path relative to the block file.
-     * If blockSuffix is null, then this will be the entire path relative
-     * to the volume base directory, or an absolute path if vol is also
-     * null.
-     */
-    private final String metaSuffix;
-
-    private final FsVolumeSpi volume;
-
-    /**
-     * Get the file's length in async block scan
-     */
-    private final long blockFileLength;
-
-    private final static Pattern CONDENSED_PATH_REGEX =
-        Pattern.compile("(?<!^)(\\\\|/){2,}");
-    
-    private final static String QUOTED_FILE_SEPARATOR = 
-        Matcher.quoteReplacement(File.separator);
-    
-    /**
-     * Get the most condensed version of the path.
-     *
-     * For example, the condensed version of /foo//bar is /foo/bar
-     * Unlike {@link File#getCanonicalPath()}, this will never perform I/O
-     * on the filesystem.
-     *
-     * @param path the path to condense
-     * @return the condensed path
-     */
-    private static String getCondensedPath(String path) {
-      return CONDENSED_PATH_REGEX.matcher(path).
-          replaceAll(QUOTED_FILE_SEPARATOR);
-    }
-
-    /**
-     * Get a path suffix.
-     *
-     * @param f            The file to get the suffix for.
-     * @param prefix       The prefix we're stripping off.
-     *
-     * @return             A suffix such that prefix + suffix = path to f
-     */
-    private static String getSuffix(File f, String prefix) {
-      String fullPath = getCondensedPath(f.getAbsolutePath());
-      if (fullPath.startsWith(prefix)) {
-        return fullPath.substring(prefix.length());
-      }
-      throw new RuntimeException(prefix + " is not a prefix of " + fullPath);
-    }
-
-    /**
-     * Create a ScanInfo object for a block. This constructor will examine
-     * the block data and meta-data files.
-     *
-     * @param blockId the block ID
-     * @param blockFile the path to the block data file
-     * @param metaFile the path to the block meta-data file
-     * @param vol the volume that contains the block
-     */
-    ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
-      this.blockId = blockId;
-      String condensedVolPath = vol == null ? null :
-        getCondensedPath(vol.getBasePath());
-      this.blockSuffix = blockFile == null ? null :
-        getSuffix(blockFile, condensedVolPath);
-      this.blockFileLength = (blockFile != null) ? blockFile.length() : 0; 
-      if (metaFile == null) {
-        this.metaSuffix = null;
-      } else if (blockFile == null) {
-        this.metaSuffix = getSuffix(metaFile, condensedVolPath);
-      } else {
-        this.metaSuffix = getSuffix(metaFile,
-            condensedVolPath + blockSuffix);
-      }
-      this.volume = vol;
-    }
-
-    /**
-     * Returns the block data file.
-     *
-     * @return the block data file
-     */
-    File getBlockFile() {
-      return (blockSuffix == null) ? null :
-        new File(volume.getBasePath(), blockSuffix);
-    }
-
-    /**
-     * Return the length of the data block. The length returned is the length
-     * cached when this object was created.
-     *
-     * @return the length of the data block
-     */
-    long getBlockFileLength() {
-      return blockFileLength;
-    }
-
-    /**
-     * Returns the block meta data file or null if there isn't one.
-     *
-     * @return the block meta data file
-     */
-    File getMetaFile() {
-      if (metaSuffix == null) {
-        return null;
-      } else if (blockSuffix == null) {
-        return new File(volume.getBasePath(), metaSuffix);
-      } else {
-        return new File(volume.getBasePath(), blockSuffix + metaSuffix);
-      }
-    }
-
-    /**
-     * Returns the block ID.
-     *
-     * @return the block ID
-     */
-    long getBlockId() {
-      return blockId;
-    }
-
-    /**
-     * Returns the volume that contains the block that this object describes.
-     *
-     * @return the volume
-     */
-    FsVolumeSpi getVolume() {
-      return volume;
-    }
-
-    @Override // Comparable
-    public int compareTo(ScanInfo b) {
-      if (blockId < b.blockId) {
-        return -1;
-      } else if (blockId == b.blockId) {
-        return 0;
-      } else {
-        return 1;
-      }
-    }
-
-    @Override // Object
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (!(o instanceof ScanInfo)) {
-        return false;
-      }
-      return blockId == ((ScanInfo) o).blockId;
-    }
-
-    @Override // Object
-    public int hashCode() {
-      return (int)(blockId^(blockId>>>32));
-    }
-
-    public long getGenStamp() {
-      return metaSuffix != null ? Block.getGenerationStamp(
-          getMetaFile().getName()) : 
-            HdfsConstants.GRANDFATHER_GENERATION_STAMP;
-    }
-  }
 
   /**
    * Create a new directory scanner, but don't cycle it running yet.
@@ -644,7 +445,7 @@ public class DirectoryScanner implements Runnable {
             // There may be multiple on-disk records for the same block, don't increment
             // the memory record pointer if so.
             ScanInfo nextInfo = blockpoolReport[Math.min(d, blockpoolReport.length - 1)];
-            if (nextInfo.getBlockId() != info.blockId) {
+            if (nextInfo.getBlockId() != info.getBlockId()) {
               ++m;
             }
           } else {
@@ -763,19 +564,6 @@ public class DirectoryScanner implements Runnable {
   }
 
   /**
-   * Helper method to determine if a file name is consistent with a block.
-   * meta-data file
-   *
-   * @param blockId the block ID
-   * @param metaFile the file to check
-   * @return whether the file name is a block meta-data file name
-   */
-  private static boolean isBlockMetaFile(String blockId, String metaFile) {
-    return metaFile.startsWith(blockId)
-        && metaFile.endsWith(Block.METADATA_EXTENSION);
-  }
-
-  /**
    * The ReportCompiler class encapsulates the process of searching a datanode's
    * disks for block information.  It operates by performing a DFS of the
    * volume to discover block information.
@@ -784,7 +572,7 @@ public class DirectoryScanner implements Runnable {
    * ScanInfo object for it and adds that object to its report list.  The report
    * list is returned by the {@link #call()} method.
    */
-  private class ReportCompiler implements Callable<ScanInfoPerBlockPool> {
+  public class ReportCompiler implements Callable<ScanInfoPerBlockPool> {
     private final FsVolumeSpi volume;
     private final DataNode datanode;
     // Variable for tracking time spent running for throttling purposes
@@ -816,14 +604,12 @@ public class DirectoryScanner implements Runnable {
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
-        File bpFinalizedDir = volume.getFinalizedDir(bpid);
 
         perfTimer.start();
         throttleTimer.start();
 
         try {
-          result.put(bpid,
-              compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));
+          result.put(bpid, volume.compileReport(bpid, report, this));
         } catch (InterruptedException ex) {
           // Exit quickly and flag the scanner to do the same
           result = null;
@@ -834,106 +620,12 @@ public class DirectoryScanner implements Runnable {
     }
 
     /**
-     * Compile a list of {@link ScanInfo} for the blocks in the directory
-     * given by {@code dir}.
-     *
-     * @param vol the volume that contains the directory to scan
-     * @param bpFinalizedDir the root directory of the directory to scan
-     * @param dir the directory to scan
-     * @param report the list onto which blocks reports are placed
-     */
-    private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol,
-        File bpFinalizedDir, File dir, LinkedList<ScanInfo> report)
-        throws InterruptedException {
-
-      throttle();
-
-      List <String> fileNames;
-      try {
-        fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
-      } catch (IOException ioe) {
-        LOG.warn("Exception occured while compiling report: ", ioe);
-        // Initiate a check on disk failure.
-        datanode.checkDiskErrorAsync();
-        // Ignore this directory and proceed.
-        return report;
-      }
-      Collections.sort(fileNames);
-
-      /*
-       * Assumption: In the sorted list of files block file appears immediately
-       * before block metadata file. This is true for the current naming
-       * convention for block file blk_<blockid> and meta file
-       * blk_<blockid>_<genstamp>.meta
-       */
-      for (int i = 0; i < fileNames.size(); i++) {
-        // Make sure this thread can make a timely exit. With a low throttle
-        // rate, completing a run can take a looooong time.
-        if (Thread.interrupted()) {
-          throw new InterruptedException();
-        }
-
-        File file = new File(dir, fileNames.get(i));
-        if (file.isDirectory()) {
-          compileReport(vol, bpFinalizedDir, file, report);
-          continue;
-        }
-        if (!Block.isBlockFilename(file)) {
-          if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
-            long blockId = Block.getBlockId(file.getName());
-            verifyFileLocation(file.getParentFile(), bpFinalizedDir,
-                blockId);
-            report.add(new ScanInfo(blockId, null, file, vol));
-          }
-          continue;
-        }
-        File blockFile = file;
-        long blockId = Block.filename2id(file.getName());
-        File metaFile = null;
-
-        // Skip all the files that start with block name until
-        // getting to the metafile for the block
-        while (i + 1 < fileNames.size()) {
-          File blkMetaFile = new File(dir, fileNames.get(i + 1));
-          if (!(blkMetaFile.isFile()
-              && blkMetaFile.getName().startsWith(blockFile.getName()))) {
-            break;
-          }
-          i++;
-          if (isBlockMetaFile(blockFile.getName(), blkMetaFile.getName())) {
-            metaFile = blkMetaFile;
-            break;
-          }
-        }
-        verifyFileLocation(blockFile, bpFinalizedDir, blockId);
-        report.add(new ScanInfo(blockId, blockFile, metaFile, vol));
-      }
-      return report;
-    }
-
-    /**
-     * Verify whether the actual directory location of block file has the
-     * expected directory path computed using its block ID.
-     */
-    private void verifyFileLocation(File actualBlockFile,
-        File bpFinalizedDir, long blockId) {
-      File expectedBlockDir =
-          DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-      File actualBlockDir = actualBlockFile.getParentFile();
-      if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
-        LOG.warn("Block: " + blockId +
-            " found in invalid directory.  Expected directory: " +
-            expectedBlockDir + ".  Actual directory: " + actualBlockDir);
-      }
-    }
-
-    /**
      * Called by the thread before each potential disk scan so that a pause
      * can be optionally inserted to limit the number of scans per second.
      * The limit is controlled by
      * {@link DFSConfigKeys#DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY}.
      */
-    private void throttle() throws InterruptedException {
+    public void throttle() throws InterruptedException {
       accumulateTimeRunning();
 
       if ((throttleLimitMsPerSec < 1000) &&
@@ -963,7 +655,7 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  private enum BlockDirFilter implements FilenameFilter {
+  public enum BlockDirFilter implements FilenameFilter {
     INSTANCE;
 
     @Override

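For orientation before the next file: the key change above is that ReportCompiler.call() no longer walks the finalized directories itself but delegates the walk to each volume via FsVolumeSpi.compileReport(bpid, report, this). Below is a minimal sketch of that control flow, using made-up stand-in types (SimpleVolume, SimpleReportCompiler, String entries instead of ScanInfo) rather than the real Hadoop classes:

```java
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

// Simplified stand-ins for the real DirectoryScanner/FsVolumeSpi types;
// names and shapes are assumptions for illustration only.
interface SimpleVolume {
  // After the refactoring, each volume owns its own directory walk and
  // fills the report it is handed.
  LinkedList<String> compileReport(String bpid, LinkedList<String> report)
      throws InterruptedException;
}

class SimpleReportCompiler {
  private final SimpleVolume volume;
  private final List<String> blockPoolIds;

  SimpleReportCompiler(SimpleVolume volume, List<String> blockPoolIds) {
    this.volume = volume;
    this.blockPoolIds = blockPoolIds;
  }

  // Mirrors the refactored call(): the compiler no longer resolves
  // finalized directories itself; it just delegates to the volume.
  Map<String, LinkedList<String>> call() throws InterruptedException {
    Map<String, LinkedList<String>> result = new HashMap<>(blockPoolIds.size());
    for (String bpid : blockPoolIds) {
      result.put(bpid, volume.compileReport(bpid, new LinkedList<>()));
    }
    return result;
  }
}
```

The shape keeps throttling in the compiler (hence throttle() becoming public, so a volume can call back into it) while letting each volume implementation decide how its blocks are laid out on disk.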
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index e7e9105..0c75001 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -500,7 +500,8 @@ public class DiskBalancer {
         references = this.dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          storageIDToVolBasePathMap.put(vol.getStorageID(), vol.getBasePath());
+          storageIDToVolBasePathMap.put(vol.getStorageID(),
+              vol.getBaseURI().getPath());
         }
         references.close();
       }
@@ -1023,7 +1024,7 @@ public class DiskBalancer {
         openPoolIters(source, poolIters);
         if (poolIters.size() == 0) {
           LOG.error("No block pools found on volume. volume : {}. Exiting.",
-              source.getBasePath());
+              source.getBaseURI());
           return;
         }
 
@@ -1033,17 +1034,16 @@ public class DiskBalancer {
             // Check for the max error count constraint.
             if (item.getErrorCount() > getMaxError(item)) {
               LOG.error("Exceeded the max error count. source {}, dest: {} " +
-                      "error count: {}", source.getBasePath(),
-                  dest.getBasePath(), item.getErrorCount());
-              this.setExitFlag();
-              continue;
+                      "error count: {}", source.getBaseURI(),
+                  dest.getBaseURI(), item.getErrorCount());
+              break;
             }
 
             // Check for the block tolerance constraint.
             if (isCloseEnough(item)) {
               LOG.info("Copy from {} to {} done. copied {} bytes and {} " +
                       "blocks.",
-                  source.getBasePath(), dest.getBasePath(),
+                  source.getBaseURI(), dest.getBaseURI(),
                   item.getBytesCopied(), item.getBlocksCopied());
               this.setExitFlag();
               continue;
@@ -1053,7 +1053,7 @@ public class DiskBalancer {
             // we are not able to find any blocks to copy.
             if (block == null) {
               LOG.error("No source blocks, exiting the copy. Source: {}, " +
-                  "Dest:{}", source.getBasePath(), dest.getBasePath());
+                  "Dest:{}", source.getBaseURI(), dest.getBaseURI());
               this.setExitFlag();
               continue;
             }
@@ -1081,14 +1081,13 @@ public class DiskBalancer {
               // exiting here.
               LOG.error("Destination volume: {} does not have enough space to" +
                   " accommodate a block. Block Size: {} Exiting from" +
-                  " copyBlocks.", dest.getBasePath(), block.getNumBytes());
-              this.setExitFlag();
-              continue;
+                  " copyBlocks.", dest.getBaseURI(), block.getNumBytes());
+              break;
             }
 
             LOG.debug("Moved block with size {} from  {} to {}",
-                block.getNumBytes(), source.getBasePath(),
-                dest.getBasePath());
+                block.getNumBytes(), source.getBaseURI(),
+                dest.getBaseURI());
 
             // Check for the max throughput constraint.
             // We sleep here to keep the promise that we will not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index cbfc9a5..58febf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index cbbafc3..dc63238 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -25,8 +25,8 @@ import java.net.URI;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.util.LightWeightResizableGSet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 3162c5c..75abc1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.StringUtils;
 
+
 /**
  * Encapsulates the URI and storage medium that together describe a
  * storage directory.
@@ -37,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  */
 @InterfaceAudience.Private
-public class StorageLocation {
+public class StorageLocation implements Comparable<StorageLocation>{
   final StorageType storageType;
   final File file;
 
@@ -104,16 +105,37 @@ public class StorageLocation {
 
   @Override
   public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    } else if (obj == null || !(obj instanceof StorageLocation)) {
+    if (obj == null || !(obj instanceof StorageLocation)) {
       return false;
     }
-    return toString().equals(obj.toString());
+    int comp = compareTo((StorageLocation) obj);
+    return comp == 0;
   }
 
   @Override
   public int hashCode() {
     return toString().hashCode();
   }
+
+  @Override
+  public int compareTo(StorageLocation obj) {
+    if (obj == this) {
+      return 0;
+    } else if (obj == null) {
+      return -1;
+    }
+
+    StorageLocation otherStorage = (StorageLocation) obj;
+    if (this.getFile() != null && otherStorage.getFile() != null) {
+      return this.getFile().getAbsolutePath().compareTo(
+          otherStorage.getFile().getAbsolutePath());
+    } else if (this.getFile() == null && otherStorage.getFile() == null) {
+      return this.storageType.compareTo(otherStorage.getStorageType());
+    } else if (this.getFile() == null) {
+      return -1;
+    } else {
+      return 1;
+    }
+
+  }
 }

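A short illustration of the ordering semantics the new Comparable implementation introduces: locations compare (and are now equal) by absolute file path when both have one, falling back to storage type otherwise. This is a simplified stand-in class, not the real StorageLocation:

```java
import java.io.File;

// Minimal stand-in for illustration; the real StorageLocation also carries
// a StorageType, which it uses as a tie-breaker when no file is present.
class SimpleLocation implements Comparable<SimpleLocation> {
  private final File file;

  SimpleLocation(String path) {
    this.file = (path == null) ? null : new File(path);
  }

  @Override
  public int compareTo(SimpleLocation other) {
    if (other == this) {
      return 0;
    } else if (other == null) {
      return -1;
    }
    if (file != null && other.file != null) {
      return file.getAbsolutePath().compareTo(other.file.getAbsolutePath());
    } else if (file == null && other.file == null) {
      return 0; // the real class compares storage types here
    }
    return (file == null) ? -1 : 1;
  }

  @Override
  public boolean equals(Object obj) {
    return (obj instanceof SimpleLocation)
        && compareTo((SimpleLocation) obj) == 0;
  }

  @Override
  public int hashCode() {
    return (file == null) ? 0 : file.getAbsolutePath().hashCode();
  }

  public static void main(String[] args) {
    SimpleLocation a = new SimpleLocation("/data/1");
    SimpleLocation b = new SimpleLocation("/data/1");
    System.out.println(a.equals(b));                                    // true
    System.out.println(a.compareTo(new SimpleLocation("/data/2")) < 0); // true
  }
}
```

One point worth keeping in mind: equals() now goes through the path-based compareTo(), while hashCode() in the patch still hashes toString(); if toString() encodes more than the path, two equal locations could in principle hash differently in hash-based collections.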
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 3416b53..1e44fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -217,7 +217,7 @@ public class VolumeScanner extends Thread {
 
   public void printStats(StringBuilder p) {
     p.append(String.format("Block scanner information for volume %s with base" +
-        " path %s%n", volume.getStorageID(), volume.getBasePath()));
+        " path %s%n", volume.getStorageID(), volume));
     synchronized (stats) {
       p.append(String.format("Bytes verified in last hour       : %57d%n",
           stats.bytesScannedInPastHour));
@@ -253,20 +253,20 @@ public class VolumeScanner extends Thread {
 
     public void setup(VolumeScanner scanner) {
       LOG.trace("Starting VolumeScanner {}",
-          scanner.volume.getBasePath());
+          scanner.volume);
       this.scanner = scanner;
     }
 
     public void handle(ExtendedBlock block, IOException e) {
       FsVolumeSpi volume = scanner.volume;
       if (e == null) {
-        LOG.trace("Successfully scanned {} on {}", block, volume.getBasePath());
+        LOG.trace("Successfully scanned {} on {}", block, volume);
         return;
       }
       // If the block does not exist anymore, then it's not an error.
       if (!volume.getDataset().contains(block)) {
         LOG.debug("Volume {}: block {} is no longer in the dataset.",
-            volume.getBasePath(), block);
+            volume, block);
         return;
       }
       // If the block exists, the exception may due to a race with write:
@@ -278,11 +278,10 @@ public class VolumeScanner extends Thread {
       if (e instanceof FileNotFoundException ) {
         LOG.info("Volume {}: verification failed for {} because of " +
                 "FileNotFoundException.  This may be due to a race with write.",
-            volume.getBasePath(), block);
+            volume, block);
         return;
       }
-      LOG.warn("Reporting bad " + block + " with volume "
-          + volume.getBasePath(), e);
+      LOG.warn("Reporting bad {} on {}", block, volume);
       try {
         scanner.datanode.reportBadBlocks(block, volume);
       } catch (IOException ie) {
@@ -305,7 +304,7 @@ public class VolumeScanner extends Thread {
       handler = new ScanResultHandler();
     }
     this.resultHandler = handler;
-    setName("VolumeScannerThread(" + volume.getBasePath() + ")");
+    setName("VolumeScannerThread(" + volume + ")");
     setDaemon(true);
   }
 
@@ -376,7 +375,7 @@ public class VolumeScanner extends Thread {
       BlockIterator iter = blockIters.get(idx);
       if (!iter.atEnd()) {
         LOG.info("Now scanning bpid {} on volume {}",
-            iter.getBlockPoolId(), volume.getBasePath());
+            iter.getBlockPoolId(), volume);
         curBlockIter = iter;
         return 0L;
       }
@@ -385,7 +384,7 @@ public class VolumeScanner extends Thread {
       if (waitMs <= 0) {
         iter.rewind();
         LOG.info("Now rescanning bpid {} on volume {}, after more than " +
-            "{} hour(s)", iter.getBlockPoolId(), volume.getBasePath(),
+            "{} hour(s)", iter.getBlockPoolId(), volume,
             TimeUnit.HOURS.convert(conf.scanPeriodMs, TimeUnit.MILLISECONDS));
         curBlockIter = iter;
         return 0L;
@@ -416,16 +415,16 @@ public class VolumeScanner extends Thread {
           cblock.getBlockPoolId(), cblock.getBlockId());
       if (b == null) {
         LOG.info("Replica {} was not found in the VolumeMap for volume {}",
-            cblock, volume.getBasePath());
+            cblock, volume);
       } else {
         block = new ExtendedBlock(cblock.getBlockPoolId(), b);
       }
     } catch (FileNotFoundException e) {
       LOG.info("FileNotFoundException while finding block {} on volume {}",
-          cblock, volume.getBasePath());
+          cblock, volume);
     } catch (IOException e) {
       LOG.warn("I/O error while finding block {} on volume {}",
-            cblock, volume.getBasePath());
+            cblock, volume);
     }
     if (block == null) {
       return -1; // block not found.
@@ -642,7 +641,7 @@ public class VolumeScanner extends Thread {
 
   @Override
   public String toString() {
-    return "VolumeScanner(" + volume.getBasePath() +
+    return "VolumeScanner(" + volume +
         ", " + volume.getStorageID() + ")";
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index b75ed5b..f2ffa83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -206,7 +207,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * @param clearFailure set true to clear the failure information about the
    *                     volumes.
    */
-  void removeVolumes(Set<File> volumes, boolean clearFailure);
+  void removeVolumes(Collection<StorageLocation> volumes, boolean clearFailure);
 
   /** @return a storage with the given storage ID */
   DatanodeStorage getStorage(final String storageUuid);
@@ -482,7 +483,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
      * Check if all the data directories are healthy
      * @return A set of unhealthy data directories.
      */
-  Set<File> checkDataDir();
+  Set<StorageLocation> checkDataDir();
 
   /**
    * Shutdown the FSDataset

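A caller-side sketch of the revised contract, where removeVolumes() and checkDataDir() deal in StorageLocation objects instead of absolute File paths. The types below (FakeStorageLocation, FakeDataset) are hypothetical stand-ins, not Hadoop classes:

```java
import java.io.File;
import java.util.Collection;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Stand-in for StorageLocation: a volume is identified by its location
// object rather than by an absolute File.
final class FakeStorageLocation {
  private final File file;
  FakeStorageLocation(String path) { this.file = new File(path); }
  File getFile() { return file; }
  @Override public String toString() { return file.toURI().toString(); }
}

// Mirrors the new removeVolumes(Collection<StorageLocation>, boolean) shape.
interface FakeDataset {
  void removeVolumes(Collection<FakeStorageLocation> volumes, boolean clearFailure);
}

class RemoveVolumesDemo {
  // Hot-swap a set of data directories out of the dataset; passing
  // clearFailure == true also drops any recorded failure info for them.
  static void swapOut(FakeDataset dataset, String... dataDirs) {
    Collection<FakeStorageLocation> toRemove = Stream.of(dataDirs)
        .map(FakeStorageLocation::new)
        .collect(Collectors.toList());
    dataset.removeVolumes(toRemove, true);
  }
}
```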
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 9e16121..dbba31d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -20,10 +20,20 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset;
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.LinkedList;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 
 /**
  * This is an interface for the underlying volume.
@@ -48,14 +58,14 @@ public interface FsVolumeSpi {
   long getAvailable() throws IOException;
 
   /** @return the base path to the volume */
-  String getBasePath();
+  URI getBaseURI();
 
-  /** @return the path to the volume */
-  String getPath(String bpid) throws IOException;
+  DF getUsageStats(Configuration conf);
 
-  /** @return the directory for the finalized blocks in the block pool. */
-  File getFinalizedDir(String bpid) throws IOException;
-  
+  /** @return the {@link StorageLocation} to the volume */
+  StorageLocation getStorageLocation();
+
+  /** @return the {@link StorageType} of the volume */
   StorageType getStorageType();
 
   /** Returns true if the volume is NOT backed by persistent storage. */
@@ -186,4 +196,216 @@ public interface FsVolumeSpi {
    * Get the FSDatasetSpi which this volume is a part of.
    */
   FsDatasetSpi getDataset();
+
+  /**
+   * Tracks the files and other information related to a block on the disk.
+   * A missing file is indicated by setting the corresponding member
+   * to null.
+   *
+   * Because millions of these structures may be created, we try to save
+   * memory here.  So instead of storing full paths, we store path suffixes.
+   * The block file, if it exists, will have a path like this:
+   * <volume_base_path>/<block_path>
+   * So we don't need to store the volume path, since we already know what the
+   * volume is.
+   *
+   * The metadata file, if it exists, will have a path like this:
+   * <volume_base_path>/<block_path>_<genstamp>.meta
+   * So if we have a block file, there isn't any need to store the block path
+   * again.
+   *
+   * The accessor functions take care of these manipulations.
+   */
+  public static class ScanInfo implements Comparable<ScanInfo> {
+    private final long blockId;
+
+    /**
+     * The block file path, relative to the volume's base directory.
+     * If there was no block file found, this may be null. If 'vol'
+     * is null, then this is the full path of the block file.
+     */
+    private final String blockSuffix;
+
+    /**
+     * The suffix of the meta file path relative to the block file.
+     * If blockSuffix is null, then this will be the entire path relative
+     * to the volume base directory, or an absolute path if vol is also
+     * null.
+     */
+    private final String metaSuffix;
+
+    private final FsVolumeSpi volume;
+
+    /**
+     * Get the file's length in async block scan
+     */
+    private final long blockFileLength;
+
+    private final static Pattern CONDENSED_PATH_REGEX =
+        Pattern.compile("(?<!^)(\\\\|/){2,}");
+
+    private final static String QUOTED_FILE_SEPARATOR =
+        Matcher.quoteReplacement(File.separator);
+
+    /**
+     * Get the most condensed version of the path.
+     *
+     * For example, the condensed version of /foo//bar is /foo/bar
+     * Unlike {@link File#getCanonicalPath()}, this will never perform I/O
+     * on the filesystem.
+     *
+     * @param path the path to condense
+     * @return the condensed path
+     */
+    private static String getCondensedPath(String path) {
+      return CONDENSED_PATH_REGEX.matcher(path).
+          replaceAll(QUOTED_FILE_SEPARATOR);
+    }
+
+    /**
+     * Get a path suffix.
+     *
+     * @param f            The file to get the suffix for.
+     * @param prefix       The prefix we're stripping off.
+     *
+     * @return             A suffix such that prefix + suffix = path to f
+     */
+    private static String getSuffix(File f, String prefix) {
+      String fullPath = getCondensedPath(f.getAbsolutePath());
+      if (fullPath.startsWith(prefix)) {
+        return fullPath.substring(prefix.length());
+      }
+      throw new RuntimeException(prefix + " is not a prefix of " + fullPath);
+    }
+
+    /**
+     * Create a ScanInfo object for a block. This constructor will examine
+     * the block data and meta-data files.
+     *
+     * @param blockId the block ID
+     * @param blockFile the path to the block data file
+     * @param metaFile the path to the block meta-data file
+     * @param vol the volume that contains the block
+     */
+    public ScanInfo(long blockId, File blockFile, File metaFile,
+        FsVolumeSpi vol) {
+      this.blockId = blockId;
+      String condensedVolPath =
+          (vol == null || vol.getBaseURI() == null) ? null :
+            getCondensedPath(new File(vol.getBaseURI()).getAbsolutePath());
+      this.blockSuffix = blockFile == null ? null :
+        getSuffix(blockFile, condensedVolPath);
+      this.blockFileLength = (blockFile != null) ? blockFile.length() : 0;
+      if (metaFile == null) {
+        this.metaSuffix = null;
+      } else if (blockFile == null) {
+        this.metaSuffix = getSuffix(metaFile, condensedVolPath);
+      } else {
+        this.metaSuffix = getSuffix(metaFile,
+            condensedVolPath + blockSuffix);
+      }
+      this.volume = vol;
+    }
+
+    /**
+     * Returns the block data file.
+     *
+     * @return the block data file
+     */
+    public File getBlockFile() {
+      return (blockSuffix == null) ? null :
+        new File(new File(volume.getBaseURI()).getAbsolutePath(), blockSuffix);
+    }
+
+    /**
+     * Return the length of the data block. The length returned is the length
+     * cached when this object was created.
+     *
+     * @return the length of the data block
+     */
+    public long getBlockFileLength() {
+      return blockFileLength;
+    }
+
+    /**
+     * Returns the block meta data file or null if there isn't one.
+     *
+     * @return the block meta data file
+     */
+    public File getMetaFile() {
+      if (metaSuffix == null) {
+        return null;
+      } else if (blockSuffix == null) {
+        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+            metaSuffix);
+      } else {
+        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+            blockSuffix + metaSuffix);
+      }
+    }
+
+    /**
+     * Returns the block ID.
+     *
+     * @return the block ID
+     */
+    public long getBlockId() {
+      return blockId;
+    }
+
+    /**
+     * Returns the volume that contains the block that this object describes.
+     *
+     * @return the volume
+     */
+    public FsVolumeSpi getVolume() {
+      return volume;
+    }
+
+    @Override // Comparable
+    public int compareTo(ScanInfo b) {
+      if (blockId < b.blockId) {
+        return -1;
+      } else if (blockId == b.blockId) {
+        return 0;
+      } else {
+        return 1;
+      }
+    }
+
+    @Override // Object
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof ScanInfo)) {
+        return false;
+      }
+      return blockId == ((ScanInfo) o).blockId;
+    }
+
+    @Override // Object
+    public int hashCode() {
+      return (int)(blockId^(blockId>>>32));
+    }
+
+    public long getGenStamp() {
+      return metaSuffix != null ? Block.getGenerationStamp(
+          getMetaFile().getName()) :
+            HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+    }
+  }
+
+  /**
+   * Compile a list of {@link ScanInfo} for the blocks in
+   * the block pool with id {@code bpid}.
+   *
+   * @param bpid block pool id to scan
+   * @param report the list onto which blocks reports are placed
+   * @param reportCompiler the report compiler used to throttle the scan
+   * @throws IOException if an I/O error occurs while scanning the volume
+   */
+  LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException;
 }

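The ScanInfo javadoc above describes keeping only path suffixes, rather than full paths, to save memory across millions of entries. A small self-contained sketch of that bookkeeping, with illustrative Unix-style paths that are not taken from the patch:

```java
import java.io.File;

// Illustrates the suffix bookkeeping described in the ScanInfo javadoc:
// only suffixes relative to the volume base are kept, and full paths are
// rebuilt on demand. The paths below are made up for the example.
public class SuffixDemo {
  private static String suffix(File f, String prefix) {
    String full = f.getAbsolutePath();
    if (!full.startsWith(prefix)) {
      throw new IllegalArgumentException(prefix + " is not a prefix of " + full);
    }
    return full.substring(prefix.length());
  }

  public static void main(String[] args) {
    String volumeBase = new File("/data/dn1").getAbsolutePath();
    File blockFile = new File("/data/dn1/current/BP-1/finalized/blk_1001");
    File metaFile =
        new File("/data/dn1/current/BP-1/finalized/blk_1001_2002.meta");

    String blockSuffix = suffix(blockFile, volumeBase);
    // With a block file present, the meta suffix reduces to "_<genstamp>.meta".
    String metaSuffix = suffix(metaFile, volumeBase + blockSuffix);

    System.out.println(blockSuffix);  // /current/BP-1/finalized/blk_1001
    System.out.println(metaSuffix);   // _2002.meta
    // Reconstruct the full block path, as getBlockFile() does.
    System.out.println(new File(volumeBase, blockSuffix));
  }
}
```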
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index c9160cd..b9c731b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -71,8 +71,8 @@ class FsDatasetAsyncDiskService {
   private final DataNode datanode;
   private final FsDatasetImpl fsdatasetImpl;
   private final ThreadGroup threadGroup;
-  private Map<File, ThreadPoolExecutor> executors
-      = new HashMap<File, ThreadPoolExecutor>();
+  private Map<String, ThreadPoolExecutor> executors
+      = new HashMap<String, ThreadPoolExecutor>();
   private Map<String, Set<Long>> deletedBlockIds 
       = new HashMap<String, Set<Long>>();
   private static final int MAX_DELETED_BLOCKS = 64;
@@ -91,7 +91,7 @@ class FsDatasetAsyncDiskService {
     this.threadGroup = new ThreadGroup(getClass().getSimpleName());
   }
 
-  private void addExecutorForVolume(final File volume) {
+  private void addExecutorForVolume(final FsVolumeImpl volume) {
     ThreadFactory threadFactory = new ThreadFactory() {
       int counter = 0;
 
@@ -115,18 +115,21 @@ class FsDatasetAsyncDiskService {
 
     // This can reduce the number of running threads
     executor.allowCoreThreadTimeOut(true);
-    executors.put(volume, executor);
+    executors.put(volume.getStorageID(), executor);
   }
 
   /**
    * Starts AsyncDiskService for a new volume
    * @param volume the root of the new data volume.
    */
-  synchronized void addVolume(File volume) {
+  synchronized void addVolume(FsVolumeImpl volume) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    if (volume == null) {
+      throw new RuntimeException("Attempt to add a null volume");
+    }
+    ThreadPoolExecutor executor = executors.get(volume.getStorageID());
     if (executor != null) {
       throw new RuntimeException("Volume " + volume + " is already existed.");
     }
@@ -137,17 +140,17 @@ class FsDatasetAsyncDiskService {
    * Stops AsyncDiskService for a volume.
    * @param volume the root of the volume.
    */
-  synchronized void removeVolume(File volume) {
+  synchronized void removeVolume(String storageId) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Can not find volume " + volume
-          + " to remove.");
+      throw new RuntimeException("Can not find volume with storageId "
+          + storageId + " to remove.");
     } else {
       executor.shutdown();
-      executors.remove(volume);
+      executors.remove(storageId);
     }
   }
   
@@ -162,13 +165,16 @@ class FsDatasetAsyncDiskService {
   /**
    * Execute the task sometime in the future, using ThreadPools.
    */
-  synchronized void execute(File root, Runnable task) {
+  synchronized void execute(FsVolumeImpl volume, Runnable task) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(root);
+    if (volume == null) {
+      throw new RuntimeException("A null volume does not have an executor");
+    }
+    ThreadPoolExecutor executor = executors.get(volume.getStorageID());
     if (executor == null) {
-      throw new RuntimeException("Cannot find root " + root
+      throw new RuntimeException("Cannot find volume " + volume
           + " for execution of task " + task);
     } else {
       executor.execute(task);
@@ -185,7 +191,7 @@ class FsDatasetAsyncDiskService {
     } else {
       LOG.info("Shutting down all async disk service threads");
       
-      for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
+      for (Map.Entry<String, ThreadPoolExecutor> e : executors.entrySet()) {
         e.getValue().shutdown();
       }
       // clear the executor map so that calling execute again will fail.
@@ -198,7 +204,7 @@ class FsDatasetAsyncDiskService {
   public void submitSyncFileRangeRequest(FsVolumeImpl volume,
       final FileDescriptor fd, final long offset, final long nbytes,
       final int flags) {
-    execute(volume.getCurrentDir(), new Runnable() {
+    execute(volume, new Runnable() {
       @Override
       public void run() {
         try {
@@ -220,7 +226,7 @@ class FsDatasetAsyncDiskService {
         + " replica " + replicaToDelete + " for deletion");
     ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
         volumeRef, replicaToDelete, block, trashDirectory);
-    execute(((FsVolumeImpl) volumeRef.getVolume()).getCurrentDir(), deletionTask);
+    execute(((FsVolumeImpl) volumeRef.getVolume()), deletionTask);
   }
   
   /** A task for deleting a block file and its associated meta file, as well

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 26a2e9f..fd747bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -361,20 +361,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
       Collection<StorageLocation> dataLocations, DataStorage storage) {
-    Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
+    Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(
         dataLocations.size());
     for (StorageLocation sl: dataLocations) {
-      failedLocationSet.add(sl.getFile().getAbsolutePath());
+      LOG.info("Adding to failedLocationSet " + sl);
+      failedLocationSet.add(sl);
     }
     for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
          it.hasNext(); ) {
       Storage.StorageDirectory sd = it.next();
-      failedLocationSet.remove(sd.getRoot().getAbsolutePath());
+      failedLocationSet.remove(sd.getStorageLocation());
+      LOG.info("Removing from failedLocationSet " + sd.getStorageLocation());
     }
     List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
         failedLocationSet.size());
     long failureDate = Time.now();
-    for (String failedStorageLocation: failedLocationSet) {
+    for (StorageLocation failedStorageLocation: failedLocationSet) {
       volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
           failureDate));
     }
@@ -403,49 +405,55 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           new DatanodeStorage(sd.getStorageUuid(),
               DatanodeStorage.State.NORMAL,
               storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
+      asyncDiskService.addVolume((FsVolumeImpl) ref.getVolume());
       volumes.addVolume(ref);
     }
   }
 
   private void addVolume(Collection<StorageLocation> dataLocations,
       Storage.StorageDirectory sd) throws IOException {
-    final File dir = sd.getCurrentDir();
-    final StorageType storageType =
-        getStorageTypeFromLocations(dataLocations, sd.getRoot());
+    final StorageLocation storageLocation = sd.getStorageLocation();
 
     // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
     // nothing needed to be rolled back to make various data structures, e.g.,
     // storageMap and asyncDiskService, consistent.
-    FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+    FsVolumeImpl fsVolume = new FsVolumeImplBuilder()
+                              .setDataset(this)
+                              .setStorageID(sd.getStorageUuid())
+                              .setStorageDirectory(sd)
+                              .setConf(this.conf)
+                              .build();
     FsVolumeReference ref = fsVolume.obtainReference();
     ReplicaMap tempVolumeMap = new ReplicaMap(datasetLock);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
 
-    activateVolume(tempVolumeMap, sd, storageType, ref);
-    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+    activateVolume(tempVolumeMap, sd, storageLocation.getStorageType(), ref);
+    LOG.info("Added volume - " + storageLocation + ", StorageType: " +
+        storageLocation.getStorageType());
   }
 
   @VisibleForTesting
-  public FsVolumeImpl createFsVolume(String storageUuid, File currentDir,
-      StorageType storageType) throws IOException {
-    return new FsVolumeImpl(this, storageUuid, currentDir, conf, storageType);
+  public FsVolumeImpl createFsVolume(String storageUuid,
+      Storage.StorageDirectory sd,
+      final StorageLocation location) throws IOException {
+    return new FsVolumeImplBuilder()
+        .setDataset(this)
+        .setStorageID(storageUuid)
+        .setStorageDirectory(sd)
+        .setConf(conf)
+        .build();
   }
 
   @Override
   public void addVolume(final StorageLocation location,
       final List<NamespaceInfo> nsInfos)
       throws IOException {
-    final File dir = location.getFile();
-
     // Prepare volume in DataStorage
     final DataStorage.VolumeBuilder builder;
     try {
-      builder = dataStorage.prepareVolume(datanode, location.getFile(), nsInfos);
+      builder = dataStorage.prepareVolume(datanode, location, nsInfos);
     } catch (IOException e) {
-      volumes.addVolumeFailureInfo(new VolumeFailureInfo(
-          location.getFile().getAbsolutePath(), Time.now()));
+      volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
       throw e;
     }
 
@@ -453,7 +461,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume =
-        createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
+        createFsVolume(sd.getStorageUuid(), sd, location);
     final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -482,34 +490,33 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     builder.build();
     activateVolume(tempVolumeMap, sd, storageType, ref);
-    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+    LOG.info("Added volume - " + location + ", StorageType: " + storageType);
   }
 
   /**
    * Removes a set of volumes from FsDataset.
-   * @param volumesToRemove a set of absolute root path of each volume.
+   * @param storageLocationsToRemove a set of
+   * {@link StorageLocation}s for each volume.
    * @param clearFailure set true to clear failure information.
    */
   @Override
-  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
-    // Make sure that all volumes are absolute path.
-    for (File vol : volumesToRemove) {
-      Preconditions.checkArgument(vol.isAbsolute(),
-          String.format("%s is not absolute path.", vol.getPath()));
-    }
-
+  public void removeVolumes(
+      Collection<StorageLocation> storageLocationsToRemove,
+      boolean clearFailure) {
     Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
     List<String> storageToRemove = new ArrayList<>();
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
         Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-        final File absRoot = sd.getRoot().getAbsoluteFile();
-        if (volumesToRemove.contains(absRoot)) {
-          LOG.info("Removing " + absRoot + " from FsDataset.");
-
+        final StorageLocation sdLocation = sd.getStorageLocation();
+        LOG.info("Checking removing StorageLocation " +
+            sdLocation + " with id " + sd.getStorageUuid());
+        if (storageLocationsToRemove.contains(sdLocation)) {
+          LOG.info("Removing StorageLocation " + sdLocation + " with id " +
+              sd.getStorageUuid() + " from FsDataset.");
           // Disable the volume from the service.
-          asyncDiskService.removeVolume(sd.getCurrentDir());
-          volumes.removeVolume(absRoot, clearFailure);
+          asyncDiskService.removeVolume(sd.getStorageUuid());
+          volumes.removeVolume(sdLocation, clearFailure);
           volumes.waitVolumeRemoved(5000, datasetLockCondition);
 
           // Removed all replica information for the blocks on the volume.
@@ -517,12 +524,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           // not scan disks.
           for (String bpid : volumeMap.getBlockPoolList()) {
             List<ReplicaInfo> blocks = new ArrayList<>();
-            for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-                 it.hasNext(); ) {
+            for (Iterator<ReplicaInfo> it =
+                  volumeMap.replicas(bpid).iterator(); it.hasNext();) {
               ReplicaInfo block = it.next();
-              final File absBasePath =
-                  new File(block.getVolume().getBasePath()).getAbsoluteFile();
-              if (absBasePath.equals(absRoot)) {
+              final StorageLocation blockStorageLocation =
+                  block.getVolume().getStorageLocation();
+              LOG.info("checking for block " + block.getBlockId() +
+                  " with storageLocation " + blockStorageLocation);
+              if (blockStorageLocation.equals(sdLocation)) {
                 blocks.add(block);
                 it.remove();
               }
@@ -625,7 +634,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     List<String> failedStorageLocations = Lists.newArrayListWithCapacity(
         infos.length);
     for (VolumeFailureInfo info: infos) {
-      failedStorageLocations.add(info.getFailedStorageLocation());
+      failedStorageLocations.add(
+          info.getFailedStorageLocation().getFile().getAbsolutePath());
     }
     return failedStorageLocations.toArray(
         new String[failedStorageLocations.size()]);
@@ -663,7 +673,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     long lastVolumeFailureDate = 0;
     long estimatedCapacityLostTotal = 0;
     for (VolumeFailureInfo info: infos) {
-      failedStorageLocations.add(info.getFailedStorageLocation());
+      failedStorageLocations.add(
+          info.getFailedStorageLocation().getFile().getAbsolutePath());
       long failureDate = info.getFailureDate();
       if (failureDate > lastVolumeFailureDate) {
         lastVolumeFailureDate = failureDate;
@@ -960,25 +971,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
     // Copy files to temp dir first
-    File[] blockFiles = copyBlockFiles(block.getBlockId(),
-        block.getGenerationStamp(), replicaInfo,
-        targetVolume.getTmpDir(block.getBlockPoolId()),
-        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
-
-    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
-        .setBlockId(replicaInfo.getBlockId())
-        .setGenerationStamp(replicaInfo.getGenerationStamp())
-        .setFsVolume(targetVolume)
-        .setDirectoryToUse(blockFiles[0].getParentFile())
-        .setBytesToReserve(0)
-        .build();
-    newReplicaInfo.setNumBytes(blockFiles[1].length());
+    ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block,
+        replicaInfo, smallBufferSize, conf);
+
     // Finalize the copied files
     newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       // Increment numBlocks here as this block moved without knowing to BPS
       FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
-      volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
+      volume.incrNumBlocks(block.getBlockPoolId());
     }
 
     removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId());
@@ -2072,7 +2073,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * @return the failed volumes. Returns null if no volume failed.
    */
   @Override // FsDatasetSpi
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
    return volumes.checkDirs();
   }
     
@@ -2250,9 +2251,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
                     .setFsVolume(vol)
                     .setDirectoryToUse(diskFile.getParentFile())
                     .build();
-              ((FsVolumeImpl) vol).getBlockPoolSlice(bpid)
-                  .resolveDuplicateReplicas(
-                      memBlockInfo, diskBlockInfo, volumeMap);
+              ((FsVolumeImpl) vol).resolveDuplicateReplicas(bpid,
+                  memBlockInfo, diskBlockInfo, volumeMap);
             }
           } else {
             if (!diskFile.delete()) {
@@ -2803,15 +2803,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     // Add thread for DISK volume if RamDisk is configured
     if (ramDiskConfigured &&
         asyncLazyPersistService != null &&
-        !asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
-      asyncLazyPersistService.addVolume(v.getCurrentDir());
+        !asyncLazyPersistService.queryVolume(v)) {
+      asyncLazyPersistService.addVolume(v);
     }
 
     // Remove thread for DISK volume if RamDisk is not configured
     if (!ramDiskConfigured &&
         asyncLazyPersistService != null &&
-        asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
-      asyncLazyPersistService.removeVolume(v.getCurrentDir());
+        asyncLazyPersistService.queryVolume(v)) {
+      asyncLazyPersistService.removeVolume(v);
     }
   }
 
@@ -2946,11 +2946,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
           // Move the replica from lazyPersist/ to finalized/ on
           // the target volume
-          BlockPoolSlice bpSlice =
-              replicaState.getLazyPersistVolume().getBlockPoolSlice(bpid);
-
           newReplicaInfo =
-              bpSlice.activateSavedReplica(replicaInfo, replicaState);
+              replicaState.getLazyPersistVolume().activateSavedReplica(bpid,
+                  replicaInfo, replicaState);
 
           // Update the volumeMap entry.
           volumeMap.add(bpid, newReplicaInfo);




[37/52] [abbrv] hadoop git commit: Merge branch 'HADOOP-12756' into trunk

Posted by cn...@apache.org.
Merge branch 'HADOOP-12756' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/669d6f13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/669d6f13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/669d6f13

Branch: refs/heads/HADOOP-13037
Commit: 669d6f13ec48a90d4ba7e4ed1dd0e9687580f8f3
Parents: c874fa9 c31b5e6
Author: Kai Zheng <ka...@intel.com>
Authored: Tue Oct 11 03:22:11 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Tue Oct 11 03:22:11 2016 +0600

----------------------------------------------------------------------
 .gitignore                                      |   2 +
 hadoop-project/pom.xml                          |  22 +
 .../dev-support/findbugs-exclude.xml            |  18 +
 hadoop-tools/hadoop-aliyun/pom.xml              | 154 +++++
 .../aliyun/oss/AliyunCredentialsProvider.java   |  87 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      | 580 +++++++++++++++++++
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 516 +++++++++++++++++
 .../fs/aliyun/oss/AliyunOSSInputStream.java     | 260 +++++++++
 .../fs/aliyun/oss/AliyunOSSOutputStream.java    | 111 ++++
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java    | 167 ++++++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  | 113 ++++
 .../hadoop/fs/aliyun/oss/package-info.java      |  22 +
 .../site/markdown/tools/hadoop-aliyun/index.md  | 294 ++++++++++
 .../fs/aliyun/oss/AliyunOSSTestUtils.java       |  77 +++
 .../fs/aliyun/oss/TestAliyunCredentials.java    |  78 +++
 .../oss/TestAliyunOSSFileSystemContract.java    | 239 ++++++++
 .../oss/TestAliyunOSSFileSystemStore.java       | 125 ++++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java | 145 +++++
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 +++
 .../aliyun/oss/contract/AliyunOSSContract.java  |  49 ++
 .../contract/TestAliyunOSSContractCreate.java   |  35 ++
 .../contract/TestAliyunOSSContractDelete.java   |  34 ++
 .../contract/TestAliyunOSSContractDistCp.java   |  44 ++
 .../TestAliyunOSSContractGetFileStatus.java     |  35 ++
 .../contract/TestAliyunOSSContractMkdir.java    |  34 ++
 .../oss/contract/TestAliyunOSSContractOpen.java |  34 ++
 .../contract/TestAliyunOSSContractRename.java   |  35 ++
 .../contract/TestAliyunOSSContractRootDir.java  |  69 +++
 .../oss/contract/TestAliyunOSSContractSeek.java |  34 ++
 .../src/test/resources/contract/aliyun-oss.xml  | 115 ++++
 .../src/test/resources/core-site.xml            |  46 ++
 .../src/test/resources/log4j.properties         |  23 +
 hadoop-tools/hadoop-tools-dist/pom.xml          |   6 +
 hadoop-tools/pom.xml                            |   1 +
 34 files changed, 3695 insertions(+)
----------------------------------------------------------------------





[12/52] [abbrv] hadoop git commit: HADOOP-12977 s3a to handle delete("/", true) robustly. Contributed by Steve Loughran.

Posted by cn...@apache.org.
HADOOP-12977 s3a to handle delete("/", true) robustly. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebd4f39a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebd4f39a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebd4f39a

Branch: refs/heads/HADOOP-13037
Commit: ebd4f39a393e5fa9a810c6a36b749549229a53df
Parents: bf37217
Author: Steve Loughran <st...@apache.org>
Authored: Fri Oct 7 12:51:40 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Oct 7 12:51:40 2016 +0100

----------------------------------------------------------------------
 .../src/site/markdown/filesystem/filesystem.md  | 77 +++++++++++++++-----
 .../apache/hadoop/fs/FileContextURIBase.java    |  4 +-
 .../AbstractContractRootDirectoryTest.java      | 34 ++++++++-
 .../hadoop/fs/contract/ContractTestUtils.java   | 39 ++++++++++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 77 ++++++++++++++++----
 5 files changed, 197 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 1587842..2c9dd5d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 exists in the metadata, but no copies of any of its blocks can be located;
 
 ### `boolean delete(Path p, boolean recursive)`
 
+Delete a path, be it a file, symbolic link or directory. The
+`recursive` flag indicates whether a recursive delete should take place; if
+unset then a non-empty directory cannot be deleted.
+
+Except in the special case of the root directory, if this API call
+completed successfully then there is nothing at the end of the path.
+That is: the outcome is desired. The return flag simply tells the caller
+whether or not any change was made to the state of the filesystem.
+
+*Note*: many uses of this method surround it with checks for the return value being
+false, raising an exception if so. For example
+
+```java
+if (!fs.delete(path, true)) throw new IOException("Could not delete " + path);
+```
+
+This pattern is not needed. Code SHOULD just call `delete(path, recursive)` and
+assume the destination is no longer present, except in the special case of root
+directories, which will always remain (see below for special coverage of root directories).
+
 #### Preconditions
 
-A directory with children and recursive == false cannot be deleted
+A directory with children and `recursive == False` cannot be deleted
 
     if isDir(FS, p) and not recursive and (children(FS, p) != {}) : raise IOException
 
+(HDFS raises `PathIsNotEmptyDirectoryException` here.)
 
 #### Postconditions
 
 
 ##### Nonexistent path
 
-If the file does not exist the FS state does not change
+If the file does not exist the filesystem state does not change
 
     if not exists(FS, p):
         FS' = FS
@@ -700,7 +721,7 @@ A path referring to a file is removed, return value: `True`
         result = True
 
 
-##### Empty root directory
+##### Empty root directory, `recursive == False`
 
 Deleting an empty root does not change the filesystem state
 and may return true or false.
@@ -711,7 +732,10 @@ and may return true or false.
 
 There is no consistent return code from an attempt to delete the root directory.
 
-##### Empty (non-root) directory
+Implementations SHOULD return true; this prevents code which checks for a false
+return value from overreacting.
+
+##### Empty (non-root) directory, `recursive == False`
 
 Deleting an empty directory that is not root will remove the path from the FS and
 return true.
@@ -721,26 +745,41 @@ return true.
         result = True
 
 
-##### Recursive delete of root directory
+##### Recursive delete of non-empty root directory
 
 Deleting a root path with children and `recursive==True`
  can do one of two things.
 
-The POSIX model assumes that if the user has
+1. The POSIX model assumes that if the user has
 the correct permissions to delete everything,
 they are free to do so (resulting in an empty filesystem).
 
-    if isDir(FS, p) and isRoot(p) and recursive :
-        FS' = ({["/"]}, {}, {}, {})
-        result = True
+        if isDir(FS, p) and isRoot(p) and recursive :
+            FS' = ({["/"]}, {}, {}, {})
+            result = True
 
-In contrast, HDFS never permits the deletion of the root of a filesystem; the
-filesystem can be taken offline and reformatted if an empty
+1. HDFS never permits the deletion of the root of a filesystem; the
+filesystem must be taken offline and reformatted if an empty
 filesystem is desired.
 
-    if isDir(FS, p) and isRoot(p) and recursive :
-        FS' = FS
-        result = False
+        if isDir(FS, p) and isRoot(p) and recursive :
+            FS' = FS
+            result = False
+
+HDFS has the notion of *Protected Directories*, which are declared in
+the option `fs.protected.directories`. Any attempt to delete such a directory
+or a parent thereof raises an `AccessControlException`. Accordingly, any
+attempt to delete the root directory SHALL, if there is a protected directory,
+result in such an exception being raised.
+
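+As an illustration only, with hypothetical paths, such directories can be
+declared through the configuration, for example programmatically:
+
+```java
+Configuration conf = new Configuration();
+// deleting these directories, or any parent of them (including "/"), will fail
+conf.set("fs.protected.directories", "/users,/critical-data");
+```
+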
+This specification does not recommend any specific action. Do note, however,
+that the POSIX model assumes that there is a permissions model such that normal
+users do not have the permission to delete that root directory; it is an action
+which only system administrators should be able to perform.
+
+Any filesystem client which interacts with a remote filesystem lacking
+such a security model MAY reject calls to `delete("/", true)` on the basis
+that it makes it too easy to lose data.
 
 ##### Recursive delete of non-root directory
 
@@ -766,11 +805,11 @@ removes the path and all descendants
 
 #### Implementation Notes
 
-* S3N, Swift, FTP and potentially other non-traditional FileSystems
-implement `delete()` as recursive listing and file delete operation.
-This can break the expectations of client applications -and means that
-they cannot be used as drop-in replacements for HDFS.
-
+* Object Stores and other non-traditional filesystems onto which a directory
+  tree is emulated tend to implement `delete()` as a recursive listing and
+  entry-by-entry delete operation, as sketched below.
+  This can break the expectation of client applications that directory
+  deletion is an O(1) atomic operation, preventing the stores' use as drop-in
+  replacements for HDFS.
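+
+As a rough sketch only (not the code of any particular store), and assuming
+`fs` and `dir` are the filesystem and the directory being deleted, the
+emulation amounts to one listing plus one delete request per entry:
+
+```java
+// list every file under the directory, then delete each one individually
+RemoteIterator<LocatedFileStatus> entries = fs.listFiles(dir, true);
+while (entries.hasNext()) {
+  fs.delete(entries.next().getPath(), false);  // one store operation per entry
+}
+fs.delete(dir, false);  // finally remove the now-empty directory entry itself
+```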
 
 ### `boolean rename(Path src, Path d)`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
index 0a6ba65..a99f762 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
@@ -77,7 +77,9 @@ public abstract class FileContextURIBase {
   public void tearDown() throws Exception {
     // Clean up after test completion
     // No need to clean fc1 as fc1 and fc2 points same location
-    fc2.delete(BASE, true);
+    if (fc2 != null) {
+      fc2.delete(BASE, true);
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index cf3ede5..0a8f464 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.FileStatus;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.toList;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk;
 
@@ -62,12 +64,40 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
   }
 
   @Test
-  public void testRmEmptyRootDirNonRecursive() throws Throwable {
+  public void testRmEmptyRootDirRecursive() throws Throwable {
     //extra sanity checks here to avoid support calls about complete loss of data
     skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
     Path root = new Path("/");
     assertIsDirectory(root);
     boolean deleted = getFileSystem().delete(root, true);
+    LOG.info("rm -r / of empty dir result is {}", deleted);
+    assertIsDirectory(root);
+  }
+
+  @Test
+  public void testRmEmptyRootDirNonRecursive() throws Throwable {
+    // extra sanity checks here to avoid support calls about complete loss
+    // of data
+    skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
+    Path root = new Path("/");
+    assertIsDirectory(root);
+    // make sure it is clean
+    FileSystem fs = getFileSystem();
+    deleteChildren(fs, root, true);
+    FileStatus[] children = listChildren(fs, root);
+    if (children.length > 0) {
+      StringBuilder error = new StringBuilder();
+      error.append("Deletion of child entries failed, still have ")
+          .append(children.length)
+          .append(System.lineSeparator());
+      for (FileStatus child : children) {
+        error.append("  ").append(child.getPath())
+            .append(System.lineSeparator());
+      }
+      fail(error.toString());
+    }
+    // then try to delete the empty one
+    boolean deleted = fs.delete(root, false);
     LOG.info("rm / of empty dir result is {}", deleted);
     assertIsDirectory(root);
   }
@@ -88,6 +118,8 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
     } catch (IOException e) {
       //expected
       handleExpectedException(e);
+      // and the file must still be present
+      assertIsFile(file);
     } finally {
       getFileSystem().delete(file, false);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 0a1ca49..03f47c1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -393,6 +393,45 @@ public class ContractTestUtils extends Assert {
     rejectRootOperation(path, false);
   }
 
+  /**
+   * List then delete the children of a path, but not the path itself.
+   * This can be used to delete the entries under a root path when that
+   * FS does not support {@code delete("/")}.
+   * @param fileSystem filesystem
+   * @param path path to delete
+   * @param recursive flag to indicate child entry deletion should be recursive
+   * @return the number of child entries found and deleted (not including
+   * any recursive children of those entries)
+   * @throws IOException problem in the deletion process.
+   */
+  public static int deleteChildren(FileSystem fileSystem,
+      Path path,
+      boolean recursive) throws IOException {
+    FileStatus[] children = listChildren(fileSystem, path);
+    for (FileStatus entry : children) {
+      fileSystem.delete(entry.getPath(), recursive);
+    }
+    return children.length;
+  }
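+
+  // Typical use (illustrative only), e.g. from a contract test where
+  // getFileSystem() returns the filesystem under test:
+  //   deleteChildren(getFileSystem(), new Path("/"), true);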
+
+  /**
+   * List all children of a path, but not the path itself in the case
+   * that the path refers to a file or empty directory.
+   * @param fileSystem FS
+   * @param path path
+   * @return a list of children, and never the path itself.
+   * @throws IOException problem in the list process
+   */
+  public static FileStatus[] listChildren(FileSystem fileSystem,
+      Path path) throws IOException {
+    FileStatus[] entries = fileSystem.listStatus(path);
+    if (entries.length == 1 && path.equals(entries[0].getPath())) {
+      // this is the path: ignore
+      return new FileStatus[]{};
+    } else {
+      return entries;
+    }
+  }
 
   public static void noteAction(String action) {
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 3e536fb..85d1fc7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -67,10 +67,13 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
+import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -803,13 +806,27 @@ public class S3AFileSystem extends FileSystem {
    * operation statistics.
    * @param key key to blob to delete.
    */
-  private void deleteObject(String key) {
+  private void deleteObject(String key) throws InvalidRequestException {
+    blockRootDelete(key);
     incrementWriteOperations();
     incrementStatistic(OBJECT_DELETE_REQUESTS);
     s3.deleteObject(bucket, key);
   }
 
   /**
+   * Reject any request to delete an object where the key is root.
+   * @param key key to validate
+   * @throws InvalidRequestException if the request was rejected due to
+   * a mistaken attempt to delete the root directory.
+   */
+  private void blockRootDelete(String key) throws InvalidRequestException {
+    if (key.isEmpty() || "/".equals(key)) {
+      throw new InvalidRequestException("Bucket " + bucket
+          + " cannot be deleted");
+    }
+  }
+
+  /**
    * Perform a bulk object delete operation.
    * Increments the {@code OBJECT_DELETE_REQUESTS} and write
    * operation statistics.
@@ -948,17 +965,24 @@ public class S3AFileSystem extends FileSystem {
   /**
    * A helper method to delete a list of keys on a s3-backend.
    *
-   * @param keysToDelete collection of keys to delete on the s3-backend
+   * @param keysToDelete collection of keys to delete on the s3-backend.
+   *        If empty, no request is made of the object store.
    * @param clearKeys clears the keysToDelete-list after processing the list
    *            when set to true
    * @param deleteFakeDir indicates whether this is for deleting fake dirs
+   * @throws InvalidRequestException if the request was rejected due to
+   * a mistaken attempt to delete the root directory.
    */
   private void removeKeys(List<DeleteObjectsRequest.KeyVersion> keysToDelete,
-      boolean clearKeys, boolean deleteFakeDir) throws AmazonClientException {
+      boolean clearKeys, boolean deleteFakeDir)
+      throws AmazonClientException, InvalidRequestException {
     if (keysToDelete.isEmpty()) {
-      // no keys
+      // exit fast if there are no keys to delete
       return;
     }
+    for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
+      blockRootDelete(keyVersion.getKey());
+    }
     if (enableMultiObjectsDelete) {
       deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keysToDelete));
     } else {
@@ -1020,18 +1044,16 @@ public class S3AFileSystem extends FileSystem {
     if (status.isDirectory()) {
       LOG.debug("delete: Path is a directory: {}", f);
 
-      if (!recursive && !status.isEmptyDirectory()) {
-        throw new IOException("Path is a folder: " + f +
-                              " and it is not an empty directory");
-      }
-
       if (!key.endsWith("/")) {
         key = key + "/";
       }
 
       if (key.equals("/")) {
-        LOG.info("s3a cannot delete the root directory");
-        return false;
+        return rejectRootDirectoryDelete(status, recursive);
+      }
+
+      if (!recursive && !status.isEmptyDirectory()) {
+        throw new PathIsNotEmptyDirectoryException(f.toString());
       }
 
       if (status.isEmptyDirectory()) {
@@ -1072,10 +1094,39 @@ public class S3AFileSystem extends FileSystem {
       deleteObject(key);
     }
 
-    createFakeDirectoryIfNecessary(f.getParent());
+    Path parent = f.getParent();
+    if (parent != null) {
+      createFakeDirectoryIfNecessary(parent);
+    }
     return true;
   }
 
+  /**
+   * Implements the specific logic to reject root directory deletion.
+   * The caller must return the result of this call, rather than
+   * attempt to continue with the delete operation: deleting root
+   * directories is never allowed. This method simply implements
+   * the policy of when to return a value versus raise an exception.
+   * @param status filesystem status
+   * @param recursive recursive flag from command
+   * @return a return code for the operation
+   * @throws PathIOException if the operation was explicitly rejected.
+   */
+  private boolean rejectRootDirectoryDelete(S3AFileStatus status,
+      boolean recursive) throws IOException {
+    LOG.info("s3a delete the root directory of bucket {} (recursive={})",
+        bucket, recursive);
+    boolean emptyRoot = status.isEmptyDirectory();
+    if (emptyRoot) {
+      return true;
+    }
+    if (recursive) {
+      return false;
+    } else {
+      // reject
+      throw new PathIOException(bucket, "Cannot delete root path");
+    }
+  }
+
   private void createFakeDirectoryIfNecessary(Path f)
       throws IOException, AmazonClientException {
     String key = pathToKey(f);
@@ -1551,7 +1602,7 @@ public class S3AFileSystem extends FileSystem {
     }
     try {
       removeKeys(keysToRemove, false, true);
-    } catch(AmazonClientException e) {
+    } catch(AmazonClientException | InvalidRequestException e) {
       instrumentation.errorIgnored();
       if (LOG.isDebugEnabled()) {
         StringBuilder sb = new StringBuilder();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/52] [abbrv] hadoop git commit: HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.

Posted by cn...@apache.org.
HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dacd3ec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dacd3ec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dacd3ec6

Branch: refs/heads/HADOOP-13037
Commit: dacd3ec66b111be24131957c986f0c748cf9ea26
Parents: 8a09bf7
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Oct 11 15:07:14 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Oct 11 15:07:14 2016 -0700

----------------------------------------------------------------------
 .../src/main/native/libhdfs/include/hdfs/hdfs.h                     | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dacd3ec6/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index c856928..83c1c59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -493,6 +493,7 @@ extern  "C" {
      *         complete before proceeding with further file updates.
      *         -1 on error.
      */
+    LIBHDFS_EXTERNAL
     int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
 
     /**


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/52] [abbrv] hadoop git commit: HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep before assertions. Contributed by Mingliang Liu

Posted by cn...@apache.org.
HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep before assertions. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c874fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c874fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c874fa91

Branch: refs/heads/HADOOP-13037
Commit: c874fa914dfbf07d1731f5e87398607366675879
Parents: b963818
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Oct 7 17:03:08 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 13:33:07 2016 -0700

----------------------------------------------------------------------
 .../hadoop/ha/TestZKFailoverController.java     | 34 ++++++++++++--------
 1 file changed, 21 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c874fa91/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 164167c..846c8ae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.security.NoSuchAlgorithmException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -441,12 +442,16 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
     cluster.waitForActiveLockHolder(0);
 
-    Thread.sleep(10000); // allow to quiesce
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return cluster.getService(0).fenceCount == 0 &&
+            cluster.getService(1).fenceCount == 0 &&
+            cluster.getService(0).activeTransitionCount == 2 &&
+            cluster.getService(1).activeTransitionCount == 1;
+      }
+    }, 100, 60 * 1000);
 
-    assertEquals(0, cluster.getService(0).fenceCount);
-    assertEquals(0, cluster.getService(1).fenceCount);
-    assertEquals(2, cluster.getService(0).activeTransitionCount);
-    assertEquals(1, cluster.getService(1).activeTransitionCount);
   }
 
   @Test
@@ -590,14 +595,17 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
     cluster.waitForActiveLockHolder(0);
 
-    Thread.sleep(10000); // allow to quiesce
-
-    assertEquals(0, cluster.getService(0).fenceCount);
-    assertEquals(0, cluster.getService(1).fenceCount);
-    assertEquals(0, cluster.getService(2).fenceCount);
-    assertEquals(2, cluster.getService(0).activeTransitionCount);
-    assertEquals(1, cluster.getService(1).activeTransitionCount);
-    assertEquals(1, cluster.getService(2).activeTransitionCount);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return cluster.getService(0).fenceCount == 0 &&
+            cluster.getService(1).fenceCount == 0 &&
+            cluster.getService(2).fenceCount == 0 &&
+            cluster.getService(0).activeTransitionCount == 2 &&
+            cluster.getService(1).activeTransitionCount == 1 &&
+            cluster.getService(2).activeTransitionCount == 1;
+      }
+    }, 100, 60 * 1000);
   }
 
   private int runFC(DummyHAService target, String ... args) throws Exception {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/52] [abbrv] hadoop git commit: HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou

Posted by cn...@apache.org.
HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec0b7071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec0b7071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec0b7071

Branch: refs/heads/HADOOP-13037
Commit: ec0b70716c8e6509654a3975d3ca139a0144cc8e
Parents: 4d10621
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:07:03 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:07:03 2016 +0600

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java  | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0b7071/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index a9dbb41..3d6d461 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -46,6 +46,7 @@ import org.apache.htrace.core.Tracer;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
+@Deprecated
 public class WritableRpcEngine implements RpcEngine {
   private static final Log LOG = LogFactory.getLog(RPC.class);
   
@@ -331,6 +332,7 @@ public class WritableRpcEngine implements RpcEngine {
 
 
   /** An RPC Server. */
+  @Deprecated
   public static class Server extends RPC.Server {
     /** 
      * Construct an RPC server.
@@ -443,7 +445,8 @@ public class WritableRpcEngine implements RpcEngine {
         value = value.substring(0, 55)+"...";
       LOG.info(value);
     }
-    
+
+    @Deprecated
     static class WritableRpcInvoker implements RpcInvoker {
 
      @Override


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/52] [abbrv] hadoop git commit: YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan

Posted by cn...@apache.org.
YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecb51b85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecb51b85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecb51b85

Branch: refs/heads/HADOOP-13037
Commit: ecb51b857ac7faceff981b2b6f22ea1af0d42ab1
Parents: 96b1266
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Oct 11 15:12:43 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Oct 11 15:12:43 2016 +0000

----------------------------------------------------------------------
 .../yarn/util/ProcfsBasedProcessTree.java       | 26 ++++++-----
 .../yarn/util/TestProcfsBasedProcessTree.java   | 46 ++++++++++----------
 2 files changed, 39 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 80d49c3..29bc277 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -406,15 +406,14 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
                 continue;
               }
 
-              total +=
-                  Math.min(info.sharedDirty, info.pss) + info.privateDirty
-                      + info.privateClean;
+              // Account for anonymous memory, to know the amount of
+              // memory reclaimable by killing the process
+              total += info.anonymous;
+
               if (LOG.isDebugEnabled()) {
                 LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-                    + ", SharedDirty : " + info.sharedDirty + ", PSS : "
-                    + info.pss + ", Private_Dirty : " + info.privateDirty
-                    + ", Private_Clean : " + info.privateClean + ", total : "
-                    + (total * KB_TO_BYTES));
+                    + ", info : " + info.toString()
+                    + ", total : " + (total * KB_TO_BYTES));
               }
             }
           }
@@ -877,6 +876,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     private int sharedDirty;
     private int privateClean;
     private int privateDirty;
+    private int anonymous;
     private int referenced;
     private String regionName;
     private String permission;
@@ -929,6 +929,10 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       return referenced;
     }
 
+    public int getAnonymous() {
+      return anonymous;
+    }
+
     public void setMemInfo(String key, String value) {
       MemInfo info = MemInfo.getMemInfoByName(key);
       int val = 0;
@@ -969,6 +973,9 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       case REFERENCED:
         referenced = val;
         break;
+      case ANONYMOUS:
+        anonymous = val;
+        break;
       default:
         break;
       }
@@ -999,10 +1006,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         .append(MemInfo.REFERENCED.name + ":" + this.getReferenced())
         .append(" kB\n");
       sb.append("\t")
-        .append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
-        .append(" kB\n");
-      sb.append("\t")
-        .append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
+        .append(MemInfo.ANONYMOUS.name + ":" + this.getAnonymous())
         .append(" kB\n");
       return sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index fa4e8c8..841d333 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -369,21 +369,24 @@ public class TestProcfsBasedProcessTree {
       List<ProcessSmapMemoryInfo> memoryMappingList =
           procMemInfo[i].getMemoryInfoList();
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7f56c177c000-7f56c177d000 "
+          "7f56c177c000-7f56c177d000 "
             + "rw-p 00010000 08:02 40371558                   "
             + "/grid/0/jdk1.7.0_25/jre/lib/amd64/libnio.so",
-        new String[] { "4", "4", "25", "4", "25", "15", "10", "4", "0", "0",
-            "0", "4", "4" }));
+            // Format: size, rss, pss, shared_clean, shared_dirty, private_clean,
+            // private_dirty, referenced, anon, anon-huge-pages, swap,
+            // kernel_page_size, mmu_page_size
+            new String[] {"4", "4", "25", "4", "25", "15", "10", "4", "10", "0",
+                "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
-        new String[] { "4", "4", "25", "4", "0", "15", "10", "4", "0", "0",
-            "0", "4", "4" }));
+          "7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
+          new String[] {"4", "4", "25", "4", "0", "15", "10", "4", "10", "0",
+              "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] { "4", "4",
-            "25", "4", "0", "15", "10", "4", "0", "0", "0", "4", "4" }));
+          "7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] {"4", "4",
+              "25", "4", "0", "15", "10", "4", "10", "0", "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] { "4", "4",
-            "25", "4", "50", "15", "10", "4", "0", "0", "0", "4", "4" }));
+          "7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] {"4", "4",
+              "25", "4", "50", "15", "10", "4", "10", "0", "0", "4", "4"}));
     }
   }
 
@@ -471,13 +474,12 @@ public class TestProcfsBasedProcessTree {
 
       // Check by enabling smaps
       setSmapsInProceTree(processTree, true);
-      // RSS=Min(shared_dirty,PSS)+PrivateClean+PrivateDirty (exclude r-xs,
-      // r--s)
+      // RSS is now just the anonymous pages (excluding r-xs and r--s mappings):
+      // 2 counted mappings x 10 kB anon = 20 kB per process
       Assert.assertEquals("rss memory does not match",
-        (100 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
+          (20 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
       // verify old API
       Assert.assertEquals("rss memory (old API) does not match",
-        (100 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
+          (20 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
 
       // test the cpu time again to see if it cumulates
       procInfos[0] =
@@ -621,10 +623,10 @@ public class TestProcfsBasedProcessTree {
           cumuRssMem, processTree.getCumulativeRssmem());
       } else {
         Assert.assertEquals("rssmem does not include new process",
-          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
+            20 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
         // verify old API
         Assert.assertEquals("rssmem (old API) does not include new process",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
+            20 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
       }
 
       // however processes older than 1 iteration will retain the older value
@@ -650,11 +652,11 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new process",
-          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
+            20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
         // Verify old API
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new process",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
+            20 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
       }
 
       // one more process
@@ -696,11 +698,11 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
+            20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
         // Verify old API
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new processes",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
+            20 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
       }
 
       // processes older than 1 iteration should not include new process,
@@ -727,10 +729,10 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
+            20 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new processes",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
+            20 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
       }
 
       // no processes older than 3 iterations


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/52] [abbrv] hadoop git commit: HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou

Posted by cn...@apache.org.
HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba7092b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba7092b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba7092b

Branch: refs/heads/HADOOP-13037
Commit: 7ba7092bbcbbccfa24b672414d315656e600096c
Parents: b84c489
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Oct 11 16:47:39 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 17:23:54 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 50 ++++++++++++++++++++
 1 file changed, 50 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba7092b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 94ecb9e..b49f73d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
@@ -364,6 +365,55 @@ public class TestDFSAdmin {
   }
 
   @Test(timeout = 30000)
+  public void testPrintTopology() throws Exception {
+    redirectStream();
+
+    /* init conf */
+    final Configuration dfsConf = new HdfsConfiguration();
+    final File baseDir = new File(
+        PathUtils.getTestDir(getClass()),
+        GenericTestUtils.getMethodName());
+    dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+
+    final int numDn = 4;
+    final String[] racks = {
+        "/d1/r1", "/d1/r2",
+        "/d2/r1", "/d2/r2"};
+
+    /* init cluster using topology */
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+        .numDataNodes(numDn).racks(racks).build()) {
+
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      resetStream();
+      final int ret = ToolRunner.run(dfsAdmin, new String[] {"-printTopology"});
+
+      /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(0, ret);
+      assertEquals(
+          "There should be three lines per Datanode: the 1st line is"
+              + " rack info, 2nd node info, 3rd empty line. The total"
+              + " should therefore be 3 * numDn.",
+          12, outs.size());
+      assertThat(outs.get(0),
+          is(allOf(containsString("Rack:"), containsString("/d1/r1"))));
+      assertThat(outs.get(3),
+          is(allOf(containsString("Rack:"), containsString("/d1/r2"))));
+      assertThat(outs.get(6),
+          is(allOf(containsString("Rack:"), containsString("/d2/r1"))));
+      assertThat(outs.get(9),
+          is(allOf(containsString("Rack:"), containsString("/d2/r2"))));
+    }
+  }
+
+  @Test(timeout = 30000)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
     ReconfigurationUtil ru = mock(ReconfigurationUtil.class);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/52] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

Posted by cn...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3059b251/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4b7eebd..aa41e9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -57,8 +57,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.base.Supplier;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
@@ -74,7 +77,6 @@ import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
@@ -116,44 +118,49 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
 
 /**
- * A JUnit test for doing fsck
+ * A JUnit test for doing fsck.
  */
 public class TestFsck {
+  private static final Log LOG =
+      LogFactory.getLog(TestFsck.class.getName());
+
   static final String AUDITLOG_FILE =
       GenericTestUtils.getTempPath("TestFsck-audit.log");
   
   // Pattern for: 
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
-  static final Pattern fsckPattern = Pattern.compile(
+  static final Pattern FSCK_PATTERN = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + 
       "perm=null\\s" + "proto=.*");
-  static final Pattern getfileinfoPattern = Pattern.compile(
+  static final Pattern GET_FILE_INFO_PATTERN = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" + 
       "perm=null\\s" + "proto=.*");
 
-  static final Pattern numMissingBlocksPattern = Pattern.compile(
+  static final Pattern NUM_MISSING_BLOCKS_PATTERN = Pattern.compile(
       ".*Missing blocks:\t\t([0123456789]*).*");
 
-  static final Pattern numCorruptBlocksPattern = Pattern.compile(
+  static final Pattern NUM_CORRUPT_BLOCKS_PATTERN = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
   
   private static final String LINE_SEPARATOR =
-    System.getProperty("line.separator");
+      System.getProperty("line.separator");
 
   static String runFsck(Configuration conf, int expectedErrCode, 
-                        boolean checkErrorCode,String... path)
+                        boolean checkErrorCode, String... path)
                         throws Exception {
     ByteArrayOutputStream bStream = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(bStream, true);
@@ -163,60 +170,72 @@ public class TestFsck {
       assertEquals(expectedErrCode, errCode);
     }
     GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
-    FSImage.LOG.info("OUTPUT = " + bStream.toString());
+    LOG.info("OUTPUT = " + bStream.toString());
     return bStream.toString();
   }
 
-  /** do fsck */
+  private MiniDFSCluster cluster = null;
+  private Configuration conf = null;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    shutdownCluster();
+  }
+
+  private void shutdownCluster() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /** do fsck. */
   @Test
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(20).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      final long precision = 1L;
-      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      final String fileName = "/srcdat";
-      util.createFiles(fs, fileName);
-      util.waitReplication(fs, fileName, (short)3);
-      final Path file = new Path(fileName);
-      long aTime = fs.getFileStatus(file).getAccessTime();
-      Thread.sleep(precision);
-      setupAuditLogs();
-      String outStr = runFsck(conf, 0, true, "/");
-      verifyAuditLogs();
-      assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-      
-      // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster.Builder(conf)
-          .numDataNodes(0).format(false).build();
-      outStr = runFsck(conf, 1, true, "/");
-      // expect the result is corrupt
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      System.out.println(outStr);
-      
-      // bring up data nodes & cleanup cluster
-      cluster.startDataNodes(conf, 4, true, null, null);
-      cluster.waitActive();
-      cluster.waitClusterUp();
-      fs = cluster.getFileSystem();
-      util.cleanup(fs, "/srcdat");
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    final long precision = 1L;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
+        precision);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    final String fileName = "/srcdat";
+    util.createFiles(fs, fileName);
+    util.waitReplication(fs, fileName, (short)3);
+    final Path file = new Path(fileName);
+    long aTime = fs.getFileStatus(file).getAccessTime();
+    Thread.sleep(precision);
+    setupAuditLogs();
+    String outStr = runFsck(conf, 0, true, "/");
+    verifyAuditLogs();
+    assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    shutdownCluster();
+
+    // restart the cluster; bring up namenode but not the data nodes
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0).format(false).build();
+    outStr = runFsck(conf, 1, true, "/");
+    // expect the result is corrupt
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    System.out.println(outStr);
+
+    // bring up data nodes & cleanup cluster
+    cluster.startDataNodes(conf, 4, true, null, null);
+    cluster.waitActive();
+    cluster.waitClusterUp();
+    fs = cluster.getFileSystem();
+    util.cleanup(fs, "/srcdat");
   }
 
-  /** Sets up log4j logger for auditlogs */
+  /** Sets up log4j logger for auditlogs. */
   private void setupAuditLogs() throws IOException {
     File file = new File(AUDITLOG_FILE);
     if (file.exists()) {
@@ -247,11 +266,11 @@ public class TestFsck {
         line = reader.readLine();
         assertNotNull(line);
         assertTrue("Expected getfileinfo event not found in audit log",
-            getfileinfoPattern.matcher(line).matches());
+            GET_FILE_INFO_PATTERN.matcher(line).matches());
       }
       line = reader.readLine();
       assertNotNull(line);
-      assertTrue("Expected fsck event not found in audit log", fsckPattern
+      assertTrue("Expected fsck event not found in audit log", FSCK_PATTERN
           .matcher(line).matches());
       assertNull("Unexpected event in audit log", reader.readLine());
     } finally {
@@ -270,175 +289,155 @@ public class TestFsck {
   public void testFsckNonExistent() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(20).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      util.createFiles(fs, "/srcdat");
-      util.waitReplication(fs, "/srcdat", (short)3);
-      String outStr = runFsck(conf, 0, true, "/non-existent");
-      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
-      System.out.println(outStr);
-      util.cleanup(fs, "/srcdat");
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    util.createFiles(fs, "/srcdat");
+    util.waitReplication(fs, "/srcdat", (short)3);
+    String outStr = runFsck(conf, 0, true, "/non-existent");
+    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
+    System.out.println(outStr);
+    util.cleanup(fs, "/srcdat");
   }
 
-  /** Test fsck with permission set on inodes */
+  /** Test fsck with permission set on inodes. */
   @Test
   public void testFsckPermission() throws Exception {
     final DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(20).build();
-    final Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
-    MiniDFSCluster cluster = null;
-    try {
-      // Create a cluster with the current user, write some files
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      final MiniDFSCluster c2 = cluster;
-      final String dir = "/dfsck";
-      final Path dirpath = new Path(dir);
-      final FileSystem fs = c2.getFileSystem();
-
-      util.createFiles(fs, dir);
-      util.waitReplication(fs, dir, (short) 3);
-      fs.setPermission(dirpath, new FsPermission((short) 0700));
-
-      // run DFSck as another user, should fail with permission issue
-      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
-          "ProbablyNotARealUserName", new String[] { "ShangriLa" });
-      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          System.out.println(runFsck(conf, -1, true, dir));
-          return null;
-        }
-      });
-      
-      // set permission and try DFSck again as the fake user, should succeed
-      fs.setPermission(dirpath, new FsPermission((short) 0777));
-      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          final String outStr = runFsck(conf, 0, true, dir);
-          System.out.println(outStr);
-          assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-          return null;
-        }
-      });
+    // Create a cluster with the current user, write some files
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    final MiniDFSCluster c2 = cluster;
+    final String dir = "/dfsck";
+    final Path dirpath = new Path(dir);
+    final FileSystem fs = c2.getFileSystem();
+
+    util.createFiles(fs, dir);
+    util.waitReplication(fs, dir, (short) 3);
+    fs.setPermission(dirpath, new FsPermission((short) 0700));
+
+    // run DFSck as another user, should fail with permission issue
+    UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+        "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        System.out.println(runFsck(conf, -1, true, dir));
+        return null;
+      }
+    });
 
-      util.cleanup(fs, dir);
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    // set permission and try DFSck again as the fake user, should succeed
+    fs.setPermission(dirpath, new FsPermission((short) 0777));
+    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final String outStr = runFsck(conf, 0, true, dir);
+        System.out.println(outStr);
+        assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+        return null;
+      }
+    });
+
+    util.cleanup(fs, dir);
   }
 
   @Test
   public void testFsckMove() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    final int DFS_BLOCK_SIZE = 1024;
-    final int NUM_DATANODES = 4;
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE);
+    final int dfsBlockSize = 1024;
+    final int numDatanodes = 4;
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, dfsBlockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
-        (5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1), 5 * DFS_BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
+        (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).
-          numDataNodes(NUM_DATANODES).build();
-      String topDir = "/srcdat";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
-                                          cluster.getNameNodePort()), conf);
-      String fileNames[] = util.getFileNames(topDir);
-      CorruptedTestFile ctFiles[] = new CorruptedTestFile[] {
+    cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(numDatanodes).build();
+    String topDir = "/srcdat";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                        cluster.getNameNodePort()), conf);
+    String[] fileNames = util.getFileNames(topDir);
+    CorruptedTestFile[] ctFiles = new CorruptedTestFile[]{
         new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE)
-      };
-      int totalMissingBlocks = 0;
-      for (CorruptedTestFile ctFile : ctFiles) {
-        totalMissingBlocks += ctFile.getTotalMissingBlocks();
-      }
-      for (CorruptedTestFile ctFile : ctFiles) {
-        ctFile.removeBlocks(cluster);
-      }
-      // Wait for fsck to discover all the missing blocks
-      while (true) {
-        outStr = runFsck(conf, 1, false, "/");
-        String numMissing = null;
-        String numCorrupt = null;
-        for (String line : outStr.split(LINE_SEPARATOR)) {
-          Matcher m = numMissingBlocksPattern.matcher(line);
-          if (m.matches()) {
-            numMissing = m.group(1);
-          }
-          m = numCorruptBlocksPattern.matcher(line);
-          if (m.matches()) {
-            numCorrupt = m.group(1);
-          }
-          if (numMissing != null && numCorrupt != null) {
-            break;
-          }
+            dfsClient, numDatanodes, dfsBlockSize)
+    };
+    int totalMissingBlocks = 0;
+    for (CorruptedTestFile ctFile : ctFiles) {
+      totalMissingBlocks += ctFile.getTotalMissingBlocks();
+    }
+    for (CorruptedTestFile ctFile : ctFiles) {
+      ctFile.removeBlocks(cluster);
+    }
+    // Wait for fsck to discover all the missing blocks
+    while (true) {
+      outStr = runFsck(conf, 1, false, "/");
+      String numMissing = null;
+      String numCorrupt = null;
+      for (String line : outStr.split(LINE_SEPARATOR)) {
+        Matcher m = NUM_MISSING_BLOCKS_PATTERN.matcher(line);
+        if (m.matches()) {
+          numMissing = m.group(1);
         }
-        if (numMissing == null || numCorrupt == null) {
-          throw new IOException("failed to find number of missing or corrupt" +
-              " blocks in fsck output.");
+        m = NUM_CORRUPT_BLOCKS_PATTERN.matcher(line);
+        if (m.matches()) {
+          numCorrupt = m.group(1);
         }
-        if (numMissing.equals(Integer.toString(totalMissingBlocks))) {
-          assertTrue(numCorrupt.equals(Integer.toString(0)));
-          assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        if (numMissing != null && numCorrupt != null) {
           break;
         }
-        try {
-          Thread.sleep(100);
-        } catch (InterruptedException ignore) {
-        }
       }
-
-      // Copy the non-corrupt blocks of corruptFileName to lost+found.
-      outStr = runFsck(conf, 1, false, "/", "-move");
-      FSImage.LOG.info("WATERMELON: outStr = " + outStr);
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-
-      // Make sure that we properly copied the block files from the DataNodes
-      // to lost+found
-      for (CorruptedTestFile ctFile : ctFiles) {
-        ctFile.checkSalvagedRemains();
+      if (numMissing == null || numCorrupt == null) {
+        throw new IOException("failed to find number of missing or corrupt" +
+            " blocks in fsck output.");
+      }
+      if (numMissing.equals(Integer.toString(totalMissingBlocks))) {
+        assertTrue(numCorrupt.equals(Integer.toString(0)));
+        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        break;
       }
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ignore) {
+      }
+    }
 
-      // Fix the filesystem by removing corruptFileName
-      outStr = runFsck(conf, 1, true, "/", "-delete");
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      
-      // Check to make sure we have a healthy filesystem
-      outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
-      util.cleanup(fs, topDir);
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+    // Copy the non-corrupt blocks of corruptFileName to lost+found.
+    outStr = runFsck(conf, 1, false, "/", "-move");
+    LOG.info("WATERMELON: outStr = " + outStr);
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Make sure that we properly copied the block files from the DataNodes
+    // to lost+found
+    for (CorruptedTestFile ctFile : ctFiles) {
+      ctFile.checkSalvagedRemains();
     }
+
+    // Fix the filesystem by removing corruptFileName
+    outStr = runFsck(conf, 1, true, "/", "-delete");
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Check to make sure we have a healthy filesystem
+    outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    util.cleanup(fs, topDir);
   }
 
   static private class CorruptedTestFile {
@@ -449,7 +448,7 @@ public class TestFsck {
     final private int blockSize;
     final private byte[] initialContents;
     
-    public CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
+    CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
         DFSClient dfsClient, int numDataNodes, int blockSize)
             throws IOException {
       this.name = name;
@@ -505,7 +504,7 @@ public class TestFsck {
                 new FileOutputStream(blockFile, false);
             blockFileStream.write("corrupt".getBytes());
             blockFileStream.close();
-            FSImage.LOG.info("Corrupted block file " + blockFile);
+            LOG.info("Corrupted block file " + blockFile);
           }
         }
       }
@@ -536,7 +535,9 @@ public class TestFsck {
           if (blockIdx == (numBlocks - 1)) {
             // The last block might not be full-length
             len = (int)(in.getFileLength() % blockSize);
-            if (len == 0) len = blockBuffer.length;
+            if (len == 0) {
+              len = blockBuffer.length;
+            }
           }
           IOUtils.readFully(in, blockBuffer, 0, len);
           int startIdx = blockIdx * blockSize;
@@ -555,218 +556,186 @@ public class TestFsck {
   
   @Test
   public void testFsckMoveAndDelete() throws Exception {
-    final int MAX_MOVE_TRIES = 5;
+    final int maxMoveTries = 5;
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName("TestFsckMoveAndDelete").setNumFiles(5).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      String topDir = "/srcdat";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      
-      // Corrupt a block by deleting it
-      String[] fileNames = util.getFileNames(topDir);
-      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
-                                          cluster.getNameNodePort()), conf);
-      String corruptFileName = fileNames[0];
-      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
-          corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
-      for (int i=0; i<4; i++) {
-        File blockFile = cluster.getBlockFile(i, block);
-        if(blockFile != null && blockFile.exists()) {
-          assertTrue(blockFile.delete());
-        }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    String topDir = "/srcdat";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // Corrupt a block by deleting it
+    String[] fileNames = util.getFileNames(topDir);
+    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                        cluster.getNameNodePort()), conf);
+    String corruptFileName = fileNames[0];
+    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
+        corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
+    for (int i=0; i<4; i++) {
+      File blockFile = cluster.getBlockFile(i, block);
+      if(blockFile != null && blockFile.exists()) {
+        assertTrue(blockFile.delete());
       }
+    }
 
-      // We excpect the filesystem to be corrupted
-      outStr = runFsck(conf, 1, false, "/");
-      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
-        try {
-          Thread.sleep(100);
-        } catch (InterruptedException ignore) {
-        }
-        outStr = runFsck(conf, 1, false, "/");
-      } 
-      
-      // After a fsck -move, the corrupted file should still exist.
-      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
-        outStr = runFsck(conf, 1, true, "/", "-move" );
-        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-        String[] newFileNames = util.getFileNames(topDir);
-        boolean found = false;
-        for (String f : newFileNames) {
-          if (f.equals(corruptFileName)) {
-            found = true;
-            break;
-          }
-        }
-        assertTrue(found);
+    // We expect the filesystem to be corrupted
+    outStr = runFsck(conf, 1, false, "/");
+    while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ignore) {
       }
+      outStr = runFsck(conf, 1, false, "/");
+    }
 
-      // Fix the filesystem by moving corrupted files to lost+found
-      outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
+    // After a fsck -move, the corrupted file should still exist.
+    for (int i = 0; i < maxMoveTries; i++) {
+      outStr = runFsck(conf, 1, true, "/", "-move");
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      
-      // Check to make sure we have healthy filesystem
-      outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
-      util.cleanup(fs, topDir);
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+      String[] newFileNames = util.getFileNames(topDir);
+      boolean found = false;
+      for (String f : newFileNames) {
+        if (f.equals(corruptFileName)) {
+          found = true;
+          break;
+        }
+      }
+      assertTrue(found);
     }
+
+    // Fix the filesystem by moving corrupted files to lost+found
+    outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Check to make sure we have healthy filesystem
+    outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    util.cleanup(fs, topDir);
   }
   
   @Test
   public void testFsckOpenFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(4).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      String topDir = "/srcdat";
-      String randomString = "HADOOP  ";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      // Open a file for writing and do not close for now
-      Path openFile = new Path(topDir + "/openFile");
-      FSDataOutputStream out = fs.create(openFile);
-      int writeCount = 0;
-      while (writeCount != 100) {
-        out.write(randomString.getBytes());
-        writeCount++;                  
-      }
-      ((DFSOutputStream) out.getWrappedStream()).hflush();
-      // We expect the filesystem to be HEALTHY and show one open file
-      outStr = runFsck(conf, 0, true, topDir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE")); 
-      // Use -openforwrite option to list open files
-      outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks",
-          "-locations", "-openforwrite");
-      System.out.println(outStr);
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Under Construction Block:"));
-      assertTrue(outStr.contains("openFile"));
-      // Close the file
-      out.close(); 
-      // Now, fsck should show HEALTHY fs and should not show any open files
-      outStr = runFsck(conf, 0, true, topDir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE"));
-      assertFalse(outStr.contains("Under Construction Block:"));
-      util.cleanup(fs, topDir);
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    String topDir = "/srcdat";
+    String randomString = "HADOOP  ";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    // Open a file for writing and do not close for now
+    Path openFile = new Path(topDir + "/openFile");
+    FSDataOutputStream out = fs.create(openFile);
+    int writeCount = 0;
+    while (writeCount != 100) {
+      out.write(randomString.getBytes());
+      writeCount++;
     }
+    ((DFSOutputStream) out.getWrappedStream()).hflush();
+    // We expect the filesystem to be HEALTHY and show one open file
+    outStr = runFsck(conf, 0, true, topDir);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    // Use -openforwrite option to list open files
+    outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks",
+        "-locations", "-openforwrite");
+    System.out.println(outStr);
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Under Construction Block:"));
+    assertTrue(outStr.contains("openFile"));
+    // Close the file
+    out.close();
+    // Now, fsck should show HEALTHY fs and should not show any open files
+    outStr = runFsck(conf, 0, true, topDir);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    assertFalse(outStr.contains("Under Construction Block:"));
+    util.cleanup(fs, topDir);
   }
 
   @Test
   public void testFsckOpenECFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsckECFile").
         setNumFiles(4).build();
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     ErasureCodingPolicy ecPolicy =
         ErasureCodingPolicyManager.getSystemDefaultPolicy();
     int numAllUnits = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
         numAllUnits + 1).build();
     FileSystem fs = null;
-    try {
-      String topDir = "/myDir";
-      byte[] randomBytes = new byte[3000000];
-      int seed = 42;
-      new Random(seed).nextBytes(randomBytes);
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      util.createFiles(fs, topDir);
-      // set topDir to EC when it has replicated files
-      cluster.getFileSystem().getClient().setErasureCodingPolicy(
-          topDir, ecPolicy);
-
-      // create a new file under topDir
-      DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
-      // Open a EC file for writing and do not close for now
-      Path openFile = new Path(topDir + "/openECFile");
-      FSDataOutputStream out = fs.create(openFile);
-      int writeCount = 0;
-      while (writeCount != 300) {
-        out.write(randomBytes);
-        writeCount++;
-      }
-
-      // make sure the fsck can correctly handle mixed ec/replicated files
-      runFsck(conf, 0, true, topDir, "-files", "-blocks", "-openforwrite");
-
-      // We expect the filesystem to be HEALTHY and show one open file
-      String outStr = runFsck(conf, 0, true, openFile.toString(), "-files",
-          "-blocks", "-openforwrite");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
-
-      // Use -openforwrite option to list open files
-      outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
-          "-locations", "-openforwrite", "-replicaDetails");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Under Construction Block:"));
-
-      // Close the file
-      out.close();
-
-      // Now, fsck should show HEALTHY fs and should not show any open files
-      outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
-          "-locations", "-racks", "-replicaDetails");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE"));
-      assertFalse(outStr.contains("Under Construction Block:"));
-      assertFalse(outStr.contains("Expected_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      util.cleanup(fs, topDir);
-    } finally {
-      if (fs != null) {
-        try {
-          fs.close();
-        } catch (Exception e) {
-        }
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+    String topDir = "/myDir";
+    byte[] randomBytes = new byte[3000000];
+    int seed = 42;
+    new Random(seed).nextBytes(randomBytes);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    util.createFiles(fs, topDir);
+    // set topDir to EC when it has replicated files
+    cluster.getFileSystem().getClient().setErasureCodingPolicy(
+        topDir, ecPolicy);
+
+    // create a new file under topDir
+    DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
+    // Open an EC file for writing and do not close for now
+    Path openFile = new Path(topDir + "/openECFile");
+    FSDataOutputStream out = fs.create(openFile);
+    int writeCount = 0;
+    while (writeCount != 300) {
+      out.write(randomBytes);
+      writeCount++;
     }
+
+    // make sure the fsck can correctly handle mixed ec/replicated files
+    runFsck(conf, 0, true, topDir, "-files", "-blocks", "-openforwrite");
+
+    // We expect the filesystem to be HEALTHY and show one open file
+    String outStr = runFsck(conf, 0, true, openFile.toString(), "-files",
+        "-blocks", "-openforwrite");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
+
+    // Use -openforwrite option to list open files
+    outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
+        "-locations", "-openforwrite", "-replicaDetails");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Under Construction Block:"));
+
+    // Close the file
+    out.close();
+
+    // Now, fsck should show HEALTHY fs and should not show any open files
+    outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
+        "-locations", "-racks", "-replicaDetails");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    assertFalse(outStr.contains("Under Construction Block:"));
+    assertFalse(outStr.contains("Expected_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    util.cleanup(fs, topDir);
   }
 
   @Test
   public void testCorruptBlock() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
@@ -778,8 +747,6 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    MiniDFSCluster cluster = null;
-    try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -810,7 +777,7 @@ public class TestFsck {
       IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
                         true);
     } catch (IOException ie) {
-      // Ignore exception
+      assertTrue(ie instanceof ChecksumException);
     }
 
     dfsClient = new DFSClient(new InetSocketAddress("localhost",
@@ -827,27 +794,23 @@ public class TestFsck {
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
-    assertTrue (blocks.get(0).isCorrupt());
+    assertTrue(blocks.get(0).isCorrupt());
 
     // Check if fsck reports the same
     outStr = runFsck(conf, 1, true, "/");
     System.out.println(outStr);
     assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     assertTrue(outStr.contains("testCorruptBlock"));
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
   }
 
   @Test
   public void testUnderMinReplicatedBlock() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     // Set minReplication to 2
     short minReplication=2;
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,minReplication);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, minReplication);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -855,252 +818,234 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      Path file1 = new Path("/testUnderMinReplicatedBlock");
-      DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
-      // Wait until file replication has completed
-      DFSTestUtil.waitReplication(fs, file1, minReplication);
-      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
-
-      // Make sure filesystem is in healthy state
-      outStr = runFsck(conf, 0, true, "/");
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // corrupt the first replica
-      File blockFile = cluster.getBlockFile(0, block);
-      if (blockFile != null && blockFile.exists()) {
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int) channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-      }
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    Path file1 = new Path("/testUnderMinReplicatedBlock");
+    DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
+    // Wait until file replication has completed
+    DFSTestUtil.waitReplication(fs, file1, minReplication);
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
 
-      dfsClient = new DFSClient(new InetSocketAddress("localhost",
-          cluster.getNameNodePort()), conf);
-      blocks = dfsClient.getNamenode().
-          getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
-      replicaCount = blocks.get(0).getLocations().length;
-      while (replicaCount != factor) {
+    // Make sure filesystem is in healthy state
+    outStr = runFsck(conf, 0, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // corrupt the first replica
+    File blockFile = cluster.getBlockFile(0, block);
+    if (blockFile != null && blockFile.exists()) {
+      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+      FileChannel channel = raFile.getChannel();
+      String badString = "BADBAD";
+      int rand = random.nextInt((int) channel.size()/2);
+      raFile.seek(rand);
+      raFile.write(badString.getBytes());
+      raFile.close();
+    }
+
+    dfsClient = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), conf);
+    blocks = dfsClient.getNamenode().
+        getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    replicaCount = blocks.get(0).getLocations().length;
+    while (replicaCount != factor) {
+      try {
+        Thread.sleep(100);
+        // Read the file to trigger reportBadBlocks
         try {
-          Thread.sleep(100);
-          // Read the file to trigger reportBadBlocks
-          try {
-            IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
-                true);
-          } catch (IOException ie) {
-            // Ignore exception
-          }
-          System.out.println("sleep in try: replicaCount="+replicaCount+"  factor="+factor);
-        } catch (InterruptedException ignore) {
+          IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(),
+              conf, true);
+        } catch (IOException ie) {
+          assertTrue(ie instanceof ChecksumException);
         }
-        blocks = dfsClient.getNamenode().
-            getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
-        replicaCount = blocks.get(0).getLocations().length;
+        System.out.println("sleep in try: replicaCount=" + replicaCount
+            + "  factor=" + factor);
+      } catch (InterruptedException ignore) {
       }
-
-      // Check if fsck reports the same
-      outStr = runFsck(conf, 0, true, "/");
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
-      assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
+      blocks = dfsClient.getNamenode().
+          getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
     }
+
+    // Check if fsck reports the same
+    outStr = runFsck(conf, 0, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
+    assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
   }
 
   @Test(timeout = 60000)
   public void testFsckReplicaDetails() throws Exception {
 
-    final short REPL_FACTOR = 1;
-    short NUM_DN = 1;
+    final short replFactor = 1;
+    short numDn = 1;
     final long blockSize = 512;
     final long fileSize = 1024;
     boolean checkDecommissionInProgress = false;
-    String[] racks = { "/rack1" };
-    String[] hosts = { "host1" };
+    String[] racks = {"/rack1"};
+    String[] hosts = {"host1"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    MiniDFSCluster cluster;
     DistributedFileSystem dfs;
     cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build();
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+            .racks(racks).build();
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
 
     // create files
     final String testFile = new String("/testfile");
     final Path path = new Path(testFile);
-    DFSTestUtil.createFile(dfs, path, fileSize, REPL_FACTOR, 1000L);
-    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
-    try {
-      // make sure datanode that has replica is fine before decommission
-      String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(fsckOut.contains("(LIVE)"));
-
-      // decommission datanode
-      ExtendedBlock eb = DFSTestUtil.getFirstBlock(dfs, path);
-      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
-      BlockManager bm = fsn.getBlockManager();
-      BlockCollection bc = null;
-      try {
-        fsn.writeLock();
-        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = fsn.getBlockCollection(bi);
-      } finally {
-        fsn.writeUnlock();
-      }
-      DatanodeDescriptor dn = bc.getBlocks()[0]
-          .getDatanode(0);
-      bm.getDatanodeManager().getDecomManager().startDecommission(dn);
-      String dnName = dn.getXferAddr();
-
-      // check the replica status while decommissioning
-      fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains("(DECOMMISSIONING)"));
-
-      // Start 2nd Datanode and wait for decommission to start
-      cluster.startDataNodes(conf, 1, true, null, null, null);
-      DatanodeInfo datanodeInfo = null;
-      do {
-        Thread.sleep(2000);
-        for (DatanodeInfo info : dfs.getDataNodeStats()) {
-          if (dnName.equals(info.getXferAddr())) {
-            datanodeInfo = info;
-          }
-        }
-        if (!checkDecommissionInProgress && datanodeInfo != null
-            && datanodeInfo.isDecommissionInProgress()) {
-          checkDecommissionInProgress = true;
-        }
-      } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
+    DFSTestUtil.waitReplication(dfs, path, replFactor);
 
-      // check the replica status after decommission is done
-      fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains("(DECOMMISSIONED)"));
+    // make sure datanode that has replica is fine before decommission
+    String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(fsckOut.contains("(LIVE)"));
+
+    // decommission datanode
+    ExtendedBlock eb = DFSTestUtil.getFirstBlock(dfs, path);
+    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+    BlockCollection bc = null;
+    try {
+      fsn.writeLock();
+      BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
+      bc = fsn.getBlockCollection(bi);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      fsn.writeUnlock();
     }
+    DatanodeDescriptor dn = bc.getBlocks()[0]
+        .getDatanode(0);
+    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
+    String dnName = dn.getXferAddr();
+
+    // check the replica status while decommissioning
+    fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains("(DECOMMISSIONING)"));
+
+    // Start 2nd Datanode and wait for decommission to start
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+    DatanodeInfo datanodeInfo = null;
+    do {
+      Thread.sleep(2000);
+      for (DatanodeInfo info : dfs.getDataNodeStats()) {
+        if (dnName.equals(info.getXferAddr())) {
+          datanodeInfo = info;
+        }
+      }
+      if (!checkDecommissionInProgress && datanodeInfo != null
+          && datanodeInfo.isDecommissionInProgress()) {
+        checkDecommissionInProgress = true;
+      }
+    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+
+    // check the replica status after decommission is done
+    fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains("(DECOMMISSIONED)"));
   }
 
-  /** Test if fsck can return -1 in case of failure
+  /** Test if fsck can return -1 in case of failure.
    * 
    * @throws Exception
    */
   @Test
   public void testFsckError() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      // bring up a one-node cluster
-      Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      String fileName = "/test.txt";
-      Path filePath = new Path(fileName);
-      FileSystem fs = cluster.getFileSystem();
-      
-      // create a one-block file
-      DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
-      DFSTestUtil.waitReplication(fs, filePath, (short)1);
-      
-      // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
-          (fileName, true);
-      final BlockInfo[] blocks = node.getBlocks();
-      assertEquals(blocks.length, 1);
-      blocks[0].setNumBytes(-1L);  // set the block length to be negative
-      
-      // run fsck and expect a failure with -1 as the error code
-      String outStr = runFsck(conf, -1, true, fileName);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
-      
-      // clean up file system
-      fs.delete(filePath, true);
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
+    // bring up a one-node cluster
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    String fileName = "/test.txt";
+    Path filePath = new Path(fileName);
+    FileSystem fs = cluster.getFileSystem();
+
+    // create a one-block file
+    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
+    DFSTestUtil.waitReplication(fs, filePath, (short)1);
+
+    // intentionally corrupt NN data structure
+    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode(
+        fileName, true);
+    final BlockInfo[] blocks = node.getBlocks();
+    assertEquals(blocks.length, 1);
+    blocks[0].setNumBytes(-1L);  // set the block length to be negative
+
+    // run fsck and expect a failure with -1 as the error code
+    String outStr = runFsck(conf, -1, true, fileName);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
+
+    // clean up file system
+    fs.delete(filePath, true);
   }
   
-  /** check if option -list-corruptfiles of fsck command works properly */
+  /** Check if the fsck -list-corruptfileblocks option works properly. */
   @Test
   public void testFsckListCorruptFilesBlocks() throws Exception {
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      DFSTestUtil util = new DFSTestUtil.Builder().
-          setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
-          setMaxSize(1024).build();
-      util.createFiles(fs, "/corruptData", (short) 1);
-      util.waitReplication(fs, "/corruptData", (short) 1);
-
-      // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
-      String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
-      System.out.println("1. good fsck out: " + outStr);
-      assertTrue(outStr.contains("has 0 CORRUPT files"));
-      // delete the blocks
-      final String bpid = cluster.getNamesystem().getBlockPoolId();
-      for (int i=0; i<4; i++) {
-        for (int j=0; j<=1; j++) {
-          File storageDir = cluster.getInstanceStorageDir(i, j);
-          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
-              data_dir);
-          if (metadataFiles == null)
-            continue;
-          for (File metadataFile : metadataFiles) {
-            File blockFile = Block.metaToBlockFile(metadataFile);
-            assertTrue("Cannot remove file.", blockFile.delete());
-            assertTrue("Cannot remove file.", metadataFile.delete());
-          }
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    DFSTestUtil util = new DFSTestUtil.Builder().
+        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
+        setMaxSize(1024).build();
+    util.createFiles(fs, "/corruptData", (short) 1);
+    util.waitReplication(fs, "/corruptData", (short) 1);
+
+    String outStr = runFsck(conf, 0, false, "/corruptData",
+        "-list-corruptfileblocks");
+    System.out.println("1. good fsck out: " + outStr);
+    assertTrue(outStr.contains("has 0 CORRUPT files"));
+    // delete the blocks
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    for (int i=0; i<4; i++) {
+      for (int j=0; j<=1; j++) {
+        File storageDir = cluster.getInstanceStorageDir(i, j);
+        File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+            dataDir);
+        if (metadataFiles == null) {
+          continue;
+        }
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          assertTrue("Cannot remove file.", blockFile.delete());
+          assertTrue("Cannot remove file.", metadataFile.delete());
         }
       }
+    }
 
-      // wait for the namenode to see the corruption
-      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
-      CorruptFileBlocks corruptFileBlocks = namenode
+    // wait for the namenode to see the corruption
+    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
+    CorruptFileBlocks corruptFileBlocks = namenode
+        .listCorruptFileBlocks("/corruptData", null);
+    int numCorrupt = corruptFileBlocks.getFiles().length;
+    while (numCorrupt == 0) {
+      Thread.sleep(1000);
+      corruptFileBlocks = namenode
           .listCorruptFileBlocks("/corruptData", null);
-      int numCorrupt = corruptFileBlocks.getFiles().length;
-      while (numCorrupt == 0) {
-        Thread.sleep(1000);
-        corruptFileBlocks = namenode
-            .listCorruptFileBlocks("/corruptData", null);
-        numCorrupt = corruptFileBlocks.getFiles().length;
-      }
-      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
-      System.out.println("2. bad fsck out: " + outStr);
-      assertTrue(outStr.contains("has 3 CORRUPT files"));
-
-      // Do a listing on a dir which doesn't have any corrupt blocks and validate
-      util.createFiles(fs, "/goodData");
-      outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
-      System.out.println("3. good fsck out: " + outStr);
-      assertTrue(outStr.contains("has 0 CORRUPT files"));
-      util.cleanup(fs,"/corruptData");
-      util.cleanup(fs, "/goodData");
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
+      numCorrupt = corruptFileBlocks.getFiles().length;
     }
+    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
+    System.out.println("2. bad fsck out: " + outStr);
+    assertTrue(outStr.contains("has 3 CORRUPT files"));
+
+    // Do a listing on a dir which doesn't have any corrupt blocks and validate
+    util.createFiles(fs, "/goodData");
+    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
+    System.out.println("3. good fsck out: " + outStr);
+    assertTrue(outStr.contains("has 0 CORRUPT files"));
+    util.cleanup(fs, "/corruptData");
+    util.cleanup(fs, "/goodData");
   }
   
   /**
@@ -1109,193 +1054,163 @@ public class TestFsck {
    */
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      // bring up a one-node cluster
-      Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      String fileName = "/test.txt";
-      Path filePath = new Path(fileName);
-      FileSystem fs = cluster.getFileSystem();
-
-      // create a one-block file
-      DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
-      DFSTestUtil.waitReplication(fs, filePath, (short) 1);
-
-      // passing illegal option
-      String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
-      System.out.println(outStr);
-      assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // passing multiple paths are arguments
-      outStr = runFsck(conf, -1, true, "/", fileName);
-      System.out.println(outStr);
-      assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      // clean up file system
-      fs.delete(filePath, true);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // bring up a one-node cluster
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    String fileName = "/test.txt";
+    Path filePath = new Path(fileName);
+    FileSystem fs = cluster.getFileSystem();
+
+    // create a one-block file
+    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
+    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
+
+    // passing an illegal option
+    String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
+    System.out.println(outStr);
+    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // passing multiple paths as arguments
+    outStr = runFsck(conf, -1, true, "/", fileName);
+    System.out.println(outStr);
+    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    // clean up file system
+    fs.delete(filePath, true);
   }
   
   /**
-   * Tests that the # of missing block replicas and expected replicas is correct
+   * Tests that the # of missing block replicas and expected replicas is
+   * correct.
    * @throws IOException
    */
   @Test
   public void testFsckMissingReplicas() throws IOException {
     // Desired replication factor
-    // Set this higher than NUM_REPLICAS so it's under-replicated
-    final short REPL_FACTOR = 2;
+    // Set this higher than numReplicas so it's under-replicated
+    final short replFactor = 2;
     // Number of replicas to actually start
-    final short NUM_REPLICAS = 1;
+    final short numReplicas = 1;
     // Number of blocks to write
-    final short NUM_BLOCKS = 3;
+    final short numBlocks = 3;
     // Set a small-ish blocksize
     final long blockSize = 512;
     
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     
-    try {
-      // Startup a minicluster
-      cluster = 
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
-      
-      // Create a file that will be intentionally under-replicated
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      long fileLen = blockSize * NUM_BLOCKS;
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-      
-      // Create an under-replicated file
-      NameNode namenode = cluster.getNameNode();
-      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
-          .getDatanodeManager().getNetworkTopology();
-      Map<String,String[]> pmap = new HashMap<String, String[]>();
-      Writer result = new StringWriter();
-      PrintWriter out = new PrintWriter(result, true);
-      InetAddress remoteAddress = InetAddress.getLocalHost();
-      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_REPLICAS, remoteAddress);
-      
-      // Run the fsck and check the Result
-      final HdfsFileStatus file = 
-          namenode.getRpcServer().getFileInfo(pathString);
-      assertNotNull(file);
-      Result replRes = new ReplicationResult(conf);
-      Result ecRes = new ErasureCodingResult(conf);
-      fsck.check(pathString, file, replRes, ecRes);
-      // Also print the output from the fsck, for ex post facto sanity checks
-      System.out.println(result.toString());
-      assertEquals(replRes.missingReplicas,
-          (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
-      assertEquals(replRes.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
-    } finally {
-      if(dfs != null) {
-        dfs.close();
-      }
-      if(cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // Startup a minicluster
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
+
+    // Create a file that will be intentionally under-replicated
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    long fileLen = blockSize * numBlocks;
+    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
+
+    // Create an under-replicated file
+    NameNode namenode = cluster.getNameNode();
+    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+        .getDatanodeManager().getNetworkTopology();
+    Map<String, String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        numReplicas, remoteAddress);
+
+    // Run the fsck and check the Result
+    final HdfsFileStatus file =
+        namenode.getRpcServer().getFileInfo(pathString);
+    assertNotNull(file);
+    Result replRes = new ReplicationResult(conf);
+    Result ecRes = new ErasureCodingResult(conf);
+    fsck.check(pathString, file, replRes, ecRes);
+    // Also print the output from the fsck, for ex post facto sanity checks
+    System.out.println(result.toString());
+    assertEquals(replRes.missingReplicas,
+        (numBlocks*replFactor) - (numBlocks*numReplicas));
+    assertEquals(replRes.numExpectedReplicas, numBlocks*replFactor);
   }
   
   /**
-   * Tests that the # of misreplaced replicas is correct
+   * Tests that the # of misreplaced replicas is correct.
    * @throws IOException
    */
   @Test
   public void testFsckMisPlacedReplicas() throws IOException {
     // Desired replication factor
-    final short REPL_FACTOR = 2;
+    final short replFactor = 2;
     // Number of replicas to actually start
-    short NUM_DN = 2;
+    short numDn = 2;
     // Number of blocks to write
-    final short NUM_BLOCKS = 3;
+    final short numBlocks = 3;
     // Set a small-ish blocksize
     final long blockSize = 512;
     
-    String [] racks = {"/rack1", "/rack1"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack1"};
+    String[] hosts = {"host1", "host2"};
     
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     
-    try {
-      // Startup a minicluster
-      cluster = 
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
-          .racks(racks).build();
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
-      
-      // Create a file that will be intentionally under-replicated
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      long fileLen = blockSize * NUM_BLOCKS;
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-      
-      // Create an under-replicated file
-      NameNode namenode = cluster.getNameNode();
-      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
-          .getDatanodeManager().getNetworkTopology();
-      // Add a new node on different rack, so previous blocks' replicas 
-      // are considered to be misplaced
-      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
-      NUM_DN++;
-      
-      Map<String,String[]> pmap = new HashMap<String, String[]>();
-      Writer result = new StringWriter();
-      PrintWriter out = new PrintWriter(result, true);
-      InetAddress remoteAddress = InetAddress.getLocalHost();
-      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_DN, remoteAddress);
-      
-      // Run the fsck and check the Result
-      final HdfsFileStatus file = 
-          namenode.getRpcServer().getFileInfo(pathString);
-      assertNotNull(file);
-      Result replRes = new ReplicationResult(conf);
-      Result ecRes = new ErasureCodingResult(conf);
-      fsck.check(pathString, file, replRes, ecRes);
-      // check misReplicatedBlock number.
-      assertEquals(replRes.numMisReplicatedBlocks, NUM_BLOCKS);
-    } finally {
-      if(dfs != null) {
-        dfs.close();
-      }
-      if(cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // Startup a minicluster
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+        .racks(racks).build();
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
+
+    // Create a file that will be intentionally under-replicated
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    long fileLen = blockSize * numBlocks;
+    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
+
+    // Create an under-replicated file
+    NameNode namenode = cluster.getNameNode();
+    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+        .getDatanodeManager().getNetworkTopology();
+    // Add a new node on a different rack, so previous blocks' replicas
+    // are considered to be misplaced
+    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
+    numDn++;
+
+    Map<String, String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        numDn, remoteAddress);
+
+    // Run the fsck and check the Result
+    final HdfsFileStatus file =
+        namenode.getRpcServer().getFileInfo(pathString);
+    assertNotNull(file);
+    Result replRes = new ReplicationResult(conf);
+    Result ecRes = new ErasureCodingResult(conf);
+    fsck.check(pathString, file, replRes, ecRes);
+    // check misReplicatedBlock number.
+    assertEquals(replRes.numMisReplicatedBlocks, numBlocks);
   }
 
-  /** Test fsck with FileNotFound */
+  /** Test fsck with FileNotFound. */
   @Test
   public void testFsckFileNotFound() throws Exception {
 
     // Number of replicas to actually start
-    final short NUM_REPLICAS = 1;
+    final short numReplicas = 1;
 
-    Configuration conf = new Configuration();
     NameNode namenode = mock(NameNode.class);
     NetworkTopology nettop = mock(NetworkTopology.class);
-    Map<String,String[]> pmap = new HashMap<>();
+    Map<String, String[]> pmap = new HashMap<>();
     Writer result = new StringWriter();
     PrintWriter out = new PrintWriter(result, true);
     InetAddress remoteAddress = InetAddress.getLocalHost();
@@ -1313,7 +1228,7 @@ public class TestFsck {
     when(blockManager.getDatanodeManager()).thenReturn(dnManager);
 
     NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
-        NUM_REPLICAS, remoteAddress);
+        numReplicas, remoteAddress);
 
     String pathString = "/tmp/testFile";
 
@@ -1326,8 +1241,8 @@ public class TestFsck {
     FsPermission perms = FsPermission.getDefault();
     String owner = "foo";
     String group = "bar";
-    byte [] symlink = null;
-    byte [] path = DFSUtil.string2Bytes(pathString);
+    byte[] symlink = null;
+    byte[] path = DFSUtil.string2Bytes(pathString);
     long fileId = 312321L;
     int numChildren = 1;
     byte storagePolicy = 0;
@@ -1346,95 +1261,82 @@ public class TestFsck {
     assertTrue(replRes.isHealthy());
   }
 
-  /** Test fsck with symlinks in the filesystem */
+  /** Test fsck with symlinks in the filesystem. */
   @Test
   public void testFsckSymlink() throws Exception {
     final DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
-    final Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      final long precision = 1L;
-      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      final String fileName = "/srcdat";
-      util.createFiles(fs, fileName);
-      final FileContext fc = FileContext.getFileContext(
-          cluster.getConfiguration(0));
-      final Path file = new Path(fileName);
-      final Path symlink = new Path("/srcdat-symlink");
-      fc.createSymlink(file, symlink, false);
-      util.waitReplication(fs, fileName, (short)3);
-      long aTime = fc.getFileStatus(symlink).getAccessTime();
-      Thread.sleep(precision);
-      setupAuditLogs();
-      String outStr = runFsck(conf, 0, true, "/");
-      verifyAuditLogs();
-      assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("Total symlinks:\t\t1"));
-      util.cleanup(fs, fileName);
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    final long precision = 1L;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
+        precision);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    final String fileName = "/srcdat";
+    util.createFiles(fs, fileName);
+    final FileContext fc = FileContext.getFileContext(
+        cluster.getConfiguration(0));
+    final Path file = new Path(fileName);
+    final Path symlink = new Path("/srcdat-symlink");
+    fc.createSymlink(file, symlink, false);
+    util.waitReplication(fs, fileName, (short)3);
+    long aTime = fc.getFileStatus(symlink).getAccessTime();
+    Thread.sleep(precision);
+    setupAuditLogs();
+    String outStr = runFsck(conf, 0, true, "/");
+    verifyAuditLogs();
+    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("Total symlinks:\t\t1"));
+    util.cleanup(fs, fileName);
   }
 
   /**
-   * Test for including the snapshot files in fsck report
+   * Test for including the snapshot files in fsck report.
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .build();
-    try {
-      String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
-          "-files");
-      assertTrue(runFsck.contains("HEALTHY"));
-      final String fileName = "/srcdat";
-      DistributedFileSystem hdfs = cluster.getFileSystem();
-      Path file1 = new Path(fileName);
-      DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
-      hdfs.allowSnapshot(new Path("/"));
-      hdfs.createSnapshot(new Path("/"), "mySnapShot");
-      runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
-      assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
-      runFsck = runFsck(conf, 0, true, "/", "-files");
-      assertFalse(runFsck.contains("mySnapShot"));
-    } finally {
-      cluster.shutdown();
-    }
+    String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
+        "-files");
+    assertTrue(runFsck.contains("HEALTHY"));
+    final String fileName = "/srcdat";
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    Path file1 = new Path(fileName);
+    DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
+    hdfs.allowSnapshot(new Path("/"));
+    hdfs.createSnapshot(new Path("/"), "mySnapShot");
+    runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
+    assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
+    runFsck = runFsck(conf, 0, true, "/", "-files");
+    assertFalse(runFsck.contains("mySnapShot"));
   }
 
   /**
-   * Test for blockIdCK
+   * Test for blockIdCK.
    */
 
   @Test
   public void testBlockIdCK() throws Exception {
 
-    final short REPL_FACTOR = 2;
-    short NUM_DN = 2;
+    final short replFactor = 2;
+    short numDn = 2;
     final long blockSize = 512;
 
-    String [] racks = {"/rack1", "/rack2"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack2"};
+    String[] hosts = {"host1", "host2"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     cluster =
-      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+      new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
         .racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1443,12 +1345,12 @@ public class TestFsck {
     assertNotNull("Failed to get FileSystem", dfs);
 
     DFSTestUtil util = new DFSTestUtil.Builder().
-      setName(getClass().getSimpleName()).setNumFiles(1).build();
+        setName(getClass().getSimpleName()).setNumFiles(1).build();
     //create files
     final String pathString = new String("/testfile");
     final Path path = new Path(pathString);
-    util.createFile(dfs, path, 1024, REPL_FACTOR , 1000L);
-    util.waitReplication(dfs, path, REPL_FACTOR);
+    util.createFile(dfs, path, 1024, replFactor, 1000L);
+    util.waitReplication(dfs, path, replFactor);
     StringBuilder sb = new StringBuilder();
     for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
       sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
@@ -1456,46 +1358,40 @@ public class TestFsck {
     String[] bIds = sb.toString().split(" ");
 
     //run fsck
-    try {
-      //illegal input test
-      String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
-          "not_a_block_id");
-      assertTrue(runFsckResult.contains("Incorrect blockId format:"));
-
-      //general test
-      runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
-      assertTrue(runFsckResult.contains(bIds[0]));
-      assertTrue(runFsckResult.contains(bIds[1]));
-      assertTrue(runFsckResult.contains(
-          "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
-      assertTrue(runFsckResult.contains(
-          "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
-    } finally {
-      cluster.shutdown();
-    }
+    //illegal input test
+    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
+        "not_a_block_id");
+    assertTrue(runFsckResult.contains("Incorrect blockId format:"));
+
+    //general test
+    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
+    assertTrue(runFsckResult.contains(bIds[0]));
+    assertTrue(runFsckResult.contains(bIds[1]));
+    assertTrue(runFsckResult.contains(
+        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
+    assertTrue(runFsckResult.contains(
+        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
   }
 
   /**
-   * Test for blockIdCK with datanode decommission
+   * Test for blockIdCK with datanode decommission.
    */
   @Test
   public void testBlockIdCKDecommission() throws Exception {
 
-    final short REPL_FACTOR = 1;
-    short NUM_DN = 2;
+    final short replFactor = 1;
+    short numDn = 2;
     final long blockSize = 512;
     boolean checkDecommissionInProgress = false;
-    String [] racks = {"/rack1", "/rack2"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack2"};
+    String[] hosts = {"host1", "host2"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
-    MiniDFSCluster cluster;
-    DistributedFileSystem dfs ;
+    DistributedFileSystem dfs;
     cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
             .racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1508,137 +1404,124 @@ public class TestFsck {
     //create files
     final String pathString = new String("/testfile");
     final Path path = new Path(pathString);
-    util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
-    util.waitReplication(dfs, path, REPL_FACTOR);
+    util.createFile(dfs, path, 1024, replFactor, 1000L);
+    util.waitReplication(dfs, path, replFactor);
     StringBuilder sb = new StringBuilder();
     for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
       sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
     }
     String[] bIds = sb.toString().split(" ");
-    try {
-      //make sure datanode that has replica is fine before decommission
-      String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      //decommission datanode
-      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
-      BlockManager bm = fsn.getBlockManager();
-      ExtendedBlock eb = util.getFirstBlock(dfs, path);
-      BlockCollection bc = null;
-      try {
-        fsn.writeLock();
-        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = fsn.getBlockCollection(bi);
-      } finally {
-        fsn.writeUnlock();
-      }
-      DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
-      bm.getDatanodeManager().getDecomManager().startDecommission(dn);
-      String dnName = dn.getXferAddr();
-
-      //wait for decommission start
-      DatanodeInfo datanodeInfo = null;
-      int count = 0;
-      do {
-        Thread.sleep(2000);
-        for (DatanodeInfo info : dfs.getDataNodeStats()) {
-          if (dnName.equals(info.getXferAddr())) {
-            datanodeInfo = info;
-          }
-        }
-         //check decommissioning only once
-        if(!checkDecommissionInProgress && datanodeInfo != null
-            && datanodeInfo.isDecommissionInProgress()) {
-          String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
-          assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
-          checkDecommissionInProgress =  true;
-        }
-      } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
 
-      //check decommissioned
-      String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
-      assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
+    //make sure datanode that has replica is fine before decommission
+    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    //decommission datanode
+    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+    ExtendedBlock eb = util.getFirstBlock(dfs, path);
+    BlockCollection bc = null;
+    try {
+      fsn.writeLock();
+      BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
+      bc = fsn.getBlockCollection(bi);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      fsn.writeUnlock();
     }
+    DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
+    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
+    String dnName = dn.getXferAddr();
+
+    //wait for decommission start
+    DatanodeInfo datanodeInfo = null;
+    int count = 0;
+    do {
+      Thread.sleep(2000);
+      for (DatanodeInfo info : dfs.getDataNodeStats()) {
+        if (dnName.equals(info.getXferAddr())) {
+          datanodeInfo = info;
+        }
+      }
+       //check decommissioning only once
+      if(!checkDecommissionInProgress && datanodeInfo != null
+          && datanodeInfo.isDecommissionInProgress()) {
+        String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
+        assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
+        checkDecommissionInProgress =  true;
+      }
+    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+
+    //check decommissioned
+    String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
+    assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
   }
 
   /**
-   * Test for blockIdCK with block corruption
+   * Test for blockIdCK with block corruption.
    */
   @Test
   public void testBlockIdCKCorruption() throws Exception {
-    short NUM_DN = 1;
+    short numDn = 1;
     final long blockSize = 512;
     Random random = new Random();
     ExtendedBlock block;
     short repFactor = 1;
-    String [] racks = {"/rack1"};
-    String [] hosts = {"host1"};
+    String[] racks = {"/rack1"};
+    String[] hosts = {"host1"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
-    try {
-      cluster =
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
-              .racks(racks).build();
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+            .racks(racks).build();
 
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
 
-      DFSTestUtil util = new DFSTestUtil.Builder().
+    DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
-      //create files
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      util.createFile(dfs, path, 1024, repFactor, 1000L);
-      util.waitReplication(dfs, path, repFactor);
-      StringBuilder sb = new StringBuilder();
-      for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
-        sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
-      }
-      String[] bIds = sb.toString().split(" ");
-
-      //make sure block is healthy before we corrupt it
-      String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // corrupt replicas
-      block = DFSTestUtil.getFirstBlock(dfs, path);
-      File blockFile = cluster.getBlockFile(0, block);
-      if (blockFile != null && blockFile.exists()) {
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int) channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-      }
+    //create files
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    util.createFile(dfs, path, 1024, repFactor, 1000L);
+    util.waitReplication(dfs, path, repFactor);
+    StringBuilder sb = new StringBuilder();
+    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
+      sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
+    }
+    String[] bIds = sb.toString().split(" ");
 
-      util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);
+    //make sure block is healthy before we corrupt it
+    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
 
-      outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+    // corrupt replicas
+    block = DFSTestUtil.getFirstBlock(dfs, path);
+    File blockFile = cluster.getBlockFile(0, block);
+    if (blockFile != null && blockFile.exists()) {
+      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+      FileChannel channel = raFile.getChannel();
+      String badString = "BADBAD";
+      int rand = random.nextInt((int) channel.size()/2);
+      raFile.seek(rand);
+      raFile.write(badString.getBytes());
+      raFile.close();
     }
+
+    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);
+
+    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
+    System.out.println(outStr);
+    assertTrue(outStr.cont

<TRUNCATED>
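
The try/finally cleanup blocks removed throughout this diff, together with the dropped local `Configuration conf` and `MiniDFSCluster cluster` declarations, suggest the test class now keeps both as shared fields that are created and shut down by per-test setup and teardown methods outside this excerpt. A minimal JUnit 4 sketch of that assumed lifecycle (the class name is illustrative, not the actual TestFsck code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.After;
    import org.junit.Before;

    public class FsckTestLifecycleSketch {
      private Configuration conf;
      private MiniDFSCluster cluster;

      @Before
      public void setUp() {
        // Fresh configuration for every test; individual tests add their own keys.
        conf = new Configuration();
      }

      @After
      public void tearDown() {
        // Centralized cleanup replaces the removed per-test try/finally blocks.
        if (cluster != null) {
          cluster.shutdown();
          cluster = null;
        }
      }
    }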



[30/52] [abbrv] hadoop git commit: HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen

Posted by cn...@apache.org.
HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af50da32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af50da32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af50da32

Branch: refs/heads/HADOOP-13037
Commit: af50da3298f92a52cc20d5f6aab6f6ad8134efbd
Parents: 3d59b18
Author: Kai Zheng <ka...@intel.com>
Authored: Mon Oct 10 11:55:49 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Mon Oct 10 11:55:49 2016 +0600

----------------------------------------------------------------------
 .../src/site/markdown/HDFSErasureCoding.md           | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af50da32/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 18b3a25..627260f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -22,6 +22,7 @@ HDFS Erasure Coding
     * [Deployment](#Deployment)
         * [Cluster and hardware configuration](#Cluster_and_hardware_configuration)
         * [Configuration keys](#Configuration_keys)
+        * [Enable Intel ISA-L](#Enable_Intel_ISA-L)
         * [Administrative commands](#Administrative_commands)
 
 Purpose
@@ -73,6 +74,9 @@ Architecture
 
     There are three policies currently being supported: RS-DEFAULT-3-2-64k, RS-DEFAULT-6-3-64k and RS-LEGACY-6-3-64k. All with default cell size of 64KB. The system default policy is RS-DEFAULT-6-3-64k which use the default schema RS_6_3_SCHEMA with a cell size of 64KB.
 
+ *  **Intel ISA-L**
+    Intel ISA-L stands for Intel Intelligent Storage Acceleration Library. ISA-L is a collection of optimized low-level functions used primarily in storage applications. It includes fast block Reed-Solomon erasure codes optimized for the Intel AVX and AVX2 instruction sets.
+    HDFS EC can leverage this open-source library to accelerate the encoding and decoding calculation. ISA-L supports most major operating systems, including Linux and Windows. By default, ISA-L is not enabled in HDFS.
 
 Deployment
 ----------
@@ -98,7 +102,7 @@ Deployment
   `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
-  The default implementations for all of these codecs are pure Java.
+  The default implementations for all of these codecs are pure Java. For the default RS codec, there is also a native implementation which leverages the Intel ISA-L library to improve the encoding and decoding calculation. Please refer to the "Enable Intel ISA-L" section for more detailed information.
 
   Erasure coding background recovery work on the DataNodes can also be tuned via the following configuration parameters:
 
@@ -106,6 +110,15 @@ Deployment
   1. `dfs.datanode.stripedread.threads` - Number of concurrent reader threads. Default value is 20 threads.
   1. `dfs.datanode.stripedread.buffer.size` - Buffer size for reader service. Default value is 256KB.
 
+### Enable Intel ISA-L
+
+  The HDFS native implementation of the default RS codec leverages the Intel ISA-L library to improve the encoding and decoding calculation. Enabling and using Intel ISA-L takes three steps.
+  1. Build the ISA-L library. Please refer to the official site "https://github.com/01org/isa-l/" for detailed information.
+  2. Build Hadoop with ISA-L support. Please refer to the "Intel ISA-L build options" section in the "Build instructions for Hadoop" (BUILDING.txt) document. Use -Dbundle.isal to copy the contents of the isal.lib directory into the final tar file. Deploy Hadoop with that tar file, and make sure the ISA-L library is available on both the HDFS client and the DataNodes.
+  3. Configure the `io.erasurecode.codec.rs-default.rawcoder` key with the value `org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on the HDFS client and DataNodes.
+
+  To check whether the ISA-L library is enabled, run the `hadoop checknative` command; it reports whether ISA-L support is available (see also the configuration sketch after this diff).
+
 ### Administrative commands
 
   HDFS provides an `erasurecode` subcommand to perform administrative commands related to erasure coding.
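
The "Enable Intel ISA-L" section above names the configuration key and factory class but shows no concrete usage, so here is a minimal sketch of step 3 done through the Hadoop Configuration API; the same key/value pair can equally be set in the cluster configuration files. The class name and the printed output are illustrative only, not part of the documented procedure.

    import org.apache.hadoop.conf.Configuration;

    public class EnableIsalCoderSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Step 3 above: switch the default RS codec to the ISA-L backed native
        // implementation on HDFS clients and DataNodes.
        conf.set("io.erasurecode.codec.rs-default.rawcoder",
            "org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory");
        // Optional background-recovery tuning key mentioned earlier in the doc
        // (default is 20 reader threads).
        conf.setInt("dfs.datanode.stripedread.threads", 20);
        System.out.println(conf.get("io.erasurecode.codec.rs-default.rawcoder"));
      }
    }

Whether the ISA-L native code was actually compiled in can then be confirmed with the `hadoop checknative` command, as the document notes.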




[51/52] [abbrv] hadoop git commit: YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)

Posted by cn...@apache.org.
YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6378845f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6378845f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6378845f

Branch: refs/heads/HADOOP-13037
Commit: 6378845f9ef789c3fda862c43bcd498aa3f35068
Parents: 7ba7092
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Tue Oct 11 21:41:58 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Tue Oct 11 21:42:08 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 20 ++++++++++++++++----
 .../src/main/resources/yarn-default.xml         |  4 ++--
 .../server/resourcemanager/RMAppManager.java    |  2 +-
 3 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4d43357..3bd0dcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -719,17 +719,29 @@ public class YarnConfiguration extends Configuration {
       + "leveldb-state-store.compaction-interval-secs";
   public static final long DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS = 3600;
 
-  /** The maximum number of completed applications RM keeps. */ 
+  /**
+   * The maximum number of completed applications RM keeps. By default equals
+   * to {@link #DEFAULT_RM_MAX_COMPLETED_APPLICATIONS}.
+   */
   public static final String RM_MAX_COMPLETED_APPLICATIONS =
     RM_PREFIX + "max-completed-applications";
-  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000;
+  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 1000;
 
   /**
-   * The maximum number of completed applications RM state store keeps, by
-   * default equals to DEFAULT_RM_MAX_COMPLETED_APPLICATIONS
+   * The maximum number of completed applications RM state store keeps. By
+   * default equals to value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
    */
   public static final String RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
       RM_PREFIX + "state-store.max-completed-applications";
+  /**
+   * The default value for
+   * {@code yarn.resourcemanager.state-store.max-completed-applications}.
+   * @deprecated This default value is ignored and will be removed in a future
+   * release. The default value of
+   * {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+   * value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
+   */
+  @Deprecated
   public static final int DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
       DEFAULT_RM_MAX_COMPLETED_APPLICATIONS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 524afec..f37c689 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -417,7 +417,7 @@
     the applications remembered in RM memory.
     Any values larger than ${yarn.resourcemanager.max-completed-applications} will
     be reset to ${yarn.resourcemanager.max-completed-applications}.
-    Note that this value impacts the RM recovery performance.Typically,
+    Note that this value impacts the RM recovery performance. Typically,
     a smaller value indicates better performance on RM recovery.
     </description>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
@@ -687,7 +687,7 @@
   <property>
     <description>The maximum number of completed applications RM keeps. </description>
     <name>yarn.resourcemanager.max-completed-applications</name>
-    <value>10000</value>
+    <value>1000</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7352a28..c065b60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -101,7 +101,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     this.maxCompletedAppsInStateStore =
         conf.getInt(
           YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
-          YarnConfiguration.DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS);
+          this.maxCompletedAppsInMemory);
     if (this.maxCompletedAppsInStateStore > this.maxCompletedAppsInMemory) {
       this.maxCompletedAppsInStateStore = this.maxCompletedAppsInMemory;
     }
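
To make the interaction between the two limits above easier to see, here is a small sketch built on the YarnConfiguration constants touched in this commit; the class name and printed output are illustrative only, and the capping logic mirrors the RMAppManager change shown above rather than adding anything new.

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class CompletedAppLimitsSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Apps kept in RM memory; the default drops from 10000 to 1000.
        int inMemory = conf.getInt(
            YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
            YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
        // Apps kept in the state store; now defaults to the in-memory limit
        // and is capped by it.
        int inStateStore = conf.getInt(
            YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
            inMemory);
        if (inStateStore > inMemory) {
          inStateStore = inMemory;
        }
        System.out.println("in memory: " + inMemory
            + ", in state store: " + inStateStore);
      }
    }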




[05/52] [abbrv] hadoop git commit: HADOOP-13678 Update jackson from 1.9.13 to 2.x in hadoop-tools. Contributed by Akira Ajisaka.

Posted by cn...@apache.org.
HADOOP-13678 Update jackson from 1.9.13 to 2.x in hadoop-tools. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cc841f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cc841f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cc841f1

Branch: refs/heads/HADOOP-13037
Commit: 2cc841f16ec9aa5336495fc20ee781a1276fddc5
Parents: 4d2f380
Author: Steve Loughran <st...@apache.org>
Authored: Thu Oct 6 16:30:26 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Oct 6 16:31:00 2016 +0100

----------------------------------------------------------------------
 hadoop-tools/hadoop-azure-datalake/pom.xml      |  4 +++
 ...ClientCredentialBasedAccesTokenProvider.java |  5 +--
 hadoop-tools/hadoop-azure/pom.xml               |  6 +++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 16 ++++-----
 hadoop-tools/hadoop-openstack/pom.xml           | 18 +++++-----
 .../swift/auth/ApiKeyAuthenticationRequest.java |  2 +-
 .../fs/swift/auth/entities/AccessToken.java     |  2 +-
 .../hadoop/fs/swift/auth/entities/Catalog.java  |  2 +-
 .../hadoop/fs/swift/auth/entities/Endpoint.java |  2 +-
 .../hadoop/fs/swift/auth/entities/Tenant.java   |  2 +-
 .../hadoop/fs/swift/auth/entities/User.java     |  2 +-
 .../snative/SwiftNativeFileSystemStore.java     |  3 +-
 .../apache/hadoop/fs/swift/util/JSONUtil.java   | 24 +++++--------
 hadoop-tools/hadoop-rumen/pom.xml               |  9 +++++
 .../apache/hadoop/tools/rumen/Anonymizer.java   | 23 ++++++-------
 .../hadoop/tools/rumen/HadoopLogsAnalyzer.java  |  3 +-
 .../tools/rumen/JsonObjectMapperParser.java     | 17 ++++-----
 .../tools/rumen/JsonObjectMapperWriter.java     | 21 +++++-------
 .../apache/hadoop/tools/rumen/LoggedJob.java    |  2 +-
 .../hadoop/tools/rumen/LoggedLocation.java      |  2 +-
 .../tools/rumen/LoggedNetworkTopology.java      |  2 +-
 .../rumen/LoggedSingleRelativeRanking.java      |  4 +--
 .../apache/hadoop/tools/rumen/LoggedTask.java   |  2 +-
 .../hadoop/tools/rumen/LoggedTaskAttempt.java   |  2 +-
 .../hadoop/tools/rumen/datatypes/NodeName.java  |  2 +-
 .../rumen/serializers/BlockingSerializer.java   | 10 +++---
 .../DefaultAnonymizingRumenSerializer.java      |  8 ++---
 .../serializers/DefaultRumenSerializer.java     |  9 ++---
 .../serializers/ObjectStringSerializer.java     | 10 +++---
 .../apache/hadoop/tools/rumen/state/State.java  |  2 +-
 .../tools/rumen/state/StateDeserializer.java    | 14 ++++----
 .../hadoop/tools/rumen/state/StatePool.java     | 36 ++++++++------------
 .../hadoop/tools/rumen/TestHistograms.java      | 13 +++----
 hadoop-tools/hadoop-sls/pom.xml                 |  4 +++
 .../hadoop/yarn/sls/RumenToSLSConverter.java    |  8 ++---
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  7 ++--
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  | 10 +++---
 37 files changed, 151 insertions(+), 157 deletions(-)
----------------------------------------------------------------------
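
The pattern repeated across the diffs below is a mechanical move from the org.codehaus.jackson 1.x packages to com.fasterxml.jackson 2.x, plus a handful of renamed calls: getJsonFactory() becomes getFactory(), createJsonParser()/createJsonGenerator() become createParser()/createGenerator(), JsonNode.getTextValue() becomes textValue(), and the Version constructor gains groupId/artifactId arguments. A minimal, self-contained sketch of the 2.x side of those calls, assuming only jackson-databind 2.x on the classpath (the class name and field names are illustrative):

    import java.io.ByteArrayOutputStream;

    import com.fasterxml.jackson.core.JsonEncoding;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.core.Version;
    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.module.SimpleModule;

    public class Jackson2MigrationSketch {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        // 1.x: mapper.getJsonFactory().createJsonGenerator(out, JsonEncoding.UTF8)
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        JsonGenerator gen = mapper.getFactory().createGenerator(out, JsonEncoding.UTF8);
        gen.writeStartObject();
        gen.writeStringField("oldFolderName", "/old");
        gen.writeEndObject();
        gen.close();

        // 1.x: node.get("oldFolderName").getTextValue()
        JsonNode node = mapper.readTree(out.toByteArray());
        String src = node.get("oldFolderName").textValue();

        // 1.x: new Version(0, 1, 1, "FINAL"); 2.x adds groupId and artifactId.
        SimpleModule module = new SimpleModule(
            "Example Module", new Version(0, 1, 1, "FINAL", "", ""));
        mapper.registerModule(module);

        System.out.println(src);
      }
    }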


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index c07a1d7..e1a0bfe 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -181,5 +181,9 @@
       <version>2.4.0</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
index 6dfc593..11d07e7 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hdfs.web.oauth2;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import com.fasterxml.jackson.databind.ObjectReader;
 import com.squareup.okhttp.OkHttpClient;
 import com.squareup.okhttp.Request;
 import com.squareup.okhttp.RequestBody;
@@ -29,8 +32,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.util.Timer;
 import org.apache.http.HttpStatus;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.IOException;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 48f9043..d8121e2 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -217,6 +217,10 @@
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
-    
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index fb0d31f..54eb90f 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -40,6 +40,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.JsonNode;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -63,14 +67,10 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.codehaus.jackson.JsonNode;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.storage.StorageException;
 
@@ -193,8 +193,8 @@ public class NativeAzureFileSystem extends FileSystem {
       if (oldFolderName == null || newFolderName == null) {
     	  this.committed = false;
       } else {
-        this.srcKey = oldFolderName.getTextValue();
-        this.dstKey = newFolderName.getTextValue();
+        this.srcKey = oldFolderName.textValue();
+        this.dstKey = newFolderName.textValue();
         if (this.srcKey == null || this.dstKey == null) {
           this.committed = false;
         } else {
@@ -203,7 +203,7 @@ public class NativeAzureFileSystem extends FileSystem {
             this.committed = false;
           } else {
             for (int i = 0; i < fileList.size(); i++) {
-              fileStrList.add(fileList.get(i).getTextValue());
+              fileStrList.add(fileList.get(i).textValue());
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index b036e84..f9a4df0 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -124,16 +124,6 @@
     </dependency>
 
     <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-core-asl</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>commons-httpclient</groupId>
       <artifactId>commons-httpclient</artifactId>
       <version>3.1</version>
@@ -150,5 +140,13 @@
       <artifactId>junit</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java
index f5f9a8c..e25d17d 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/ApiKeyAuthenticationRequest.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.swift.auth;
 
-import org.codehaus.jackson.annotate.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonProperty;
 
 /**
  * Class that represents authentication request to Openstack Keystone.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java
index a01e855..b38d466 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/AccessToken.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.swift.auth.entities;
 
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 
 /**
  * Access token representation of Openstack Keystone authentication.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java
index 838d87f..76e161b 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Catalog.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.swift.auth.entities;
 
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java
index f9de895..b1cbf2a 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Endpoint.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.swift.auth.entities;
 
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 
 import java.net.URI;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java
index da94c40..405d2c8 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/Tenant.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.swift.auth.entities;
 
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 
 /**
  * Tenant is abstraction in Openstack which describes all account

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java
index 1a6954a..da3bac2 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/auth/entities/User.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.fs.swift.auth.entities;
 
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import org.apache.hadoop.fs.swift.auth.Roles;
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index cc3e3d2..71d8d82 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.swift.snative;
 
+import com.fasterxml.jackson.databind.type.CollectionType;
+
 import org.apache.commons.httpclient.Header;
 import org.apache.commons.httpclient.HttpStatus;
 import org.apache.commons.logging.Log;
@@ -36,7 +38,6 @@ import org.apache.hadoop.fs.swift.util.DurationStats;
 import org.apache.hadoop.fs.swift.util.JSONUtil;
 import org.apache.hadoop.fs.swift.util.SwiftObjectPath;
 import org.apache.hadoop.fs.swift.util.SwiftUtils;
-import org.codehaus.jackson.map.type.CollectionType;
 
 import java.io.ByteArrayInputStream;
 import java.io.FileNotFoundException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java
index b17cb65..fee7e7f 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/JSONUtil.java
@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.fs.swift.util;
 
+import com.fasterxml.jackson.core.JsonGenerationException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.CollectionType;
 import org.apache.hadoop.fs.swift.exceptions.SwiftJsonMarshallingException;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.type.CollectionType;
-import org.codehaus.jackson.type.TypeReference;
 
 import java.io.IOException;
 import java.io.StringWriter;
@@ -54,9 +54,7 @@ public class JSONUtil {
     try {
       jsonMapper.writeValue(json, object);
       return json.toString();
-    } catch (JsonGenerationException e) {
-      throw new SwiftJsonMarshallingException(e.toString(), e);
-    } catch (JsonMappingException e) {
+    } catch (JsonGenerationException | JsonMappingException e) {
       throw new SwiftJsonMarshallingException(e.toString(), e);
     }
   }
@@ -96,9 +94,7 @@ public class JSONUtil {
             throws IOException {
     try {
       return (T)jsonMapper.readValue(value, typeReference);
-    } catch (JsonGenerationException e) {
-      throw new SwiftJsonMarshallingException("Error generating response", e);
-    } catch (JsonMappingException e) {
+    } catch (JsonGenerationException | JsonMappingException e) {
       throw new SwiftJsonMarshallingException("Error generating response", e);
     }
   }
@@ -115,11 +111,7 @@ public class JSONUtil {
               throws IOException {
     try {
       return (T)jsonMapper.readValue(value, collectionType);
-    } catch (JsonGenerationException e) {
-      throw new SwiftJsonMarshallingException(e.toString()
-                                              + " source: " + value,
-                                              e);
-    } catch (JsonMappingException e) {
+    } catch (JsonGenerationException | JsonMappingException e) {
       throw new SwiftJsonMarshallingException(e.toString()
                                               + " source: " + value,
                                               e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index f5acf4e..9e1b1f3 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -94,6 +94,15 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java
index e1a2be7..3c85a93 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Anonymizer.java
@@ -21,6 +21,12 @@ package org.apache.hadoop.tools.rumen;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.Version;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,13 +42,6 @@ import org.apache.hadoop.tools.rumen.datatypes.*;
 import org.apache.hadoop.tools.rumen.serializers.*;
 import org.apache.hadoop.tools.rumen.state.*;
 
-import org.codehaus.jackson.JsonEncoding;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.Version;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.module.SimpleModule;
-
 public class Anonymizer extends Configured implements Tool {
   private boolean anonymizeTrace = false;
   private Path inputTracePath = null;
@@ -88,8 +87,8 @@ public class Anonymizer extends Configured implements Tool {
      
     outMapper = new ObjectMapper();
     // define a module
-    SimpleModule module = new SimpleModule("Anonymization Serializer",  
-                                           new Version(0, 1, 1, "FINAL"));
+    SimpleModule module = new SimpleModule(
+        "Anonymization Serializer", new Version(0, 1, 1, "FINAL", "", ""));
     // add various serializers to the module
     // use the default (as-is) serializer for default data types
     module.addSerializer(DataType.class, new DefaultRumenSerializer());
@@ -106,7 +105,7 @@ public class Anonymizer extends Configured implements Tool {
     // register the module with the object-mapper
     outMapper.registerModule(module);
     
-    outFactory = outMapper.getJsonFactory();
+    outFactory = outMapper.getFactory();
   }
   
   // anonymize the job trace file
@@ -191,8 +190,8 @@ public class Anonymizer extends Configured implements Tool {
       output = outFS.create(path);
     }
 
-    JsonGenerator outGen = outFactory.createJsonGenerator(output, 
-                                                          JsonEncoding.UTF8);
+    JsonGenerator outGen =
+        outFactory.createGenerator(output, JsonEncoding.UTF8);
     outGen.useDefaultPrettyPrinter();
     
     return outGen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
index c53a7c2..eceb98d 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
@@ -35,6 +35,7 @@ import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.fasterxml.jackson.core.JsonProcessingException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -57,8 +58,6 @@ import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.Decompressor;
 
-import org.codehaus.jackson.JsonProcessingException;
-
 /**
  * This is the main class for rumen log mining functionality.
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
index cbd3679..f95878d 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperParser.java
@@ -18,15 +18,14 @@
 package org.apache.hadoop.tools.rumen;
 
 import java.io.Closeable;
-import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.ObjectMapper;
 
 /**
  * A simple wrapper for parsing JSON-encoded data using ObjectMapper.
@@ -50,11 +49,9 @@ class JsonObjectMapperParser<T> implements Closeable {
   public JsonObjectMapperParser(Path path, Class<? extends T> clazz,
       Configuration conf) throws IOException {
     mapper = new ObjectMapper();
-    mapper.configure(
-        DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
     this.clazz = clazz;
     InputStream input = new PossiblyDecompressedInputStream(path, conf);
-    jsonParser = mapper.getJsonFactory().createJsonParser(input);
+    jsonParser = mapper.getFactory().createParser(input);
   }
 
   /**
@@ -66,10 +63,8 @@ class JsonObjectMapperParser<T> implements Closeable {
   public JsonObjectMapperParser(InputStream input, Class<? extends T> clazz)
       throws IOException {
     mapper = new ObjectMapper();
-    mapper.configure(
-        DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
     this.clazz = clazz;
-    jsonParser = mapper.getJsonFactory().createJsonParser(input);
+    jsonParser = mapper.getFactory().createParser(input);
   }
 
   /**
@@ -82,7 +77,7 @@ class JsonObjectMapperParser<T> implements Closeable {
   public T getNext() throws IOException {
     try {
       return mapper.readValue(jsonParser, clazz);
-    } catch (EOFException e) {
+    } catch (JsonMappingException e) {
       return null;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
index 47bfee0..747b141 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JsonObjectMapperWriter.java
@@ -21,16 +21,15 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.Version;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
 import org.apache.hadoop.mapreduce.ID;
 import org.apache.hadoop.tools.rumen.datatypes.DataType;
 import org.apache.hadoop.tools.rumen.serializers.DefaultRumenSerializer;
 import org.apache.hadoop.tools.rumen.serializers.ObjectStringSerializer;
-import org.codehaus.jackson.JsonEncoding;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.Version;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
-import org.codehaus.jackson.map.module.SimpleModule;
 
 /**
  * Simple wrapper around {@link JsonGenerator} to write objects in JSON format.
@@ -41,12 +40,10 @@ public class JsonObjectMapperWriter<T> implements Closeable {
   
   public JsonObjectMapperWriter(OutputStream output, boolean prettyPrint) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(
-        SerializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
 
     // define a module
-    SimpleModule module = new SimpleModule("Default Serializer",  
-                                           new Version(0, 1, 1, "FINAL"));
+    SimpleModule module = new SimpleModule(
+        "Default Serializer", new Version(0, 1, 1, "FINAL", "", ""));
     // add various serializers to the module
     //   add default (all-pass) serializer for all rumen specific data types
     module.addSerializer(DataType.class, new DefaultRumenSerializer());
@@ -56,9 +53,7 @@ public class JsonObjectMapperWriter<T> implements Closeable {
     // register the module with the object-mapper
     mapper.registerModule(module);
 
-    mapper.getJsonFactory();
-    writer = mapper.getJsonFactory().createJsonGenerator(
-        output, JsonEncoding.UTF8);
+    writer = mapper.getFactory().createGenerator(output, JsonEncoding.UTF8);
     if (prettyPrint) {
       writer.useDefaultPrettyPrinter();
     }
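
Two Jackson 2 API differences show up in this hunk: the Version constructor gains snapshot/group/artifact arguments, and generators come from JsonFactory#createGenerator() rather than createJsonGenerator(). A small self-contained sketch of the module-plus-generator setup, using a hypothetical Sample type in place of rumen's DataType:

import java.io.IOException;

import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.module.SimpleModule;

public class Jackson2ModuleSketch {

  // Hypothetical value type, standing in for rumen's DataType.
  static class Sample {
    private final String value;
    Sample(String value) { this.value = value; }
    String getValue() { return value; }
  }

  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    // Jackson 2 Version: major, minor, patch, snapshotInfo, groupId, artifactId.
    SimpleModule module = new SimpleModule(
        "Example Serializer", new Version(0, 1, 1, "FINAL", "", ""));
    // Serialize Sample as a bare JSON string.
    module.addSerializer(Sample.class, new JsonSerializer<Sample>() {
      @Override
      public void serialize(Sample s, JsonGenerator gen,
          SerializerProvider provider) throws IOException {
        gen.writeString(s.getValue());
      }
    });
    mapper.registerModule(module);

    // createGenerator() replaces Jackson 1's createJsonGenerator().
    JsonGenerator gen =
        mapper.getFactory().createGenerator(System.out, JsonEncoding.UTF8);
    gen.useDefaultPrettyPrinter();
    mapper.writeValue(gen, new Sample("hello"));  // prints "hello"
  }
}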

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java
index 785feb3..597aab8 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedJob.java
@@ -27,9 +27,9 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.TreeSet;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.tools.rumen.datatypes.*;
-import org.codehaus.jackson.annotate.JsonAnySetter;
 
 /**
  * A {@link LoggedDiscreteCDF} is a representation of an hadoop job, with the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java
index 047cd63..6d51e4a 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedLocation.java
@@ -25,8 +25,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import org.apache.hadoop.tools.rumen.datatypes.NodeName;
-import org.codehaus.jackson.annotate.JsonAnySetter;
 
 /**
  * A {@link LoggedLocation} is a representation of a point in an hierarchical

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
index 23bbb98..5d79a83 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedNetworkTopology.java
@@ -29,8 +29,8 @@ import java.util.TreeSet;
 import java.util.ArrayList;
 import java.util.Comparator;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import org.apache.hadoop.tools.rumen.datatypes.NodeName;
-import org.codehaus.jackson.annotate.JsonAnySetter;
 
 /**
  * A {@link LoggedNetworkTopology} represents a tree that in turn represents a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
index e507116..d9be325 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedSingleRelativeRanking.java
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.tools.rumen;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.codehaus.jackson.annotate.JsonAnySetter;
-
 /**
  * A {@link LoggedSingleRelativeRanking} represents an X-Y coordinate of a
  * single point in a discrete CDF.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java
index 4a23fa6..4ae33a7 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTask.java
@@ -23,13 +23,13 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.jobhistory.JhCounter;
 import org.apache.hadoop.mapreduce.jobhistory.JhCounterGroup;
 import org.apache.hadoop.mapreduce.jobhistory.JhCounters;
 
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.annotate.JsonAnySetter;
 
 /**
  * A {@link LoggedTask} represents a [hadoop] task that is part of a hadoop job.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
index c21eb39..5c6abd3 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
@@ -23,8 +23,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.annotate.JsonAnySetter;
 
 // HACK ALERT!!!  This "should" have two subclasses, which might be called
 //                LoggedMapTaskAttempt and LoggedReduceTaskAttempt, but 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java
index c0b8d45..20eb535 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/NodeName.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.tools.rumen.datatypes;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.rumen.ParsedHost;
 import org.apache.hadoop.tools.rumen.anonymization.WordList;
 import org.apache.hadoop.tools.rumen.state.State;
 import org.apache.hadoop.tools.rumen.state.StatePool;
-import org.codehaus.jackson.annotate.JsonIgnore;
 
 /**
  * Represents the cluster host.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java
index 4338602..a720214 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/BlockingSerializer.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.tools.rumen.serializers;
 
-import java.io.IOException;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
 
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.JsonSerializer;
-import org.codehaus.jackson.map.SerializerProvider;
+import java.io.IOException;
 
 /**
  * A JSON serializer for Strings.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java
index d4e6fd5..944d193 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultAnonymizingRumenSerializer.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.tools.rumen.serializers;
 
 import java.io.IOException;
 
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.rumen.datatypes.AnonymizableDataType;
 import org.apache.hadoop.tools.rumen.state.StatePool;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.JsonSerializer;
-import org.codehaus.jackson.map.SerializerProvider;
 
 /**
  * Default Rumen JSON serializer.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java
index 1b433d8..766a750 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/DefaultRumenSerializer.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.tools.rumen.serializers;
 
 import java.io.IOException;
 
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
 import org.apache.hadoop.tools.rumen.datatypes.DataType;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.JsonSerializer;
-import org.codehaus.jackson.map.SerializerProvider;
 
 /**
  * Default Rumen JSON serializer.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java
index 69e8950..a576871 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/serializers/ObjectStringSerializer.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.tools.rumen.serializers;
 
-import java.io.IOException;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
 
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.JsonSerializer;
-import org.codehaus.jackson.map.SerializerProvider;
+import java.io.IOException;
 
 /**
  * Rumen JSON serializer for serializing objects using the toString() API.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java
index 94a78c2..6ff43d5 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/State.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.tools.rumen.state;
 
-import org.codehaus.jackson.annotate.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnore;
 
 /**
  * Represents a state. This state is managed by {@link StatePool}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java
index 47ceb8e..4fd2f12 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StateDeserializer.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.tools.rumen.state;
 
 import java.io.IOException;
 
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
+import com.fasterxml.jackson.databind.node.ObjectNode;
 import org.apache.hadoop.tools.rumen.state.StatePool.StatePair;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.JsonProcessingException;
-import org.codehaus.jackson.map.DeserializationContext;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.deser.std.StdDeserializer;
-import org.codehaus.jackson.node.ObjectNode;
 
 /**
  * Rumen JSON deserializer for deserializing the {@link State} object.
@@ -46,7 +46,7 @@ public class StateDeserializer extends StdDeserializer<StatePair> {
     
     try {
       stateClass = 
-        Class.forName(statePairObject.get("className").getTextValue().trim());
+        Class.forName(statePairObject.get("className").textValue().trim());
     } catch (ClassNotFoundException cnfe) {
       throw new RuntimeException("Invalid classname!", cnfe);
     }
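
The only non-import change here is the JsonNode accessor rename: Jackson 1's getTextValue() becomes textValue() in Jackson 2. A tiny sketch, with an illustrative class name in the JSON rather than a real rumen state:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class JsonNodeAccessorSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode node = (ObjectNode) mapper.readTree(
        "{\"className\": \"  org.example.SomeState  \"}");
    // Jackson 1: node.get("className").getTextValue()
    // Jackson 2: node.get("className").textValue()
    String className = node.get("className").textValue().trim();
    System.out.println(className);  // org.example.SomeState
  }
}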

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
index 576a3c0..6e6c859 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/state/StatePool.java
@@ -27,6 +27,14 @@ import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.HashMap;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.Version;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -35,16 +43,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.tools.rumen.Anonymizer;
 import org.apache.hadoop.tools.rumen.datatypes.DataType;
-import org.codehaus.jackson.JsonEncoding;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonParser;
-import org.codehaus.jackson.Version;
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
-import org.codehaus.jackson.map.module.SimpleModule;
 
 /**
  * A pool of states. States used by {@link DataType}'s can be managed the 
@@ -212,20 +210,16 @@ public class StatePool {
   
   private void read(DataInput in) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(
-        DeserializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
-    
     // define a module
     SimpleModule module = new SimpleModule("State Serializer",  
-        new Version(0, 1, 1, "FINAL"));
+        new Version(0, 1, 1, "FINAL", "", ""));
     // add the state deserializer
     module.addDeserializer(StatePair.class, new StateDeserializer());
 
     // register the module with the object-mapper
     mapper.registerModule(module);
 
-    JsonParser parser = 
-      mapper.getJsonFactory().createJsonParser((DataInputStream)in);
+    JsonParser parser = mapper.getFactory().createParser((DataInputStream)in);
     StatePool statePool = mapper.readValue(parser, StatePool.class);
     this.setStates(statePool.getStates());
     parser.close();
@@ -283,20 +277,18 @@ public class StatePool {
     // This is just a JSON experiment
     System.out.println("Dumping the StatePool's in JSON format.");
     ObjectMapper outMapper = new ObjectMapper();
-    outMapper.configure(
-        SerializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
     // define a module
     SimpleModule module = new SimpleModule("State Serializer",  
-        new Version(0, 1, 1, "FINAL"));
+        new Version(0, 1, 1, "FINAL", "", ""));
     // add the state serializer
     //module.addSerializer(State.class, new StateSerializer());
 
     // register the module with the object-mapper
     outMapper.registerModule(module);
 
-    JsonFactory outFactory = outMapper.getJsonFactory();
-    JsonGenerator jGen = 
-      outFactory.createJsonGenerator((DataOutputStream)out, JsonEncoding.UTF8);
+    JsonFactory outFactory = outMapper.getFactory();
+    JsonGenerator jGen =
+        outFactory.createGenerator((DataOutputStream)out, JsonEncoding.UTF8);
     jGen.useDefaultPrettyPrinter();
 
     jGen.writeObject(this);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java b/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java
index 372d93e..206095a 100644
--- a/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java
+++ b/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java
@@ -21,16 +21,17 @@ import java.io.IOException;
 
 import java.util.List;
 
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.codehaus.jackson.JsonEncoding;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
 
 import org.junit.Ignore;
 import org.junit.Test;
@@ -139,9 +140,9 @@ public class TestHistograms {
         Path goldFilePath = new Path(filePath.getParent(), "gold"+testName);
 
         ObjectMapper mapper = new ObjectMapper();
-        JsonFactory factory = mapper.getJsonFactory();
+        JsonFactory factory = mapper.getFactory();
         FSDataOutputStream ostream = lfs.create(goldFilePath, true);
-        JsonGenerator gen = factory.createJsonGenerator(ostream,
+        JsonGenerator gen = factory.createGenerator(ostream,
             JsonEncoding.UTF8);
         gen.useDefaultPrettyPrinter();
         

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index e361ba3..da70b24 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -70,6 +70,10 @@
       <artifactId>jetty-util</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
index 0d0745c..76bcb15 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
@@ -34,6 +34,9 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -41,9 +44,6 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectWriter;
 
 @Private
 @Unstable
@@ -127,7 +127,7 @@ public class RumenToSLSConverter {
         ObjectMapper mapper = new ObjectMapper();
         ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
         Iterator<Map> i = mapper.readValues(
-                new JsonFactory().createJsonParser(input), Map.class);
+            new JsonFactory().createParser(input), Map.class);
         while (i.hasNext()) {
           Map m = i.next();
           output.write(writer.writeValueAsString(createSLSJob(m)) + EOL);
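
The SLS tools read a stream of back-to-back root-level JSON objects via ObjectMapper#readValues(), which returns an iterator; only the parser construction changes in Jackson 2 (createParser() instead of createJsonParser()). A minimal sketch with an inline two-record input standing in for a trace file:

import java.io.StringReader;
import java.util.Iterator;
import java.util.Map;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ReadValuesSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Two root-level JSON objects, one per record, as in an SLS input file.
    StringReader input =
        new StringReader("{\"rack\": \"r1\"}\n{\"rack\": \"r2\"}\n");
    // Jackson 2: createParser() replaces createJsonParser().
    Iterator<Map> i = mapper.readValues(
        new JsonFactory().createParser(input), Map.class);
    while (i.hasNext()) {
      System.out.println(i.next().get("rack"));  // r1, then r2
    }
  }
}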

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index c9c5c38..61738fb 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -33,6 +33,9 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -66,8 +69,6 @@ import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.log4j.Logger;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
 
 @Private
 @Unstable
@@ -281,7 +282,7 @@ public class SLSRunner {
       Reader input =
           new InputStreamReader(new FileInputStream(inputTrace), "UTF-8");
       try {
-        Iterator<Map> i = mapper.readValues(jsonF.createJsonParser(input),
+        Iterator<Map> i = mapper.readValues(jsonF.createParser(input),
                 Map.class);
         while (i.hasNext()) {
           Map jsonJob = i.next();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
index f1b4f07..e5f7cd0 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -37,8 +39,6 @@ import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
 
 @Private
 @Unstable
@@ -106,8 +106,7 @@ public class SLSUtils {
     Reader input =
         new InputStreamReader(new FileInputStream(jobTrace), "UTF-8");
     try {
-      Iterator<Map> i = mapper.readValues(
-              jsonF.createJsonParser(input), Map.class);
+      Iterator<Map> i = mapper.readValues(jsonF.createParser(input), Map.class);
       while (i.hasNext()) {
         Map jsonE = i.next();
         List tasks = (List) jsonE.get("job.tasks");
@@ -134,8 +133,7 @@ public class SLSUtils {
     Reader input =
         new InputStreamReader(new FileInputStream(nodeFile), "UTF-8");
     try {
-      Iterator<Map> i = mapper.readValues(
-              jsonF.createJsonParser(input), Map.class);
+      Iterator<Map> i = mapper.readValues(jsonF.createParser(input), Map.class);
       while (i.hasNext()) {
         Map jsonE = i.next();
         String rack = "/" + jsonE.get("rack");




[22/52] [abbrv] hadoop git commit: HDFS-10980. Optimize check for existence of parent directory. Contributed by Daryn Sharp.

Posted by cn...@apache.org.
HDFS-10980. Optimize check for existence of parent directory. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e57fa81d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e57fa81d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e57fa81d

Branch: refs/heads/HADOOP-13037
Commit: e57fa81d9559a93d77fd724f7792326c31a490be
Parents: f3f37e6
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Oct 7 17:20:15 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Oct 7 17:20:15 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  2 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |  2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../hdfs/server/namenode/FSDirectory.java       | 11 ++---
 .../hdfs/server/namenode/TestFSDirectory.java   | 48 ++++++++++++++++++++
 5 files changed, 56 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 2d1914f..4d8d7d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -66,7 +66,7 @@ class FSDirMkdirOp {
         }
 
         if (!createParent) {
-          fsd.verifyParentDir(iip, src);
+          fsd.verifyParentDir(iip);
         }
 
         // validate that we have enough inodes. This is, at best, a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 6938a84..71362f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -58,7 +58,7 @@ class FSDirSymlinkOp {
       iip = fsd.resolvePathForWrite(pc, link, false);
       link = iip.getPath();
       if (!createParent) {
-        fsd.verifyParentDir(iip, link);
+        fsd.verifyParentDir(iip);
       }
       if (!fsd.isValidToCreate(link, iip)) {
         throw new IOException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 40be83b..aab0f76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -323,7 +323,7 @@ class FSDirWriteFileOp {
       }
     } else {
       if (!createParent) {
-        dir.verifyParentDir(iip, src);
+        dir.verifyParentDir(iip);
       }
       if (!flag.contains(CreateFlag.CREATE)) {
         throw new FileNotFoundException("Can't overwrite non-existent " + src);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8456da6..a059ee5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1765,17 +1765,16 @@ public class FSDirectory implements Closeable {
   /**
    * Verify that parent directory of src exists.
    */
-  void verifyParentDir(INodesInPath iip, String src)
+  void verifyParentDir(INodesInPath iip)
       throws FileNotFoundException, ParentNotDirectoryException {
-    Path parent = new Path(src).getParent();
-    if (parent != null) {
+    if (iip.length() > 2) {
       final INode parentNode = iip.getINode(-2);
       if (parentNode == null) {
         throw new FileNotFoundException("Parent directory doesn't exist: "
-            + parent);
-      } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
+            + iip.getParentPath());
+      } else if (!parentNode.isDirectory()) {
         throw new ParentNotDirectoryException("Parent path is not a directory: "
-            + parent);
+            + iip.getParentPath());
       }
     }
   }
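
The rewritten check leans on the fact that INodesInPath already holds the resolved path components, so there is no need to rebuild a Path from the source string just to ask whether a parent exists; only paths with more than two components have a non-root parent worth verifying (and, per the new check, that parent must now be a directory outright). A toy model of the component-count convention, not HDFS code, with root counted as the leading empty component:

public class ParentCheckSketch {
  // Hypothetical stand-in for INodesInPath#length(): number of path
  // components, where "/" contributes a single empty leading component.
  static boolean needsParentCheck(String[] components) {
    // "/"           -> length 1: no parent at all
    // "/dir1"       -> length 2: parent is the root, which always exists
    // "/dir1/file"  -> length 3: parent "dir1" must exist and be a directory
    return components.length > 2;
  }

  public static void main(String[] args) {
    System.out.println(needsParentCheck(new String[] {""}));                 // false
    System.out.println(needsParentCheck(new String[] {"", "dir1"}));         // false
    System.out.println(needsParentCheck(new String[] {"", "dir1", "file"})); // true
  }
}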

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 2b43c0f..071bdf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.BufferedReader;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.EnumSet;
@@ -30,6 +31,7 @@ import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -386,4 +388,50 @@ public class TestFSDirectory {
                                                        XAttrSetFlag.REPLACE));
     verifyXAttrsPresent(newXAttrs, 4);
   }
+
+  @Test
+  public void testVerifyParentDir() throws Exception {
+    hdfs.mkdirs(new Path("/dir1/dir2"));
+    hdfs.createNewFile(new Path("/dir1/file"));
+    hdfs.createNewFile(new Path("/dir1/dir2/file"));
+
+    INodesInPath iip = fsdir.resolvePath(null, "/");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/file");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir-nonexist/file");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected FNF");
+    } catch (FileNotFoundException fnf) {
+      // expected.
+    }
+
+    iip = fsdir.resolvePath(null, "/dir1/dir2");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/dir2/file");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/dir-nonexist/file");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected FNF");
+    } catch (FileNotFoundException fnf) {
+      // expected.
+    }
+
+    iip = fsdir.resolvePath(null, "/dir1/file/fail");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected FNF");
+    } catch (ParentNotDirectoryException pnd) {
+      // expected.
+    }
+  }
 }




[34/52] [abbrv] hadoop git commit: HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by Suraj Acharya.

Posted by cn...@apache.org.
HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65912e40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65912e40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65912e40

Branch: refs/heads/HADOOP-13037
Commit: 65912e4027548868ebefd8ee36eb00fa889704a7
Parents: 0306007
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Oct 10 12:49:19 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Oct 10 12:51:12 2016 -0700

----------------------------------------------------------------------
 .../hadoop/crypto/key/kms/server/KMS.java       | 711 ++++++++++---------
 1 file changed, 392 insertions(+), 319 deletions(-)
----------------------------------------------------------------------
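
Every handler in the diff below follows the same shape: the existing body moves into a try block and the catch logs the exception at DEBUG before rethrowing it, so the server keeps a stack trace even when only the client sees the error. A condensed sketch of that pattern with an SLF4J logger and a hypothetical handler method:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogBeforeThrowSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogBeforeThrowSketch.class);

  // Hypothetical handler, standing in for a KMS REST method.
  public String handleRequest(String name) throws Exception {
    try {
      LOG.trace("Entering handleRequest.");
      if (name == null || name.isEmpty()) {
        throw new IllegalArgumentException("name cannot be empty");
      }
      LOG.trace("Exiting handleRequest.");
      return "ok:" + name;
    } catch (Exception e) {
      // Log server-side before rethrowing so the stack trace is not lost.
      LOG.debug("Exception in handleRequest.", e);
      throw e;
    }
  }
}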


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65912e40/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 371f3f5..d8755ec 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -104,89 +104,101 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   @SuppressWarnings("unchecked")
   public Response createKey(Map jsonKey) throws Exception {
-    LOG.trace("Entering createKey Method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
-    KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
-    assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
-    String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
-    final String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
-    int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
-                 ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
-    String description = (String)
-        jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
-    LOG.debug("Creating key with name {}, cipher being used{}, " +
-            "length of key {}, description of key {}", name, cipher,
-             length, description);
-    Map<String, String> attributes = (Map<String, String>)
-        jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
-    if (material != null) {
-      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
-          KMSOp.CREATE_KEY, name);
-    }
-    final KeyProvider.Options options = new KeyProvider.Options(
-        KMSWebApp.getConfiguration());
-    if (cipher != null) {
-      options.setCipher(cipher);
-    }
-    if (length != 0) {
-      options.setBitLength(length);
-    }
-    options.setDescription(description);
-    options.setAttributes(attributes);
-
-    KeyProvider.KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            KeyProvider.KeyVersion keyVersion = (material != null)
-              ? provider.createKey(name, Base64.decodeBase64(material), options)
-              : provider.createKey(name, options);
-            provider.flush();
-            return keyVersion;
+    try{
+      LOG.trace("Entering createKey Method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
+      KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
+      assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
+      String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
+      final String material;
+      material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
+      int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
+                   ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
+      String description = (String)
+          jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+      LOG.debug("Creating key with name {}, cipher being used{}, " +
+              "length of key {}, description of key {}", name, cipher,
+               length, description);
+      Map<String, String> attributes = (Map<String, String>)
+          jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
+      if (material != null) {
+        assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+            KMSOp.CREATE_KEY, name);
+      }
+      final KeyProvider.Options options = new KeyProvider.Options(
+          KMSWebApp.getConfiguration());
+      if (cipher != null) {
+        options.setCipher(cipher);
+      }
+      if (length != 0) {
+        options.setBitLength(length);
+      }
+      options.setDescription(description);
+      options.setAttributes(attributes);
+
+      KeyProvider.KeyVersion keyVersion = user.doAs(
+          new PrivilegedExceptionAction<KeyVersion>() {
+            @Override
+            public KeyVersion run() throws Exception {
+              KeyProvider.KeyVersion keyVersion = (material != null)
+                  ? provider.createKey(name, Base64.decodeBase64(material),
+                      options)
+                  : provider.createKey(name, options);
+              provider.flush();
+              return keyVersion;
+            }
           }
-        }
-    );
+      );
 
-    kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
-        (material != null) + " Description:" + description);
+      kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
+          (material != null) + " Description:" + description);
 
-    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
-      keyVersion = removeKeyMaterial(keyVersion);
+      if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
+        keyVersion = removeKeyMaterial(keyVersion);
+      }
+      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      String requestURL = KMSMDCFilter.getURL();
+      int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
+      requestURL = requestURL.substring(0, idx);
+      LOG.trace("Exiting createKey Method.");
+      return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
+          .type(MediaType.APPLICATION_JSON)
+          .header("Location", getKeyURI(requestURL, name)).entity(json).build();
+    } catch (Exception e) {
+      LOG.debug("Exception in createKey.", e);
+      throw e;
     }
-    Map json = KMSServerJSONUtils.toJSON(keyVersion);
-    String requestURL = KMSMDCFilter.getURL();
-    int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
-    requestURL = requestURL.substring(0, idx);
-    LOG.trace("Exiting createKey Method.");
-    return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
-        .type(MediaType.APPLICATION_JSON)
-        .header("Location", getKeyURI(requestURL, name)).entity(json).build();
   }
 
   @DELETE
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response deleteKey(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering deleteKey method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
-    KMSClientProvider.checkNotEmpty(name, "name");
-    LOG.debug("Deleting key with name {}.", name);
-    user.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        provider.deleteKey(name);
-        provider.flush();
-        return null;
-      }
-    });
-
-    kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
-    LOG.trace("Exiting deleteKey method.");
-    return Response.ok().build();
+    try {
+      LOG.trace("Entering deleteKey method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
+      KMSClientProvider.checkNotEmpty(name, "name");
+      LOG.debug("Deleting key with name {}.", name);
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          provider.deleteKey(name);
+          provider.flush();
+          return null;
+        }
+      });
+
+      kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
+      LOG.trace("Exiting deleteKey method.");
+      return Response.ok().build();
+    } catch (Exception e) {
+      LOG.debug("Exception in deleteKey.", e);
+      throw e;
+    }
   }
 
   @POST
@@ -195,41 +207,49 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response rolloverKey(@PathParam("name") final String name,
       Map jsonMaterial) throws Exception {
-    LOG.trace("Entering rolloverKey Method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
-    KMSClientProvider.checkNotEmpty(name, "name");
-    LOG.debug("Rolling key with name {}.", name);
-    final String material = (String)
-        jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
-    if (material != null) {
-      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
-          KMSOp.ROLL_NEW_VERSION, name);
-    }
+    try {
+      LOG.trace("Entering rolloverKey Method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
+      KMSClientProvider.checkNotEmpty(name, "name");
+      LOG.debug("Rolling key with name {}.", name);
+      final String material = (String)
+              jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
+      if (material != null) {
+        assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+                KMSOp.ROLL_NEW_VERSION, name);
+      }
 
-    KeyProvider.KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            KeyVersion keyVersion = (material != null)
-              ? provider.rollNewVersion(name, Base64.decodeBase64(material))
-              : provider.rollNewVersion(name);
-            provider.flush();
-            return keyVersion;
-          }
-        }
-    );
+      KeyProvider.KeyVersion keyVersion = user.doAs(
+              new PrivilegedExceptionAction<KeyVersion>() {
+              @Override
+                public KeyVersion run() throws Exception {
+                KeyVersion keyVersion = (material != null)
+                        ? provider.rollNewVersion(name,
+                        Base64.decodeBase64(material))
+                        : provider.rollNewVersion(name);
+                provider.flush();
+                return keyVersion;
+              }
+            }
+      );
 
-    kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
-        (material != null) + " NewVersion:" + keyVersion.getVersionName());
+      kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+              (material != null) +
+              " NewVersion:" + keyVersion.getVersionName());
 
-    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
-      keyVersion = removeKeyMaterial(keyVersion);
+      if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
+        keyVersion = removeKeyMaterial(keyVersion);
+      }
+      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      LOG.trace("Exiting rolloverKey Method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in rolloverKey.", e);
+      throw e;
     }
-    Map json = KMSServerJSONUtils.toJSON(keyVersion);
-    LOG.trace("Exiting rolloverKey Method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
   @GET
@@ -237,59 +257,76 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
       List<String> keyNamesList) throws Exception {
-    LOG.trace("Entering getKeysMetadata method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    final String[] keyNames = keyNamesList.toArray(
-        new String[keyNamesList.size()]);
-    assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
-
-    KeyProvider.Metadata[] keysMeta = user.doAs(
-        new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
-          @Override
-          public KeyProvider.Metadata[] run() throws Exception {
-            return provider.getKeysMetadata(keyNames);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeysMetadata method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      final String[] keyNames = keyNamesList.toArray(
+              new String[keyNamesList.size()]);
+      assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
+
+      KeyProvider.Metadata[] keysMeta = user.doAs(
+              new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
+              @Override
+                public KeyProvider.Metadata[] run() throws Exception {
+                return provider.getKeysMetadata(keyNames);
+              }
+            }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
-    kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
-    LOG.trace("Exiting getKeysMetadata method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
+      kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
+      LOG.trace("Exiting getKeysMetadata method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeysmetadata.", e);
+      throw e;
+    }
   }
 
   @GET
   @Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyNames() throws Exception {
-    LOG.trace("Entering getKeyNames method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
-
-    List<String> json = user.doAs(
-        new PrivilegedExceptionAction<List<String>>() {
-          @Override
-          public List<String> run() throws Exception {
-            return provider.getKeys();
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyNames method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
+
+      List<String> json = user.doAs(
+              new PrivilegedExceptionAction<List<String>>() {
+              @Override
+                public List<String> run() throws Exception {
+                return provider.getKeys();
+              }
+            }
+      );
 
-    kmsAudit.ok(user, KMSOp.GET_KEYS, "");
-    LOG.trace("Exiting getKeyNames method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      kmsAudit.ok(user, KMSOp.GET_KEYS, "");
+      LOG.trace("Exiting getKeyNames method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getkeyNames.", e);
+      throw e;
+    }
   }
 
   @GET
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response getKey(@PathParam("name") String name)
       throws Exception {
-    LOG.trace("Entering getKey method.");
-    LOG.debug("Getting key information for key with name {}.", name);
-    LOG.trace("Exiting getKey method.");
-    return getMetadata(name);
+    try {
+      LOG.trace("Entering getKey method.");
+      LOG.debug("Getting key information for key with name {}.", name);
+      LOG.trace("Exiting getKey method.");
+      return getMetadata(name);
+    } catch (Exception e) {
+      LOG.debug("Exception in getKey.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -298,26 +335,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getMetadata(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getMetadata method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getAdminCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
-    LOG.debug("Getting metadata for key with name {}.", name);
-
-    KeyProvider.Metadata metadata = user.doAs(
-        new PrivilegedExceptionAction<KeyProvider.Metadata>() {
-          @Override
-          public KeyProvider.Metadata run() throws Exception {
-            return provider.getMetadata(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getMetadata method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getAdminCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
+      LOG.debug("Getting metadata for key with name {}.", name);
+
+      KeyProvider.Metadata metadata = user.doAs(
+          new PrivilegedExceptionAction<KeyProvider.Metadata>() {
+            @Override
+            public KeyProvider.Metadata run() throws Exception {
+              return provider.getMetadata(name);
+            }
+          }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(name, metadata);
-    kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
-    LOG.trace("Exiting getMetadata method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(name, metadata);
+      kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
+      LOG.trace("Exiting getMetadata method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getMetadata.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -326,26 +369,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getCurrentVersion(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getCurrentVersion method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
-    LOG.debug("Getting key version for key with name {}.", name);
-
-    KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            return provider.getCurrentKey(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getCurrentVersion method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
+      LOG.debug("Getting key version for key with name {}.", name);
+
+      KeyVersion keyVersion = user.doAs(
+          new PrivilegedExceptionAction<KeyVersion>() {
+            @Override
+            public KeyVersion run() throws Exception {
+              return provider.getCurrentKey(name);
+            }
+          }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(keyVersion);
-    kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
-    LOG.trace("Exiting getCurrentVersion method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
+      LOG.trace("Exiting getCurrentVersion method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getCurrentVersion.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -353,28 +402,34 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersion(
       @PathParam("versionName") final String versionName) throws Exception {
-    LOG.trace("Entering getKeyVersion method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(versionName, "versionName");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
-    LOG.debug("Getting key with version name {}.", versionName);
-
-    KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            return provider.getKeyVersion(versionName);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyVersion method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(versionName, "versionName");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
+      LOG.debug("Getting key with version name {}.", versionName);
+
+      KeyVersion keyVersion = user.doAs(
+          new PrivilegedExceptionAction<KeyVersion>() {
+            @Override
+            public KeyVersion run() throws Exception {
+              return provider.getKeyVersion(versionName);
+            }
+          }
+      );
 
-    if (keyVersion != null) {
-      kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
+      if (keyVersion != null) {
+        kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
+      }
+      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      LOG.trace("Exiting getKeyVersion method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeyVersion.", e);
+      throw e;
     }
-    Object json = KMSServerJSONUtils.toJSON(keyVersion);
-    LOG.trace("Exiting getKeyVersion method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
   @SuppressWarnings({ "rawtypes", "unchecked" })
@@ -388,60 +443,65 @@ public class KMS {
           @DefaultValue("1")
           @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys)
           throws Exception {
-    LOG.trace("Entering generateEncryptedKeys method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSClientProvider.checkNotNull(edekOp, "eekOp");
-    LOG.debug("Generating encrypted key with name {}," +
-            " the edek Operation is {}.", name, edekOp);
-
-    Object retJSON;
-    if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
-      LOG.debug("edek Operation is Generate.");
-      assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
-
-      final List<EncryptedKeyVersion> retEdeks =
-          new LinkedList<EncryptedKeyVersion>();
-      try {
-
-        user.doAs(
-            new PrivilegedExceptionAction<Void>() {
-              @Override
-              public Void run() throws Exception {
-                  LOG.debug("Generated Encrypted key for {} number of keys.",
-                          numKeys);
-                for (int i = 0; i < numKeys; i++) {
-                  retEdeks.add(provider.generateEncryptedKey(name));
+    try {
+      LOG.trace("Entering generateEncryptedKeys method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSClientProvider.checkNotNull(edekOp, "eekOp");
+      LOG.debug("Generating encrypted key with name {}," +
+              " the edek Operation is {}.", name, edekOp);
+
+      Object retJSON;
+      if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
+        LOG.debug("edek Operation is Generate.");
+        assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
+
+        final List<EncryptedKeyVersion> retEdeks =
+                new LinkedList<EncryptedKeyVersion>();
+        try {
+
+          user.doAs(
+                  new PrivilegedExceptionAction<Void>() {
+                  @Override
+                    public Void run() throws Exception {
+                    LOG.debug("Generated Encrypted key for {} number of " +
+                              "keys.", numKeys);
+                    for (int i = 0; i < numKeys; i++) {
+                      retEdeks.add(provider.generateEncryptedKey(name));
+                    }
+                    return null;
+                  }
                 }
-                return null;
-              }
-            }
-        );
+          );
 
-      } catch (Exception e) {
-        LOG.error("Exception in generateEncryptedKeys:", e);
-        throw new IOException(e);
-      }
-      kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
-      retJSON = new ArrayList();
-      for (EncryptedKeyVersion edek : retEdeks) {
-        ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
+        } catch (Exception e) {
+          LOG.error("Exception in generateEncryptedKeys:", e);
+          throw new IOException(e);
+        }
+        kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
+        retJSON = new ArrayList();
+        for (EncryptedKeyVersion edek : retEdeks) {
+          ((ArrayList) retJSON).add(KMSServerJSONUtils.toJSON(edek));
+        }
+      } else {
+        StringBuilder error;
+        error = new StringBuilder("IllegalArgumentException Wrong ");
+        error.append(KMSRESTConstants.EEK_OP);
+        error.append(" value, it must be ");
+        error.append(KMSRESTConstants.EEK_GENERATE);
+        error.append(" or ");
+        error.append(KMSRESTConstants.EEK_DECRYPT);
+        LOG.error(error.toString());
+        throw new IllegalArgumentException(error.toString());
       }
-    } else {
-      StringBuilder error;
-      error = new StringBuilder("IllegalArgumentException Wrong ");
-      error.append(KMSRESTConstants.EEK_OP);
-      error.append(" value, it must be ");
-      error.append(KMSRESTConstants.EEK_GENERATE);
-      error.append(" or ");
-      error.append(KMSRESTConstants.EEK_DECRYPT);
-      LOG.error(error.toString());
-      throw new IllegalArgumentException(error.toString());
+      KMSWebApp.getGenerateEEKCallsMeter().mark();
+      LOG.trace("Exiting generateEncryptedKeys method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in generateEncryptedKeys.", e);
+      throw e;
     }
-    KMSWebApp.getGenerateEEKCallsMeter().mark();
-    LOG.trace("Exiting generateEncryptedKeys method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
-        .build();
   }
 
   @SuppressWarnings("rawtypes")
@@ -454,57 +514,64 @@ public class KMS {
       @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
       Map jsonPayload)
       throws Exception {
-    LOG.trace("Entering decryptEncryptedKey method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(versionName, "versionName");
-    KMSClientProvider.checkNotNull(eekOp, "eekOp");
-    LOG.debug("Decrypting key for {}, the edek Operation is {}.",
-            versionName, eekOp);
-
-    final String keyName = (String) jsonPayload.get(
-        KMSRESTConstants.NAME_FIELD);
-    String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
-    String encMaterialStr =
-        (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
-    Object retJSON;
-    if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
-      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
-      KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
-      final byte[] iv = Base64.decodeBase64(ivStr);
-      KMSClientProvider.checkNotNull(encMaterialStr,
-          KMSRESTConstants.MATERIAL_FIELD);
-      final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
-
-      KeyProvider.KeyVersion retKeyVersion = user.doAs(
-          new PrivilegedExceptionAction<KeyVersion>() {
-            @Override
-            public KeyVersion run() throws Exception {
-              return provider.decryptEncryptedKey(
-                  new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
-                      versionName, iv, KeyProviderCryptoExtension.EEK,
-                      encMaterial)
-              );
-            }
-          }
-      );
+    try {
+      LOG.trace("Entering decryptEncryptedKey method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(versionName, "versionName");
+      KMSClientProvider.checkNotNull(eekOp, "eekOp");
+      LOG.debug("Decrypting key for {}, the edek Operation is {}.",
+              versionName, eekOp);
+
+      final String keyName = (String) jsonPayload.get(
+              KMSRESTConstants.NAME_FIELD);
+      String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
+      String encMaterialStr =
+              (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
+      Object retJSON;
+      if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
+        assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK,
+                keyName);
+        KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
+        final byte[] iv = Base64.decodeBase64(ivStr);
+        KMSClientProvider.checkNotNull(encMaterialStr,
+                KMSRESTConstants.MATERIAL_FIELD);
+        final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
+
+        KeyProvider.KeyVersion retKeyVersion = user.doAs(
+            new PrivilegedExceptionAction<KeyVersion>() {
+              @Override
+              public KeyVersion run() throws Exception {
+                return provider.decryptEncryptedKey(
+                    new KMSClientProvider.KMSEncryptedKeyVersion(
+                        keyName, versionName, iv,
+                        KeyProviderCryptoExtension.EEK,
+                        encMaterial)
+                );
+              }
+            }
+        );
 
-      retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
-      kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
-    } else {
-      StringBuilder error;
-      error = new StringBuilder("IllegalArgumentException Wrong ");
-      error.append(KMSRESTConstants.EEK_OP);
-      error.append(" value, it must be ");
-      error.append(KMSRESTConstants.EEK_GENERATE);
-      error.append(" or ");
-      error.append(KMSRESTConstants.EEK_DECRYPT);
-      LOG.error(error.toString());
-      throw new IllegalArgumentException(error.toString());
+        retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
+        kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
+      } else {
+        StringBuilder error;
+        error = new StringBuilder("IllegalArgumentException Wrong ");
+        error.append(KMSRESTConstants.EEK_OP);
+        error.append(" value, it must be ");
+        error.append(KMSRESTConstants.EEK_GENERATE);
+        error.append(" or ");
+        error.append(KMSRESTConstants.EEK_DECRYPT);
+        LOG.error(error.toString());
+        throw new IllegalArgumentException(error.toString());
+      }
+      KMSWebApp.getDecryptEEKCallsMeter().mark();
+      LOG.trace("Exiting decryptEncryptedKey method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in decryptEncryptedKey.", e);
+      throw e;
     }
-    KMSWebApp.getDecryptEEKCallsMeter().mark();
-    LOG.trace("Exiting decryptEncryptedKey method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
-        .build();
   }
 
   @GET
@@ -513,26 +580,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersions(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getKeyVersions method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
-    LOG.debug("Getting key versions for key {}", name);
-
-    List<KeyVersion> ret = user.doAs(
-        new PrivilegedExceptionAction<List<KeyVersion>>() {
-          @Override
-          public List<KeyVersion> run() throws Exception {
-            return provider.getKeyVersions(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyVersions method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
+      LOG.debug("Getting key versions for key {}", name);
+
+      List<KeyVersion> ret = user.doAs(
+          new PrivilegedExceptionAction<List<KeyVersion>>() {
+            @Override
+            public List<KeyVersion> run() throws Exception {
+              return provider.getKeyVersions(name);
+            }
+          }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(ret);
-    kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
-    LOG.trace("Exiting getKeyVersions method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(ret);
+      kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
+      LOG.trace("Exiting getKeyVersions method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeyVersions.", e);
+      throw e;
+    }
   }
 
 }
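
For reference, a minimal, self-contained sketch of the wrapping pattern the hunks above
apply to every KMS handler: run the body inside try/catch, log the failure at DEBUG, and
rethrow so the existing KMS exception handling still sees it. The class and method names
here are illustrative only, and an SLF4J logger is assumed.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HandlerLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(HandlerLoggingSketch.class);

  public String handleRequest(String name) throws Exception {
    try {
      LOG.trace("Entering handleRequest.");
      String result = doWork(name);      // stands in for the provider call
      LOG.trace("Exiting handleRequest.");
      return result;
    } catch (Exception e) {
      // Log and rethrow: callers and servlet-level handlers still observe it.
      LOG.debug("Exception in handleRequest.", e);
      throw e;
    }
  }

  private String doWork(String name) {
    return "ok:" + name;
  }
}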


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[45/52] [abbrv] hadoop git commit: HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.

Posted by cn...@apache.org.
HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a09bf7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a09bf7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a09bf7c

Branch: refs/heads/HADOOP-13037
Commit: 8a09bf7c19d9d2f6d6853d45e11b0d38c7c67f2a
Parents: 4b32b14
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Oct 11 13:46:07 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Oct 11 13:46:07 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/TrashPolicy.java  | 30 ++++++++++++++++++++
 .../apache/hadoop/fs/TrashPolicyDefault.java    | 15 ++++++++++
 .../java/org/apache/hadoop/fs/TestTrash.java    |  4 +++
 3 files changed, 49 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index bd99db4..157b9ab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -38,6 +38,17 @@ public abstract class TrashPolicy extends Configured {
 
   /**
    * Used to setup the trash policy. Must be implemented by all TrashPolicy
+   * implementations.
+   * @param conf the configuration to be used
+   * @param fs the filesystem to be used
+   * @param home the home directory
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public abstract void initialize(Configuration conf, FileSystem fs, Path home);
+
+  /**
+   * Used to setup the trash policy. Must be implemented by all TrashPolicy
    * implementations. Different from initialize(conf, fs, home), this one does
    * not assume trash always under /user/$USER due to HDFS encryption zone.
    * @param conf the configuration to be used
@@ -105,6 +116,25 @@ public abstract class TrashPolicy extends Configured {
    *
    * @param conf the configuration to be used
    * @param fs the file system to be used
+   * @param home the home directory
+   * @return an instance of TrashPolicy
+   * @deprecated Use {@link #getInstance(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
+    Class<? extends TrashPolicy> trashClass = conf.getClass(
+        "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+    TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
+    trash.initialize(conf, fs, home); // initialize TrashPolicy
+    return trash;
+  }
+
+  /**
+   * Get an instance of the configured TrashPolicy based on the value
+   * of the configuration parameter fs.trash.classname.
+   *
+   * @param conf the configuration to be used
+   * @param fs the file system to be used
    * @return an instance of TrashPolicy
    */
   public static TrashPolicy getInstance(Configuration conf, FileSystem fs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index f4a825c..72222be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -75,6 +75,21 @@ public class TrashPolicyDefault extends TrashPolicy {
     initialize(conf, fs);
   }
 
+  /**
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Override
+  @Deprecated
+  public void initialize(Configuration conf, FileSystem fs, Path home) {
+    this.fs = fs;
+    this.deletionInterval = (long)(conf.getFloat(
+        FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+    this.emptierInterval = (long)(conf.getFloat(
+        FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+   }
+
   @Override
   public void initialize(Configuration conf, FileSystem fs) {
     this.fs = fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 2aba01f..338aff6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -692,6 +692,10 @@ public class TestTrash extends TestCase {
     public TestTrashPolicy() { }
 
     @Override
+    public void initialize(Configuration conf, FileSystem fs, Path home) {
+    }
+
+    @Override
     public void initialize(Configuration conf, FileSystem fs) {
     }
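
A hedged sketch of the caller-side view this revert preserves: the restored
three-argument factory keeps existing code compiling, while the two-argument
overload remains the preferred entry point. The FileSystem setup below is
illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrashPolicy;

public class TrashPolicyCallers {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path home = fs.getHomeDirectory();

    // Restored deprecated form, kept for compatibility with existing callers.
    @SuppressWarnings("deprecation")
    TrashPolicy legacy = TrashPolicy.getInstance(conf, fs, home);

    // Preferred form, which does not assume trash lives under /user/$USER.
    TrashPolicy current = TrashPolicy.getInstance(conf, fs);

    System.out.println(legacy.getClass().getName());
    System.out.println(current.getClass().getName());
  }
}
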
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/52] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

Posted by cn...@apache.org.
HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3059b251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3059b251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3059b251

Branch: refs/heads/HADOOP-13037
Commit: 3059b251d8f37456c5761ecaf73fe6c0c5a59067
Parents: be3cb10
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Oct 7 10:17:50 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Oct 7 10:17:50 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 2482 ++++++++----------
 1 file changed, 1152 insertions(+), 1330 deletions(-)
----------------------------------------------------------------------



---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/52] [abbrv] hadoop git commit: HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei)

Posted by cn...@apache.org.
HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809cfd27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809cfd27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809cfd27

Branch: refs/heads/HADOOP-13037
Commit: 809cfd27a30900d2c0e0e133574de49d0b4538cf
Parents: ecb51b8
Author: Lei Xu <le...@apache.org>
Authored: Tue Oct 11 10:04:46 2016 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue Oct 11 10:04:46 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809cfd27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 3798394..d112a48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -369,7 +369,7 @@ public interface HdfsServerConstants {
   String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
       "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
-      "raw.hdfs.erasurecoding.policy";
+      "system.hdfs.erasurecoding.policy";
 
   long BLOCK_GROUP_INDEX_MASK = 15;
   byte MAX_BLOCKS_IN_GROUP = 16;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/52] [abbrv] hadoop git commit: MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to TeraGen and TeraSort. Contributed by Sammi Chen

Posted by cn...@apache.org.
MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to TeraGen and TeraSort. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bea004ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bea004ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bea004ea

Branch: refs/heads/HADOOP-13037
Commit: bea004eaeb7ba33bf324ef3e7065cfdd614d8198
Parents: ec0b707
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:33:26 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:33:26 2016 +0600

----------------------------------------------------------------------
 .../hadoop/examples/terasort/TeraGen.java       |  3 +++
 .../examples/terasort/TeraOutputFormat.java     | 20 +++++++++++++++++---
 .../hadoop/examples/terasort/TeraSort.java      |  3 +++
 3 files changed, 23 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 22fe344..7fbb22a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -246,6 +246,9 @@ public class TeraGen extends Configured implements Tool {
 
   private static void usage() throws IOException {
     System.err.println("teragen <num rows> <output dir>");
+    System.err.println("If you want to generate data and store them as " +
+        "erasure code striping file, just make sure that the parent dir " +
+        "of <output dir> has erasure code policy set");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index fd3ea78..73c446d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.examples.terasort;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -40,6 +42,7 @@ import org.apache.hadoop.mapreduce.security.TokenCache;
  * An output format that writes the key and value appended together.
  */
 public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
+  private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
   private OutputCommitter committer = null;
 
   /**
@@ -74,10 +77,22 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
       out.write(key.getBytes(), 0, key.getLength());
       out.write(value.getBytes(), 0, value.getLength());
     }
-    
+
     public void close(TaskAttemptContext context) throws IOException {
       if (finalSync) {
-        out.hsync();
+        try {
+          out.hsync();
+        } catch (UnsupportedOperationException e) {
+          /*
+           * Currently, hsync operation on striping file with erasure code
+           * policy is not supported yet. So this is a workaround to make
+           * teragen and terasort to support directory with striping files. In
+           * future, if the hsync operation is supported on striping file, this
+           * workaround should be removed.
+           */
+          LOG.info("Operation hsync is not supported so far on path with " +
+                  "erasure code policy set");
+        }
       }
       out.close();
     }
@@ -135,5 +150,4 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
     }
     return committer;
   }
-
 }
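
A self-contained, hedged sketch of the fallback the hunk above introduces: hsync()
on a stream backed by a striped (erasure-coded) file currently raises
UnsupportedOperationException, so the writer logs and keeps going instead of
failing the task. The output path below is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncFallbackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    try (FSDataOutputStream out = fs.create(new Path("/teragen/part-00000"))) {
      out.write("sample row".getBytes("UTF-8"));
      try {
        out.hsync();   // durable sync works on replicated files
      } catch (UnsupportedOperationException e) {
        // Striped files do not support hsync yet; skip rather than fail.
        System.err.println("hsync not supported on this file; continuing");
      }
    }
  }
}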

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 9beff3e..040d13f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -287,6 +287,9 @@ public class TeraSort extends Configured implements Tool {
     for (TeraSortConfigKeys teraSortConfigKeys : TeraSortConfigKeys.values()) {
       System.err.println(teraSortConfigKeys.toString());
     }
+    System.err.println("If you want to store the output data as " +
+        "erasure code striping file, just make sure that the parent dir " +
+        "of <out> has erasure code policy set");
   }
 
   public int run(String[] args) throws Exception {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/52] [abbrv] hadoop git commit: HADOOP-13150. Avoid use of toString() in output of HDFS ACL shell commands. Contributed by Chris Nauroth.

Posted by cn...@apache.org.
HADOOP-13150. Avoid use of toString() in output of HDFS ACL shell commands. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d330fba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d330fba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d330fba

Branch: refs/heads/HADOOP-13037
Commit: 1d330fbaf6b50802750aa461640773fb788ef884
Parents: f32e9fc
Author: Chris Nauroth <cn...@apache.org>
Authored: Thu Oct 6 12:45:11 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Thu Oct 6 13:19:16 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/fs/permission/AclEntry.java   | 24 ++++++++++++++++++--
 .../hadoop/fs/permission/AclEntryScope.java     |  2 +-
 .../hadoop/fs/permission/AclEntryType.java      | 23 ++++++++++++++++++-
 .../apache/hadoop/fs/permission/AclStatus.java  |  2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |  6 ++---
 .../hdfs/web/resources/AclPermissionParam.java  | 23 ++++++++++++++++---
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |  2 +-
 7 files changed, 70 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
index 45402f8..b42c365 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.util.StringUtils;
  * to create a new instance.
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceStability.Stable
 public class AclEntry {
   private final AclEntryType type;
   private final String name;
@@ -100,13 +100,29 @@ public class AclEntry {
   }
 
   @Override
+  @InterfaceStability.Unstable
   public String toString() {
+    // This currently just delegates to the stable string representation, but it
+    // is permissible for the output of this method to change across versions.
+    return toStringStable();
+  }
+
+  /**
+   * Returns a string representation guaranteed to be stable across versions to
+   * satisfy backward compatibility requirements, such as for shell command
+   * output or serialization.  The format of this string representation matches
+   * what is expected by the {@link #parseAclSpec(String, boolean)} and
+   * {@link #parseAclEntry(String, boolean)} methods.
+   *
+   * @return stable, backward compatible string representation
+   */
+  public String toStringStable() {
     StringBuilder sb = new StringBuilder();
     if (scope == AclEntryScope.DEFAULT) {
       sb.append("default:");
     }
     if (type != null) {
-      sb.append(StringUtils.toLowerCase(type.toString()));
+      sb.append(StringUtils.toLowerCase(type.toStringStable()));
     }
     sb.append(':');
     if (name != null) {
@@ -203,6 +219,8 @@ public class AclEntry {
   /**
    * Parses a string representation of an ACL spec into a list of AclEntry
    * objects. Example: "user::rwx,user:foo:rw-,group::r--,other::---"
+   * The expected format of ACL entries in the string parameter is the same
+   * format produced by the {@link #toStringStable()} method.
    * 
    * @param aclSpec
    *          String representation of an ACL spec.
@@ -228,6 +246,8 @@ public class AclEntry {
 
   /**
    * Parses a string representation of an ACL into a AclEntry object.<br>
+   * The expected format of ACL entries in the string parameter is the same
+   * format produced by the {@link #toStringStable()} method.
    * 
    * @param aclStr
    *          String representation of an ACL.<br>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java
index 6d941e7..64c70aa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Specifies the scope or intended usage of an ACL entry.
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceStability.Stable
 public enum AclEntryScope {
   /**
    * An ACL entry that is inspected during permission checks to enforce

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java
index ffd62d7..002ead2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Specifies the type of an ACL entry.
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceStability.Stable
 public enum AclEntryType {
   /**
    * An ACL entry applied to a specific user.  These ACL entries can be unnamed,
@@ -55,4 +55,25 @@ public enum AclEntryType {
    * of the more specific ACL entry types.
    */
   OTHER;
+
+  @Override
+  @InterfaceStability.Unstable
+  public String toString() {
+    // This currently just delegates to the stable string representation, but it
+    // is permissible for the output of this method to change across versions.
+    return toStringStable();
+  }
+
+  /**
+   * Returns a string representation guaranteed to be stable across versions to
+   * satisfy backward compatibility requirements, such as for shell command
+   * output or serialization.
+   *
+   * @return stable, backward compatible string representation
+   */
+  public String toStringStable() {
+    // The base implementation uses the enum value names, which are public API
+    // and therefore stable.
+    return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
index 9d7500a..131aa19 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
@@ -31,7 +31,7 @@ import com.google.common.collect.Lists;
  * instances are immutable. Use a {@link Builder} to create a new instance.
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceStability.Stable
 public class AclStatus {
   private final String owner;
   private final String group;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
index 9a54040..a5e386c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
@@ -117,7 +117,7 @@ class AclCommands extends FsCommand {
       }
       if (AclUtil.isMinimalAcl(entries)) {
         for (AclEntry entry: entries) {
-          out.println(entry);
+          out.println(entry.toStringStable());
         }
       } else {
         for (AclEntry entry: entries) {
@@ -145,10 +145,10 @@ class AclCommands extends FsCommand {
           out.println(String.format("%s\t#effective:%s", entry,
             effectivePerm.SYMBOL));
         } else {
-          out.println(entry);
+          out.println(entry.toStringStable());
         }
       } else {
-        out.println(entry);
+        out.println(entry.toStringStable());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
index 48f202c..130c8fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.web.resources;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
     .DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
+import java.util.Iterator;
 import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.commons.lang.StringUtils;
 
 /** AclPermission parameter. */
 public class AclPermissionParam extends StringParam {
@@ -63,7 +63,24 @@ public class AclPermissionParam extends StringParam {
   /**
    * @return parse {@code aclEntry} and return aclspec
    */
-  private static String parseAclSpec(List<AclEntry> aclEntry) {
-    return StringUtils.join(aclEntry, ",");
+  private static String parseAclSpec(List<AclEntry> aclEntries) {
+    if (aclEntries == null) {
+      return null;
+    }
+    if (aclEntries.isEmpty()) {
+      return "";
+    }
+    if (aclEntries.size() == 1) {
+      AclEntry entry = aclEntries.get(0);
+      return entry == null ? "" : entry.toStringStable();
+    }
+    StringBuilder sb = new StringBuilder();
+    Iterator<AclEntry> iter = aclEntries.iterator();
+    sb.append(iter.next().toStringStable());
+    while (iter.hasNext()) {
+      AclEntry entry = iter.next();
+      sb.append(',').append(entry == null ? "" : entry.toStringStable());
+    }
+    return sb.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d330fba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 6b6cca6..0542f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -360,7 +360,7 @@ public class JsonUtil {
 
     final List<String> stringEntries = new ArrayList<>();
     for (AclEntry entry : status.getEntries()) {
-      stringEntries.add(entry.toString());
+      stringEntries.add(entry.toStringStable());
     }
     m.put("entries", stringEntries);
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/52] [abbrv] hadoop git commit: HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory.

Posted by cn...@apache.org.
HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a38d118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a38d118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a38d118

Branch: refs/heads/HADOOP-13037
Commit: 6a38d118d86b7907009bcec34f1b788d076f1d1c
Parents: e57fa81
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Oct 7 17:30:30 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Oct 7 17:37:15 2016 -0700

----------------------------------------------------------------------
 .../ContentSummaryComputationContext.java       |  94 ++++++++-
 .../hadoop/hdfs/server/namenode/INode.java      |   1 +
 .../hdfs/server/namenode/INodeDirectory.java    |  11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../hdfs/server/namenode/INodeReference.java    |   2 +
 .../hdfs/server/namenode/INodeSymlink.java      |   1 +
 .../snapshot/DirectorySnapshottableFeature.java |   9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   1 +
 .../snapshot/TestRenameWithSnapshots.java       | 199 +++++++++++++++++++
 10 files changed, 307 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 6df9e75..4208b53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,6 +21,10 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import java.util.HashSet;
+import java.util.Set;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -35,6 +39,8 @@ public class ContentSummaryComputationContext {
   private long yieldCount = 0;
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
+  private Set<INode> includedNodes = new HashSet<>();
+  private Set<INode> deletedSnapshottedNodes = new HashSet<>();
 
   /**
    * Constructor
@@ -51,8 +57,8 @@ public class ContentSummaryComputationContext {
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
-    this.counts = new ContentCounts.Builder().build();
-    this.snapshotCounts = new ContentCounts.Builder().build();
+    setCounts(new ContentCounts.Builder().build());
+    setSnapshotCounts(new ContentCounts.Builder().build());
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
@@ -82,6 +88,7 @@ public class ContentSummaryComputationContext {
     }
 
     // Have we reached the limit?
+    ContentCounts counts = getCounts();
     long currentCount = counts.getFileCount() +
         counts.getSymlinkCount() +
         counts.getDirectoryCount() +
@@ -123,14 +130,22 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public ContentCounts getCounts() {
+  public synchronized ContentCounts getCounts() {
     return counts;
   }
 
+  private synchronized void setCounts(ContentCounts counts) {
+    this.counts = counts;
+  }
+
   public ContentCounts getSnapshotCounts() {
     return snapshotCounts;
   }
 
+  private void setSnapshotCounts(ContentCounts snapshotCounts) {
+    this.snapshotCounts = snapshotCounts;
+  }
+
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
     Preconditions.checkState((bsps != null || fsn != null),
         "BlockStoragePolicySuite must be either initialized or available via" +
@@ -138,4 +153,77 @@ public class ContentSummaryComputationContext {
     return (bsps != null) ? bsps:
         fsn.getBlockManager().getStoragePolicySuite();
   }
+
+  /**
+   * If the node is an INodeReference, resolves it to the actual inode.
+   * Snapshot diffs represent renamed / moved files as different
+   * INodeReferences, but the underlying INode it refers to is consistent.
+   *
+   * @param node
+   * @return The referred INode if there is one, else returns the input
+   * unmodified.
+   */
+  private INode resolveINodeReference(INode node) {
+    if (node.isReference() && node instanceof INodeReference) {
+      return ((INodeReference)node).getReferredINode();
+    }
+    return node;
+  }
+
+  /**
+   * Reports that a node is about to be included in this summary. Can be used
+   * either to simply report that a node has been included, or to check whether
+   * a node has already been included.
+   *
+   * @param node
+   * @return true if node has already been included
+   */
+  public boolean nodeIncluded(INode node) {
+    INode resolvedNode = resolveINodeReference(node);
+    synchronized (includedNodes) {
+      if (!includedNodes.contains(resolvedNode)) {
+        includedNodes.add(resolvedNode);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Schedules a node that is listed as DELETED in a snapshot's diff to be
+   * included in the summary at the end of computation. See
+   * {@link #tallyDeletedSnapshottedINodes()} for more context.
+   *
+   * @param node
+   */
+  public void reportDeletedSnapshottedNode(INode node) {
+    deletedSnapshottedNodes.add(node);
+  }
+
+  /**
+   * Finalizes the computation by including all nodes that were reported as
+   * deleted by a snapshot but have not already been included due to other
+   * references.
+   * <p>
+   * Nodes that get renamed are listed in the snapshot's diff as both DELETED
+   * under the old name and CREATED under the new name. The computation
+   * relies on nodes to report themselves as being included (via
+   * {@link #nodeIncluded(INode)}) as the only reliable way to determine which
+   * nodes were renamed within the tree being summarized and which were
+   * removed (either by deletion or being renamed outside of the tree).
+   */
+  public synchronized void tallyDeletedSnapshottedINodes() {
+    /* Temporarily create a new counts object so these results can then be
+    added to both counts and snapshotCounts */
+    ContentCounts originalCounts = getCounts();
+    setCounts(new ContentCounts.Builder().build());
+    for (INode node : deletedSnapshottedNodes) {
+      if (!nodeIncluded(node)) {
+        node.computeContentSummary(Snapshot.CURRENT_STATE_ID, this);
+      }
+    }
+    originalCounts.addContents(getCounts());
+    snapshotCounts.addContents(getCounts());
+    setCounts(originalCounts);
+  }
 }
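
A minimal, hedged sketch (illustrative names, plain strings standing in for resolved
inodes) of the "report once, skip on repeat" idiom that nodeIncluded() adds above,
which is what prevents a renamed file from being counted under both its old and its
new name.

import java.util.HashSet;
import java.util.Set;

public class IncludedSetSketch<T> {
  private final Set<T> included = new HashSet<>();

  /** @return true if the node was already reported by an earlier call. */
  public synchronized boolean nodeIncluded(T resolvedNode) {
    // Set.add returns false when the element is already present.
    return !included.add(resolvedNode);
  }

  public static void main(String[] args) {
    IncludedSetSketch<String> ctx = new IncludedSetSketch<>();
    System.out.println(ctx.nodeIncluded("inode-1"));  // false: count it now
    System.out.println(ctx.nodeIncluded("inode-1"));  // true: same inode seen via rename, skip
  }
}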

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c6258a1..e1db990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -429,6 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
+    summary.tallyDeletedSnapshottedINodes();
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
     final QuotaCounts q = getQuotaCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 24c8815..b6e2713 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -628,17 +628,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
-      final ContentCounts counts = new ContentCounts.Builder().build();
-      // if the getContentSummary call is against a non-snapshot path, the
-      // computation should include all the deleted files/directories
-      sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
-          counts);
-      summary.getCounts().addContents(counts);
-      // Also add ContentSummary to snapshotCounts (So we can extract it
-      // later from the ContentSummary of all).
-      summary.getSnapshotCounts().addContents(counts);
+      sf.computeContentSummary4Snapshot(summary);
     }
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
     if (q != null && snapshotId == Snapshot.CURRENT_STATE_ID) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 12ead7f..37f97db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -770,6 +770,7 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       int snapshotId, final ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     final ContentCounts counts = summary.getCounts();
     counts.addContent(Content.FILE, 1);
     final long fileLen = computeFileSize(snapshotId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b85237..56aaf8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -315,6 +315,7 @@ public abstract class INodeReference extends INode {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     return referred.computeContentSummary(snapshotId, summary);
   }
 
@@ -504,6 +505,7 @@ public abstract class INodeReference extends INode {
     @Override
     public final ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
+      summary.nodeIncluded(this);
       final int s = snapshotId < lastSnapshotId ? snapshotId : lastSnapshotId;
       // only count storagespace for WithName
       final QuotaCounts q = computeQuotaUsage(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index c76bea0..1223f4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -96,6 +96,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       final ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 39db979..fa7bace 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
+import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;
@@ -220,11 +220,12 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
   }
 
   @Override
-  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+  public void computeContentSummary4Snapshot(ContentSummaryComputationContext
+                                                   context) {
+    ContentCounts counts = context.getCounts();
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
-    super.computeContentSummary4Snapshot(bsps, counts);
+    super.computeContentSummary4Snapshot(context);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3b..9addbfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
-import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -629,18 +628,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
     return counts;
   }
 
-  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
-    // Create a new blank summary context for blocking processing of subtree.
-    ContentSummaryComputationContext summary = 
-        new ContentSummaryComputationContext(bsps);
+  public void computeContentSummary4Snapshot(
+      ContentSummaryComputationContext context) {
     for(DirectoryDiff d : diffs) {
-      for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeContentSummary(Snapshot.CURRENT_STATE_ID, summary);
+      for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
+        context.reportDeletedSnapshottedNode(deletedNode);
       }
     }
-    // Add the counts from deleted trees.
-    counts.addContents(summary.getCounts());
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index e98e766..832a339 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -177,6 +177,7 @@ public class Snapshot implements Comparable<byte[]> {
     @Override
     public ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
+      summary.nodeIncluded(this);
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 91eec78..d1b3aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -36,8 +36,10 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -2411,4 +2413,201 @@ public class TestRenameWithSnapshots {
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
   }
+
+  private void checkSpaceConsumed(String message, Path directory,
+                                  long expectedSpace) throws Exception {
+    ContentSummary summary = hdfs.getContentSummary(directory);
+    assertEquals(message, expectedSpace, summary.getSpaceConsumed());
+  }
+
+  /**
+   * Runs through various combinations of renames, deletes, appends and other
+   * operations in a snapshotted directory and ensures disk usage summaries
+   * (e.g. du -s) are computed correctly.
+   *
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testDu() throws Exception {
+    File tempFile = File.createTempFile("testDu-", ".tmp");
+    tempFile.deleteOnExit();
+
+    final FileSystem localfs = FileSystem.getLocal(conf);
+    final Path localOriginal = new Path(tempFile.getPath());
+    final Path dfsRoot = new Path("/testDu");
+    final Path dfsOriginal = new Path(dfsRoot, "original");
+    final Path dfsRenamed1 = new Path(dfsRoot, "renamed1");
+    final Path dfsRenamed2 = new Path(dfsRoot, "renamed2");
+    final Path dfsAppended = new Path(dfsRoot, "appended");
+
+    /* We test with a single block's worth of data. If we don't use a multiple
+    of BLOCKSIZE, append operations will modify snapshotted blocks and other
+    factors that we would then have to account for will come into play. */
+    final long spaceIncrement = BLOCKSIZE * REPL;
+    final byte[] appendData = new byte[(int) BLOCKSIZE];
+    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
+
+    FSDataOutputStream out = null;
+    long expectedSpace = 0;
+
+    hdfs.mkdirs(dfsRoot);
+    checkSpaceConsumed("Du is wrong immediately",
+        dfsRoot, 0L);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after creating / copying file",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s0");
+    checkSpaceConsumed("Du is wrong after snapshotting",
+        dfsRoot, expectedSpace);
+
+    hdfs.rename(dfsOriginal, dfsRenamed1);
+    checkSpaceConsumed("Du is wrong after 1 rename",
+        dfsRoot, expectedSpace);
+
+    hdfs.rename(dfsRenamed1, dfsRenamed2);
+    checkSpaceConsumed("Du is wrong after 2 renames",
+        dfsRoot, expectedSpace);
+
+    hdfs.delete(dfsRenamed2, false);
+    checkSpaceConsumed("Du is wrong after deletion",
+        dfsRoot, expectedSpace);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after replacing a renamed file",
+        dfsRoot, expectedSpace);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsAppended);
+    expectedSpace += spaceIncrement;
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s1");
+
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after 1 snapshot + append",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s2");
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after 2 snapshot + appends",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s3");
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    hdfs.rename(dfsAppended, dfsRenamed1);
+    checkSpaceConsumed("Du is wrong after snapshot, append, & rename",
+        dfsRoot, expectedSpace);
+    hdfs.delete(dfsRenamed1, false);
+    // everything but the last append is snapshotted
+    expectedSpace -= spaceIncrement;
+    checkSpaceConsumed("Du is wrong after snapshot, append, delete & rename",
+        dfsRoot, expectedSpace);
+
+    hdfs.delete(dfsOriginal, false);
+    hdfs.deleteSnapshot(dfsRoot, "s0");
+    hdfs.deleteSnapshot(dfsRoot, "s1");
+    hdfs.deleteSnapshot(dfsRoot, "s2");
+    hdfs.deleteSnapshot(dfsRoot, "s3");
+    expectedSpace = 0;
+    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
+        dfsRoot, expectedSpace);
+  }
+
+  /**
+   * Runs through various combinations of renames, deletes, appends and other
+   * operations between two snapshotted directories and ensures disk usage
+   * summaries (e.g. du -s) are computed correctly.
+   *
+   * This test currently assumes some incorrect behavior when files have been
+   * moved between subdirectories of the directory being queried. In the cases
+   * below, only 1 block worth of data should ever actually be used. However,
+   * if 2-3 subdirectories contain, or contained when snapshotted, the same
+   * file, that file will be counted 2-3 times, respectively, since each
+   * directory is computed independently and recursively.
+   *
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testDuMultipleDirs() throws Exception {
+    File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp");
+    tempFile.deleteOnExit();
+
+    final FileSystem localfs = FileSystem.getLocal(conf);
+    final Path localOriginal = new Path(tempFile.getPath());
+    final Path dfsRoot = new Path("/testDuMultipleDirs");
+    final Path snapshottable1 = new Path(dfsRoot, "snapshottable1");
+    final Path snapshottable2 = new Path(dfsRoot, "snapshottable2");
+    final Path nonsnapshottable = new Path(dfsRoot, "nonsnapshottable");
+    final Path subdirectory = new Path(snapshottable1, "subdirectory");
+    final Path dfsOriginal = new Path(snapshottable1, "file");
+    final Path renamedNonsnapshottable = new Path(nonsnapshottable, "file");
+    final Path renamedSnapshottable = new Path(snapshottable2, "file");
+    final Path renamedSubdirectory = new Path(subdirectory, "file");
+
+    /* We test with a single block's worth of data. If we don't use a multiple
+    of BLOCKSIZE, append operations will modify snapshotted blocks and other
+    factors that we would then have to account for will come into play. */
+    final long spaceConsumed = BLOCKSIZE * REPL;
+    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
+
+    hdfs.mkdirs(snapshottable1);
+    hdfs.mkdirs(snapshottable2);
+    hdfs.mkdirs(nonsnapshottable);
+    hdfs.mkdirs(subdirectory);
+    checkSpaceConsumed("Du is wrong immediately",
+        dfsRoot, 0L);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    checkSpaceConsumed("Du is wrong after creating / copying file",
+        snapshottable1, spaceConsumed);
+
+    SnapshotTestHelper.createSnapshot(hdfs, snapshottable1, "s1");
+    checkSpaceConsumed("Du is wrong in original dir after 1st snapshot",
+        snapshottable1, spaceConsumed);
+
+    hdfs.rename(dfsOriginal, renamedNonsnapshottable);
+    checkSpaceConsumed("Du is wrong in original dir after 1st rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 1st rename",
+        nonsnapshottable, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 1st rename",
+        dfsRoot, spaceConsumed);
+
+    hdfs.rename(renamedNonsnapshottable, renamedSnapshottable);
+    checkSpaceConsumed("Du is wrong in original dir after 2nd rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 2nd rename",
+        nonsnapshottable, 0);
+    checkSpaceConsumed("Du is wrong in snapshottable dir after 2nd rename",
+        snapshottable2, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 2nd rename",
+        dfsRoot, spaceConsumed);
+
+    SnapshotTestHelper.createSnapshot(hdfs, snapshottable2, "s2");
+    hdfs.rename(renamedSnapshottable, renamedSubdirectory);
+    checkSpaceConsumed("Du is wrong in original dir after 3rd rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in snapshottable dir after 3rd rename",
+        snapshottable2, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in original subdirectory after 3rd rename",
+        subdirectory, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 3rd rename",
+        dfsRoot, spaceConsumed);
+
+    hdfs.delete(renamedSubdirectory, false);
+    hdfs.deleteSnapshot(snapshottable1, "s1");
+    hdfs.deleteSnapshot(snapshottable2, "s2");
+    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
+        dfsRoot, 0);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/52] [abbrv] hadoop git commit: HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.

Posted by cn...@apache.org.
HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b84c4891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b84c4891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b84c4891

Branch: refs/heads/HADOOP-13037
Commit: b84c4891f9eca8d56593e48e9df88be42e24220d
Parents: 3c9a010
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Oct 11 17:05:00 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Oct 11 17:05:00 2016 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/CommandsManual.md            | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84c4891/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 4d7d504..2ece71a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -202,7 +202,9 @@ Manage keys via the KeyProvider. For details on KeyProviders, see the [Transpare
 
 Providers frequently require that a password or other secret is supplied. If the provider requires a password and is unable to find one, it will use a default password and emit a warning message that the default password is being used. If the `-strict` flag is supplied, the warning message becomes an error message and the command returns immediately with an error status.
 
-NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider) does not support uppercase key names.
+NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider) do not support uppercase key names.
+
+NOTE: Some KeyProviders do not directly execute a key deletion (e.g. they may perform a soft-delete instead, or delay the actual deletion to prevent mistakes). In these cases, one may encounter errors when creating or deleting a key with the same name after deleting it. Please check the underlying KeyProvider for details.
 
 ### `trace`
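
To make the new deletion caveat above concrete, here is a hedged Java sketch
(not part of this commit) that exercises the same behavior through the
KeyProvider API. The key name "demo-key" is hypothetical, and the sketch
assumes at least one provider is configured under
hadoop.security.key.provider.path; providers that soft-delete or defer
deletion may fail the re-create step.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KeyRecreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: a provider is configured via hadoop.security.key.provider.path.
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    KeyProvider provider = providers.get(0);

    final String keyName = "demo-key";  // hypothetical key name
    provider.createKey(keyName, KeyProvider.options(conf));
    provider.flush();

    provider.deleteKey(keyName);
    provider.flush();

    try {
      // With a provider that soft-deletes or defers deletion, the old name may
      // still be reserved here and the re-create can fail, as the NOTE warns.
      provider.createKey(keyName, KeyProvider.options(conf));
      provider.flush();
      System.out.println("re-created " + keyName);
    } catch (IOException e) {
      System.err.println("re-create failed: " + e.getMessage());
    }
  }
}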
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/52] [abbrv] hadoop git commit: YARN-5659. getPathFromYarnURL should use standard methods. Contributed by Sergey Shelukhin.

Posted by cn...@apache.org.
YARN-5659. getPathFromYarnURL should use standard methods. Contributed by Sergey Shelukhin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/459a4833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/459a4833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/459a4833

Branch: refs/heads/HADOOP-13037
Commit: 459a4833a90437a52787a41c2759a4b18cfe411c
Parents: ebd4f39
Author: Junping Du <ju...@apache.org>
Authored: Fri Oct 7 07:46:08 2016 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri Oct 7 07:46:08 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/api/records/URL.java | 58 ++++++------
 .../apache/hadoop/yarn/api/records/TestURL.java | 99 ++++++++++++++++++++
 2 files changed, 130 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/459a4833/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
index aa28585..19bfc32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.yarn.api.records;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.util.Records;
@@ -52,7 +56,7 @@ public abstract class URL {
   @Public
   @Stable
   public abstract String getScheme();
-  
+
   /**
    * Set the scheme of the URL
    * @param scheme scheme of the URL
@@ -68,7 +72,7 @@ public abstract class URL {
   @Public
   @Stable
   public abstract String getUserInfo();
-  
+
   /**
    * Set the user info of the URL.
    * @param userInfo user info of the URL
@@ -84,7 +88,7 @@ public abstract class URL {
   @Public
   @Stable
   public abstract String getHost();
-  
+
   /**
    * Set the host of the URL.
    * @param host host of the URL
@@ -100,7 +104,7 @@ public abstract class URL {
   @Public
   @Stable
   public abstract int getPort();
-  
+
   /**
    * Set the port of the URL
    * @param port port of the URL
@@ -116,7 +120,7 @@ public abstract class URL {
   @Public
   @Stable
   public abstract String getFile();
-  
+
   /**
    * Set the file of the URL.
    * @param file file of the URL
@@ -124,32 +128,20 @@ public abstract class URL {
   @Public
   @Stable
   public abstract void setFile(String file);
-  
+
   @Public
   @Stable
   public Path toPath() throws URISyntaxException {
-    String scheme = getScheme() == null ? "" : getScheme();
-    
-    String authority = "";
-    if (getHost() != null) {
-      authority = getHost();
-      if (getUserInfo() != null) {
-        authority = getUserInfo() + "@" + authority;
-      }
-      if (getPort() > 0) {
-        authority += ":" + getPort();
-      }
-    }
-    
-    return new Path(
-        (new URI(scheme, authority, getFile(), null, null)).normalize());
+    return new Path(new URI(getScheme(), getUserInfo(),
+      getHost(), getPort(), getFile(), null, null));
   }
-  
-  @Public
-  @Stable
-  public static URL fromURI(URI uri) {
+
+
+  @Private
+  @VisibleForTesting
+  public static URL fromURI(URI uri, Configuration conf) {
     URL url =
-        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
+        RecordFactoryProvider.getRecordFactory(conf).newRecordInstance(
             URL.class);
     if (uri.getHost() != null) {
       url.setHost(uri.getHost());
@@ -162,7 +154,19 @@ public abstract class URL {
     url.setFile(uri.getPath());
     return url;
   }
-  
+
+  @Public
+  @Stable
+  public static URL fromURI(URI uri) {
+    return fromURI(uri, null);
+  }
+
+  @Private
+  @VisibleForTesting
+  public static URL fromPath(Path path, Configuration conf) {
+    return fromURI(path.toUri(), conf);
+  }
+
   @Public
   @Stable
   public static URL fromPath(Path path) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/459a4833/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestURL.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestURL.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestURL.java
new file mode 100644
index 0000000..b464eca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestURL.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.junit.Test;
+
+/** Test for the URL class. */
+public class TestURL {
+
+  @Test
+  public void testConversion() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS,
+        RecordFactoryForTest.class.getName());
+    String[] pathStrs = new String[] {"/", ".", "foo/bar", "foo",
+        "/foo/bar/baz", "moo://bar/baz", "moo://bar:123/baz", "moo:///foo",
+        "moo://foo@bar:123/baz/foo", "moo://foo@bar/baz/foo", "moo://foo@bar",
+        "moo://foo:123"};
+    for (String s : pathStrs) {
+      Path path = new Path(s);
+      assertEquals(path, URL.fromPath(path, conf).toPath());
+    }
+
+    Path p = new Path("/foo/bar#baz");
+    assertEquals(p, URL.fromPath(p, conf).toPath());
+  }
+
+  /** Record factory that instantiates URLs for this test. */
+  public static class RecordFactoryForTest implements RecordFactory {
+    private static final RecordFactoryForTest SELF =
+        new RecordFactoryForTest();
+    @SuppressWarnings("unchecked")
+    @Override
+    public <T> T newRecordInstance(Class<T> clazz) {
+      return (T) new URLForTest();
+    }
+    public static RecordFactory get() {
+      return SELF;
+    }
+  }
+
+  /** URL fake for this test; sidesteps proto-URL dependency. */
+  public static class URLForTest extends URL {
+    private String scheme, userInfo, host, file;
+    private int port;
+    public String getScheme() {
+      return scheme;
+    }
+    public void setScheme(String scheme) {
+      this.scheme = scheme;
+    }
+    public String getUserInfo() {
+      return userInfo;
+    }
+    public void setUserInfo(String userInfo) {
+      this.userInfo = userInfo;
+    }
+    public String getHost() {
+      return host;
+    }
+    public void setHost(String host) {
+      this.host = host;
+    }
+    public String getFile() {
+      return file;
+    }
+    public void setFile(String file) {
+      this.file = file;
+    }
+    public int getPort() {
+      return port;
+    }
+    public void setPort(int port) {
+      this.port = port;
+    }
+  }
+
+}
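
As a side note on the toPath() rewrite above: a short plain-JDK sketch (not
YARN code; the "moo://" style URIs are made up, echoing the test data) of why
the seven-argument java.net.URI constructor can replace the hand-rolled
authority string building.

import java.net.URI;
import java.net.URISyntaxException;

public class UriAssemblySketch {
  public static void main(String[] args) throws URISyntaxException {
    // The seven-argument constructor assembles "userInfo@host:port" itself.
    URI assembled = new URI("moo", "foo", "bar", 123, "/baz/foo", null, null);
    System.out.println(assembled);    // moo://foo@bar:123/baz/foo

    // Absent components are passed as null / -1 and simply omitted.
    URI pathOnly = new URI(null, null, null, -1, "/only/a/path", null, null);
    System.out.println(pathOnly);     // /only/a/path
  }
}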


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/52] [abbrv] hadoop git commit: HDFS-10969. Fix typos in hdfs-default.xml Contributed by Yiqun Lin

Posted by cn...@apache.org.
HDFS-10969. Fix typos in hdfs-default.xml Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be3cb10f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be3cb10f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be3cb10f

Branch: refs/heads/HADOOP-13037
Commit: be3cb10f5301c2d526d0ba37dbe82f426683a801
Parents: c183b9d
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Oct 7 22:18:40 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Fri Oct 7 22:18:40 2016 +0530

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml            | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be3cb10f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ebaefde..672b597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -725,7 +725,7 @@
 
   Setting this limit to 1000 disables compiler thread throttling. Only
   values between 1 and 1000 are valid. Setting an invalid value will result
-  in the throttle being disbled and an error message being logged. 1000 is
+  in the throttle being disabled and an error message being logged. 1000 is
   the default setting.
   </description>
 </property>
@@ -2559,7 +2559,7 @@
   <name>dfs.block.local-path-access.user</name>
   <value></value>
   <description>
-    Comma separated list of the users allowd to open block files
+    Comma separated list of the users allowed to open block files
     on legacy short-circuit local read.
   </description>
 </property>
@@ -3650,7 +3650,7 @@
   <name>dfs.datanode.transferTo.allowed</name>
   <value>true</value>
   <description>
-    If false, break block tranfers on 32-bit machines greater than
+    If false, break block transfers on 32-bit machines greater than
     or equal to 2GB into smaller chunks.
   </description>
 </property>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/52] [abbrv] hadoop git commit: HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce indentation. Contributed by Huafeng Wang

Posted by cn...@apache.org.
HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce indentation. Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d59b18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d59b18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d59b18d

Branch: refs/heads/HADOOP-13037
Commit: 3d59b18d49d98a293ae14c5b89d515ef83cc4ff7
Parents: bea004e
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:53:36 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:53:36 2016 +0600

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   | 98 ++++++++++----------
 1 file changed, 49 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d59b18d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 329859d..e8711b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -946,60 +946,60 @@ public class UserGroupInformation {
 
   /**Spawn a thread to do periodic renewals of kerberos credentials*/
   private void spawnAutoRenewalThreadForUserCreds() {
-    if (isSecurityEnabled()) {
-      //spawn thread only if we have kerb credentials
-      if (user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS &&
-          !isKeytab) {
-        Thread t = new Thread(new Runnable() {
-          
-          @Override
-          public void run() {
-            String cmd = conf.get("hadoop.kerberos.kinit.command",
-                                  "kinit");
-            KerberosTicket tgt = getTGT();
+    if (!isSecurityEnabled()
+        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
+        || isKeytab) {
+      return;
+    }
+
+    //spawn thread only if we have kerb credentials
+    Thread t = new Thread(new Runnable() {
+
+      @Override
+      public void run() {
+        String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+        KerberosTicket tgt = getTGT();
+        if (tgt == null) {
+          return;
+        }
+        long nextRefresh = getRefreshTime(tgt);
+        while (true) {
+          try {
+            long now = Time.now();
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Current time is " + now);
+              LOG.debug("Next refresh is " + nextRefresh);
+            }
+            if (now < nextRefresh) {
+              Thread.sleep(nextRefresh - now);
+            }
+            Shell.execCommand(cmd, "-R");
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("renewed ticket");
+            }
+            reloginFromTicketCache();
+            tgt = getTGT();
             if (tgt == null) {
+              LOG.warn("No TGT after renewal. Aborting renew thread for " +
+                  getUserName());
               return;
             }
-            long nextRefresh = getRefreshTime(tgt);
-            while (true) {
-              try {
-                long now = Time.now();
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Current time is " + now);
-                  LOG.debug("Next refresh is " + nextRefresh);
-                }
-                if (now < nextRefresh) {
-                  Thread.sleep(nextRefresh - now);
-                }
-                Shell.execCommand(cmd, "-R");
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("renewed ticket");
-                }
-                reloginFromTicketCache();
-                tgt = getTGT();
-                if (tgt == null) {
-                  LOG.warn("No TGT after renewal. Aborting renew thread for " +
-                           getUserName());
-                  return;
-                }
-                nextRefresh = Math.max(getRefreshTime(tgt),
-                                       now + kerberosMinSecondsBeforeRelogin);
-              } catch (InterruptedException ie) {
-                LOG.warn("Terminating renewal thread");
-                return;
-              } catch (IOException ie) {
-                LOG.warn("Exception encountered while running the" +
-                    " renewal command. Aborting renew thread. " + ie);
-                return;
-              }
-            }
+            nextRefresh = Math.max(getRefreshTime(tgt),
+              now + kerberosMinSecondsBeforeRelogin);
+          } catch (InterruptedException ie) {
+            LOG.warn("Terminating renewal thread");
+            return;
+          } catch (IOException ie) {
+            LOG.warn("Exception encountered while running the" +
+                " renewal command. Aborting renew thread. " + ie);
+            return;
           }
-        });
-        t.setDaemon(true);
-        t.setName("TGT Renewer for " + getUserName());
-        t.start();
+        }
       }
-    }
+    });
+    t.setDaemon(true);
+    t.setName("TGT Renewer for " + getUserName());
+    t.start();
   }
   /**
    * Log a user in from a keytab file. Loads a user identity from a keytab


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org