Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/07 00:52:23 UTC

[1/7] hadoop git commit: YARN-3882. AggregatedLogFormat should close aclScanner and ownerScanner after creating them. Contributed by zhihai xu

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 dcde7e4a2 -> 60343661c


YARN-3882. AggregatedLogFormat should close aclScanner and ownerScanner
after creating them. Contributed by zhihai xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/688617d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/688617d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/688617d6

Branch: refs/heads/HADOOP-12111
Commit: 688617d6d7e6377a37682b5676b805cc6e8cf3f0
Parents: 2eae130
Author: Xuan <xg...@apache.org>
Authored: Sat Jul 4 21:51:58 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Sat Jul 4 21:51:58 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../logaggregation/AggregatedLogFormat.java     | 83 +++++++++++---------
 2 files changed, 49 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
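
The leak being fixed: getApplicationOwner() and getApplicationAcls() each
created a TFile scanner and abandoned it on the early return, leaving the
underlying streams open. The patch wraps each scan in try/finally and closes
through IOUtils.cleanup, which logs close failures rather than propagating
them. For comparison, the same guarantee can be written with
try-with-resources, since TFile.Reader.Scanner implements java.io.Closeable;
a minimal sketch of that alternative shape (illustrative only, not the form
the patch uses):

    public String getApplicationOwner() throws IOException {
      // the scanner is closed on every exit path: the early return in the
      // loop, the fall-through return, and any exception from readFields
      try (TFile.Reader.Scanner ownerScanner = reader.createScanner()) {
        LogKey key = new LogKey();
        while (!ownerScanner.atEnd()) {
          TFile.Reader.Scanner.Entry entry = ownerScanner.entry();
          key.readFields(entry.getKeyStream());
          if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) {
            return entry.getValueStream().readUTF();
          }
          ownerScanner.advance();
        }
        return null;
      }
    }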


http://git-wip-us.apache.org/repos/asf/hadoop/blob/688617d6/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2009b47..803d725 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -589,6 +589,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3875. FSSchedulerNode#reserveResource() doesn't print Application Id
     properly in log. (Bibin A Chundatt via devaraj)
 
+    YARN-3882. AggregatedLogFormat should close aclScanner and ownerScanner
+    after create them. (zhihai xu via xgong)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/688617d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index debe770..c9453b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -489,18 +489,23 @@ public class AggregatedLogFormat {
      * @throws IOException
      */
     public String getApplicationOwner() throws IOException {
-      TFile.Reader.Scanner ownerScanner = reader.createScanner();
-      LogKey key = new LogKey();
-      while (!ownerScanner.atEnd()) {
-        TFile.Reader.Scanner.Entry entry = ownerScanner.entry();
-        key.readFields(entry.getKeyStream());
-        if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) {
-          DataInputStream valueStream = entry.getValueStream();
-          return valueStream.readUTF();
+      TFile.Reader.Scanner ownerScanner = null;
+      try {
+        ownerScanner = reader.createScanner();
+        LogKey key = new LogKey();
+        while (!ownerScanner.atEnd()) {
+          TFile.Reader.Scanner.Entry entry = ownerScanner.entry();
+          key.readFields(entry.getKeyStream());
+          if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) {
+            DataInputStream valueStream = entry.getValueStream();
+            return valueStream.readUTF();
+          }
+          ownerScanner.advance();
         }
-        ownerScanner.advance();
+        return null;
+      } finally {
+        IOUtils.cleanup(LOG, ownerScanner);
       }
-      return null;
     }
 
     /**
@@ -513,38 +518,42 @@ public class AggregatedLogFormat {
     public Map<ApplicationAccessType, String> getApplicationAcls()
         throws IOException {
       // TODO Seek directly to the key once a comparator is specified.
-      TFile.Reader.Scanner aclScanner = reader.createScanner();
-      LogKey key = new LogKey();
-      Map<ApplicationAccessType, String> acls =
-          new HashMap<ApplicationAccessType, String>();
-      while (!aclScanner.atEnd()) {
-        TFile.Reader.Scanner.Entry entry = aclScanner.entry();
-        key.readFields(entry.getKeyStream());
-        if (key.toString().equals(APPLICATION_ACL_KEY.toString())) {
-          DataInputStream valueStream = entry.getValueStream();
-          while (true) {
-            String appAccessOp = null;
-            String aclString = null;
-            try {
-              appAccessOp = valueStream.readUTF();
-            } catch (EOFException e) {
-              // Valid end of stream.
-              break;
-            }
-            try {
-              aclString = valueStream.readUTF();
-            } catch (EOFException e) {
-              throw new YarnRuntimeException("Error reading ACLs", e);
+      TFile.Reader.Scanner aclScanner = null;
+      try {
+        aclScanner = reader.createScanner();
+        LogKey key = new LogKey();
+        Map<ApplicationAccessType, String> acls =
+            new HashMap<ApplicationAccessType, String>();
+        while (!aclScanner.atEnd()) {
+          TFile.Reader.Scanner.Entry entry = aclScanner.entry();
+          key.readFields(entry.getKeyStream());
+          if (key.toString().equals(APPLICATION_ACL_KEY.toString())) {
+            DataInputStream valueStream = entry.getValueStream();
+            while (true) {
+              String appAccessOp = null;
+              String aclString = null;
+              try {
+                appAccessOp = valueStream.readUTF();
+              } catch (EOFException e) {
+                // Valid end of stream.
+                break;
+              }
+              try {
+                aclString = valueStream.readUTF();
+              } catch (EOFException e) {
+                throw new YarnRuntimeException("Error reading ACLs", e);
+              }
+              acls.put(ApplicationAccessType.valueOf(appAccessOp), aclString);
             }
-            acls.put(ApplicationAccessType.valueOf(appAccessOp), aclString);
           }
-
+          aclScanner.advance();
         }
-        aclScanner.advance();
+        return acls;
+      } finally {
+        IOUtils.cleanup(LOG, aclScanner);
       }
-      return acls;
     }
-    
+
     /**
      * Read the next key and return the value-stream.
      * 


[5/7] hadoop git commit: HADOOP-12045. Enable LocalFileSystem#setTimes to change atime. Contributed by Kazuho Fujii.

Posted by aw...@apache.org.
HADOOP-12045. Enable LocalFileSystem#setTimes to change atime. Contributed by Kazuho Fujii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed1e3ce4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed1e3ce4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed1e3ce4

Branch: refs/heads/HADOOP-12111
Commit: ed1e3ce482f679ae2fad43a203f6578d7af59327
Parents: fc92d3e
Author: cnauroth <cn...@apache.org>
Authored: Mon Jul 6 13:40:15 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Mon Jul 6 13:40:15 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../apache/hadoop/fs/RawLocalFileSystem.java    | 36 ++++++-----
 .../org/apache/hadoop/fs/SymlinkBaseTest.java   | 45 +++++++++++---
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 26 ++++++--
 .../apache/hadoop/fs/TestSymlinkLocalFS.java    | 18 ++++++
 .../hadoop/fs/shell/TestCopyPreserveFlag.java   | 63 ++++++++++----------
 6 files changed, 132 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
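
The mechanics of the change: java.io.File exposes only
lastModified()/setLastModified(), so atime was out of reach; the
java.nio.file attribute API reads both timestamps through BasicFileAttributes
and writes them through BasicFileAttributeView.setTimes, where a null
FileTime means "leave this field unchanged", mirroring the -1 convention of
FileSystem#setTimes. A self-contained sketch of the two calls the patch
relies on (the path and times below are placeholder values):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.attribute.BasicFileAttributeView;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.nio.file.attribute.FileTime;

    public class SetTimesDemo {
      public static void main(String[] args) throws IOException {
        Path p = Paths.get(args[0]);
        BasicFileAttributeView view =
            Files.getFileAttributeView(p, BasicFileAttributeView.class);
        // set mtime and atime; the null third argument leaves creation
        // time unchanged
        view.setTimes(FileTime.fromMillis(12345000L),
                      FileTime.fromMillis(23456000L),
                      null);
        BasicFileAttributes attrs =
            Files.readAttributes(p, BasicFileAttributes.class);
        System.out.println("mtime=" + attrs.lastModifiedTime()
            + " atime=" + attrs.lastAccessTime());
      }
    }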


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1d737e5..f2f9d5c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -675,6 +675,9 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
 
+    HADOOP-12045. Enable LocalFileSystem#setTimes to change atime.
+    (Kazuho Fujii via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 96d1ab4..ac65b62 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -33,6 +33,10 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.BasicFileAttributeView;
+import java.nio.file.attribute.FileTime;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.StringTokenizer;
@@ -644,9 +648,14 @@ public class RawLocalFileSystem extends FileSystem {
       return !super.getOwner().isEmpty(); 
     }
     
-    DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
+    DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
+      throws IOException {
       super(f.length(), f.isDirectory(), 1, defaultBlockSize,
-          f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(),
+          f.lastModified(),
+          Files.readAttributes(f.toPath(),
+            BasicFileAttributes.class).lastAccessTime().toMillis(),
+          null, null, null,
+          new Path(f.getPath()).makeQualified(fs.getUri(),
             fs.getWorkingDirectory()));
     }
     
@@ -758,25 +767,20 @@ public class RawLocalFileSystem extends FileSystem {
   }
  
   /**
-   * Sets the {@link Path}'s last modified time <em>only</em> to the given
-   * valid time.
+   * Sets the {@link Path}'s last modified time and last access time to
+   * the given valid times.
    *
    * @param mtime the modification time to set (only if greater than zero).
-   * @param atime currently ignored.
-   * @throws IOException if setting the last modified time fails.
+   * @param atime the access time to set (only if greater than zero).
+   * @throws IOException if setting the times fails.
    */
   @Override
   public void setTimes(Path p, long mtime, long atime) throws IOException {
-    File f = pathToFile(p);
-    if(mtime >= 0) {
-      if(!f.setLastModified(mtime)) {
-        throw new IOException(
-          "couldn't set last-modified time to " +
-          mtime +
-          " for " +
-          f.getAbsolutePath());
-      }
-    }
+    BasicFileAttributeView view = Files.getFileAttributeView(
+        pathToFile(p).toPath(), BasicFileAttributeView.class);
+    FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null;
+    FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null;
+    view.setTimes(fmtime, fatime, null);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
index 4d6485d..8018946 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java
@@ -1386,19 +1386,48 @@ public abstract class SymlinkBaseTest {
   }
 
   @Test(timeout=10000)
-  /** setTimes affects the target not the link */
-  public void testSetTimes() throws IOException {
+  /** setTimes affects the target file not the link */
+  public void testSetTimesSymlinkToFile() throws IOException {
     Path file = new Path(testBaseDir1(), "file");
     Path link = new Path(testBaseDir1(), "linkToFile");
     createAndWriteFile(file);
     wrapper.createSymlink(file, link, false);
     long at = wrapper.getFileLinkStatus(link).getAccessTime();
-    wrapper.setTimes(link, 2L, 3L);
-    // NB: local file systems don't implement setTimes
-    if (!"file".equals(getScheme())) {
-      assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
-      assertEquals(3, wrapper.getFileStatus(file).getAccessTime());
-      assertEquals(2, wrapper.getFileStatus(file).getModificationTime());
+    // the local file system may not support millisecond timestamps
+    wrapper.setTimes(link, 2000L, 3000L);
+    assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
+    assertEquals(2000, wrapper.getFileStatus(file).getModificationTime());
+    assertEquals(3000, wrapper.getFileStatus(file).getAccessTime());
+  }
+
+  @Test(timeout=10000)
+  /** setTimes affects the target directory not the link */
+  public void testSetTimesSymlinkToDir() throws IOException {
+    Path dir = new Path(testBaseDir1(), "dir");
+    Path link = new Path(testBaseDir1(), "linkToDir");
+    wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(dir, link, false);
+    long at = wrapper.getFileLinkStatus(link).getAccessTime();
+    // the local file system may not support millisecond timestamps
+    wrapper.setTimes(link, 2000L, 3000L);
+    assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
+    assertEquals(2000, wrapper.getFileStatus(dir).getModificationTime());
+    assertEquals(3000, wrapper.getFileStatus(dir).getAccessTime());
+  }
+
+  @Test(timeout=10000)
+  /** setTimes does not affect the link even though target does not exist */
+  public void testSetTimesDanglingLink() throws IOException {
+    Path file = new Path("/noSuchFile");
+    Path link = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(file, link, false);
+    long at = wrapper.getFileLinkStatus(link).getAccessTime();
+    try {
+      wrapper.setTimes(link, 2000L, 3000L);
+      fail("set times to non-existant file");
+    } catch (IOException e) {
+      // Expected
     }
+    assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index df5cba9..f641f04 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -378,7 +378,14 @@ public class TestLocalFileSystem {
     assertTrue(dataFileFound);
     assertTrue(checksumFileFound);
   }
-  
+
+  private void checkTimesStatus(Path path,
+    long expectedModTime, long expectedAccTime) throws IOException {
+    FileStatus status = fileSys.getFileStatus(path);
+    assertEquals(expectedModTime, status.getModificationTime());
+    assertEquals(expectedAccTime, status.getAccessTime());
+  }
+
   @Test(timeout = 1000)
   public void testSetTimes() throws Exception {
     Path path = new Path(TEST_ROOT_DIR, "set-times");
@@ -387,15 +394,24 @@ public class TestLocalFileSystem {
     // test only to the nearest second, as the raw FS may not
     // support millisecond timestamps
     long newModTime = 12345000;
+    long newAccTime = 23456000;
 
     FileStatus status = fileSys.getFileStatus(path);
     assertTrue("check we're actually changing something", newModTime != status.getModificationTime());
-    long accessTime = status.getAccessTime();
+    assertTrue("check we're actually changing something", newAccTime != status.getAccessTime());
+
+    fileSys.setTimes(path, newModTime, newAccTime);
+    checkTimesStatus(path, newModTime, newAccTime);
+
+    newModTime = 34567000;
 
     fileSys.setTimes(path, newModTime, -1);
-    status = fileSys.getFileStatus(path);
-    assertEquals(newModTime, status.getModificationTime());
-    assertEquals(accessTime, status.getAccessTime());
+    checkTimesStatus(path, newModTime, newAccTime);
+
+    newAccTime = 45678000;
+
+    fileSys.setTimes(path, -1, newAccTime);
+    checkTimesStatus(path, newModTime, newAccTime);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
index 64e34af..602af97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
@@ -231,4 +231,22 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
       // Expected.
     }
   }
+
+  @Override
+  public void testSetTimesSymlinkToFile() throws IOException {
+    assumeTrue(!Path.WINDOWS);
+    super.testSetTimesSymlinkToFile();
+  }
+
+  @Override
+  public void testSetTimesSymlinkToDir() throws IOException {
+    assumeTrue(!Path.WINDOWS);
+    super.testSetTimesSymlinkToDir();
+  }
+
+  @Override
+  public void testSetTimesDanglingLink() throws IOException {
+    assumeTrue(!Path.WINDOWS);
+    super.testSetTimesDanglingLink();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e3ce4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index ecfb5a5..263c697 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -18,12 +18,13 @@
 package org.apache.hadoop.fs.shell;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotEquals;
 
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -38,8 +39,12 @@ import org.junit.Test;
 
 public class TestCopyPreserveFlag {
   private static final int MODIFICATION_TIME = 12345000;
-  private static final Path FROM = new Path("d1", "f1");
-  private static final Path TO = new Path("d2", "f2");
+  private static final int ACCESS_TIME = 23456000;
+  private static final Path DIR_FROM = new Path("d0");
+  private static final Path DIR_TO1 = new Path("d1");
+  private static final Path DIR_TO2 = new Path("d2");
+  private static final Path FROM = new Path(DIR_FROM, "f0");
+  private static final Path TO = new Path(DIR_TO1, "f1");
   private static final FsPermission PERMISSIONS = new FsPermission(
     FsAction.ALL,
     FsAction.EXECUTE,
@@ -62,8 +67,8 @@ public class TestCopyPreserveFlag {
 
     FileSystem.setDefaultUri(conf, fs.getUri());
     fs.setWorkingDirectory(testDir);
-    fs.mkdirs(new Path("d1"));
-    fs.mkdirs(new Path("d2"));
+    fs.mkdirs(DIR_FROM);
+    fs.mkdirs(DIR_TO1);
     fs.createNewFile(FROM);
 
     FSDataOutputStream output = fs.create(FROM, true);
@@ -72,10 +77,10 @@ public class TestCopyPreserveFlag {
         output.writeChar('\n');
     }
     output.close();
-    fs.setTimes(FROM, MODIFICATION_TIME, 0);
+    fs.setTimes(FROM, MODIFICATION_TIME, ACCESS_TIME);
     fs.setPermission(FROM, PERMISSIONS);
-    fs.setTimes(new Path("d1"), MODIFICATION_TIME, 0);
-    fs.setPermission(new Path("d1"), PERMISSIONS);
+    fs.setTimes(DIR_FROM, MODIFICATION_TIME, ACCESS_TIME);
+    fs.setPermission(DIR_FROM, PERMISSIONS);
   }
 
   @After
@@ -84,14 +89,18 @@ public class TestCopyPreserveFlag {
     fs.close();
   }
 
-  private void assertAttributesPreserved() throws IOException {
-    assertEquals(MODIFICATION_TIME, fs.getFileStatus(TO).getModificationTime());
-    assertEquals(PERMISSIONS, fs.getFileStatus(TO).getPermission());
+  private void assertAttributesPreserved(Path to) throws IOException {
+    FileStatus status = fs.getFileStatus(to);
+    assertEquals(MODIFICATION_TIME, status.getModificationTime());
+    assertEquals(ACCESS_TIME, status.getAccessTime());
+    assertEquals(PERMISSIONS, status.getPermission());
   }
 
-  private void assertAttributesChanged() throws IOException {
-      assertTrue(MODIFICATION_TIME != fs.getFileStatus(TO).getModificationTime());
-      assertTrue(!PERMISSIONS.equals(fs.getFileStatus(TO).getPermission()));
+  private void assertAttributesChanged(Path to) throws IOException {
+    FileStatus status = fs.getFileStatus(to);
+    assertNotEquals(MODIFICATION_TIME, status.getModificationTime());
+    assertNotEquals(ACCESS_TIME, status.getAccessTime());
+    assertNotEquals(PERMISSIONS, status.getPermission());
   }
 
   private void run(CommandWithDestination cmd, String... args) {
@@ -102,54 +111,48 @@ public class TestCopyPreserveFlag {
   @Test(timeout = 10000)
   public void testPutWithP() throws Exception {
     run(new Put(), "-p", FROM.toString(), TO.toString());
-    assertAttributesPreserved();
+    assertAttributesPreserved(TO);
   }
 
   @Test(timeout = 10000)
   public void testPutWithoutP() throws Exception {
     run(new Put(), FROM.toString(), TO.toString());
-    assertAttributesChanged();
+    assertAttributesChanged(TO);
   }
 
   @Test(timeout = 10000)
   public void testGetWithP() throws Exception {
     run(new Get(), "-p", FROM.toString(), TO.toString());
-    assertAttributesPreserved();
+    assertAttributesPreserved(TO);
   }
 
   @Test(timeout = 10000)
   public void testGetWithoutP() throws Exception {
     run(new Get(), FROM.toString(), TO.toString());
-    assertAttributesChanged();
+    assertAttributesChanged(TO);
   }
 
   @Test(timeout = 10000)
   public void testCpWithP() throws Exception {
       run(new Cp(), "-p", FROM.toString(), TO.toString());
-      assertAttributesPreserved();
+      assertAttributesPreserved(TO);
   }
 
   @Test(timeout = 10000)
   public void testCpWithoutP() throws Exception {
       run(new Cp(), FROM.toString(), TO.toString());
-      assertAttributesChanged();
+      assertAttributesChanged(TO);
   }
 
   @Test(timeout = 10000)
   public void testDirectoryCpWithP() throws Exception {
-    run(new Cp(), "-p", "d1", "d3");
-    assertEquals(fs.getFileStatus(new Path("d1")).getModificationTime(),
-        fs.getFileStatus(new Path("d3")).getModificationTime());
-    assertEquals(fs.getFileStatus(new Path("d1")).getPermission(),
-        fs.getFileStatus(new Path("d3")).getPermission());
+    run(new Cp(), "-p", DIR_FROM.toString(), DIR_TO2.toString());
+    assertAttributesPreserved(DIR_TO2);
   }
 
   @Test(timeout = 10000)
   public void testDirectoryCpWithoutP() throws Exception {
-    run(new Cp(), "d1", "d4");
-    assertTrue(fs.getFileStatus(new Path("d1")).getModificationTime() !=
-        fs.getFileStatus(new Path("d4")).getModificationTime());
-    assertTrue(!fs.getFileStatus(new Path("d1")).getPermission()
-        .equals(fs.getFileStatus(new Path("d4")).getPermission()));
+    run(new Cp(), DIR_FROM.toString(), DIR_TO2.toString());
+    assertAttributesChanged(DIR_TO2);
   }
 }


[6/7] hadoop git commit: HADOOP-12185. NetworkTopology is not efficient at adding/getting/removing nodes. Contributed by Inigo Goiri

Posted by aw...@apache.org.
HADOOP-12185. NetworkTopology is not efficient at adding/getting/removing nodes. Contributed by Inigo Goiri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47a69ec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47a69ec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47a69ec7

Branch: refs/heads/HADOOP-12111
Commit: 47a69ec7a5417cb56b75d07184dd6888ff068302
Parents: ed1e3ce
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Jul 6 15:03:22 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Jul 6 15:03:22 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/net/NetworkTopology.java  | 59 +++++++--------
 .../apache/hadoop/net/TestClusterTopology.java  | 75 ++++++++++++++++++--
 2 files changed, 102 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
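
The pattern behind the change: InnerNode previously answered "is this child
here?", "get child by name", and "remove child" by scanning its children
list, O(n) per call. The patch keeps the list (index-based random selection
still needs it) and shadows it with a name-keyed HashMap so lookups become
O(1). A toy model of the dual structure follows; the class and method names
are hypothetical, not the NetworkTopology code itself:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class Children {
      // the list preserves positional access for random selection;
      // the map answers name lookups without walking the list
      private final List<String> children = new ArrayList<>();
      private final Map<String, String> childrenMap = new HashMap<>();

      boolean add(String name) {
        if (childrenMap.put(name, name) != null) {
          return false;              // already present, nothing to append
        }
        children.add(name);
        return true;
      }

      String getByName(String name) {
        return childrenMap.get(name);  // O(1), replaces a linear scan
      }

      boolean remove(String name) {
        if (childrenMap.remove(name) == null) {
          return false;              // O(1) miss: no list walk at all
        }
        children.remove(name);       // still O(n), but only on real removals
        return true;
      }
    }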


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47a69ec7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 63b6763..970ad40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.net;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.TreeMap;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -80,6 +82,7 @@ public class NetworkTopology {
    */
   static class InnerNode extends NodeBase {
     protected List<Node> children=new ArrayList<Node>();
+    private Map<String, Node> childrenMap = new HashMap<String, Node>();
     private int numOfLeaves;
         
     /** Construct an InnerNode from a path-like string */
@@ -171,10 +174,13 @@ public class NetworkTopology {
         // this node is the parent of n; add n directly
         n.setParent(this);
         n.setLevel(this.level+1);
-        for(int i=0; i<children.size(); i++) {
-          if (children.get(i).getName().equals(n.getName())) {
-            children.set(i, n);
-            return false;
+        Node prev = childrenMap.put(n.getName(), n);
+        if (prev != null) {
+          for(int i=0; i<children.size(); i++) {
+            if (children.get(i).getName().equals(n.getName())) {
+              children.set(i, n);
+              return false;
+            }
           }
         }
         children.add(n);
@@ -183,17 +189,12 @@ public class NetworkTopology {
       } else {
         // find the next ancestor node
         String parentName = getNextAncestorName(n);
-        InnerNode parentNode = null;
-        for(int i=0; i<children.size(); i++) {
-          if (children.get(i).getName().equals(parentName)) {
-            parentNode = (InnerNode)children.get(i);
-            break;
-          }
-        }
+        InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
         if (parentNode == null) {
           // create a new InnerNode
           parentNode = createParentNode(parentName);
           children.add(parentNode);
+          childrenMap.put(parentNode.getName(), parentNode);
         }
         // add n to the subtree of the next ancestor node
         if (parentNode.add(n)) {
@@ -234,12 +235,15 @@ public class NetworkTopology {
                                            +parent+", is not a descendent of "+currentPath);
       if (isParent(n)) {
         // this node is the parent of n; remove n directly
-        for(int i=0; i<children.size(); i++) {
-          if (children.get(i).getName().equals(n.getName())) {
-            children.remove(i);
-            numOfLeaves--;
-            n.setParent(null);
-            return true;
+        if (childrenMap.containsKey(n.getName())) {
+          for (int i=0; i<children.size(); i++) {
+            if (children.get(i).getName().equals(n.getName())) {
+              children.remove(i);
+              childrenMap.remove(n.getName());
+              numOfLeaves--;
+              n.setParent(null);
+              return true;
+            }
           }
         }
         return false;
@@ -262,7 +266,8 @@ public class NetworkTopology {
         // if the parent node has no children, remove the parent node too
         if (isRemoved) {
           if (parentNode.getNumOfChildren() == 0) {
-            children.remove(i);
+            Node prev = children.remove(i);
+            childrenMap.remove(prev.getName());
           }
           numOfLeaves--;
         }
@@ -279,12 +284,7 @@ public class NetworkTopology {
       if (loc == null || loc.length() == 0) return this;
             
       String[] path = loc.split(PATH_SEPARATOR_STR, 2);
-      Node childnode = null;
-      for(int i=0; i<children.size(); i++) {
-        if (children.get(i).getName().equals(path[0])) {
-          childnode = children.get(i);
-        }
-      }
+      Node childnode = childrenMap.get(path[0]);
       if (childnode == null) return null; // non-existing node
       if (path.length == 1) return childnode;
       if (childnode instanceof InnerNode) {
@@ -311,10 +311,13 @@ public class NetworkTopology {
         isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
       if (isLeafParent()) { // children are leaves
         if (isLeaf) { // excluded node is a leaf node
-          int excludedIndex = children.indexOf(excludedNode);
-          if (excludedIndex != -1 && leafIndex >= 0) {
-            // excluded node is one of the children so adjust the leaf index
-            leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+          if (excludedNode != null &&
+              childrenMap.containsKey(excludedNode.getName())) {
+            int excludedIndex = children.indexOf(excludedNode);
+            if (excludedIndex != -1 && leafIndex >= 0) {
+              // excluded node is one of the children so adjust the leaf index
+              leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+            }
           }
         }
         // range check

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47a69ec7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
index 3ab663f..72fc5cb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.net;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 
+import org.apache.commons.math3.stat.inference.ChiSquareTest;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -79,12 +81,14 @@ public class TestClusterTopology extends Assert {
   public void testCountNumNodes() throws Exception {
     // create the topology
     NetworkTopology cluster = new NetworkTopology();
-    cluster.add(getNewNode("node1", "/d1/r1"));
+    NodeElement node1 = getNewNode("node1", "/d1/r1");
+    cluster.add(node1);
     NodeElement node2 = getNewNode("node2", "/d1/r2");
     cluster.add(node2);
-    cluster.add(getNewNode("node3", "/d1/r3"));
-    NodeElement node3 = getNewNode("node4", "/d1/r4");
+    NodeElement node3 = getNewNode("node3", "/d1/r3");
     cluster.add(node3);
+    NodeElement node4 = getNewNode("node4", "/d1/r4");
+    cluster.add(node4);
     // create exclude list
     List<Node> excludedNodes = new ArrayList<Node>();
 
@@ -95,7 +99,7 @@ public class TestClusterTopology extends Assert {
     assertEquals("4 nodes should be available with extra excluded Node", 4,
         cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
     // add one existing node to exclude list
-    excludedNodes.add(node3);
+    excludedNodes.add(node4);
     assertEquals("excluded nodes with ROOT scope should be considered", 3,
         cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
     assertEquals("excluded nodes without ~ scope should be considered", 2,
@@ -112,6 +116,69 @@ public class TestClusterTopology extends Assert {
     // getting count with non-exist scope.
     assertEquals("No nodes should be considered for non-exist scope", 0,
         cluster.countNumOfAvailableNodes("/non-exist", excludedNodes));
+    // remove a node from the cluster
+    cluster.remove(node1);
+    assertEquals("1 node should be available", 1,
+        cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
+  }
+
+  /**
+   * Test how well we pick random nodes.
+   */
+  @Test
+  public void testChooseRandom() {
+    // create the topology
+    NetworkTopology cluster = new NetworkTopology();
+    NodeElement node1 = getNewNode("node1", "/d1/r1");
+    cluster.add(node1);
+    NodeElement node2 = getNewNode("node2", "/d1/r2");
+    cluster.add(node2);
+    NodeElement node3 = getNewNode("node3", "/d1/r3");
+    cluster.add(node3);
+    NodeElement node4 = getNewNode("node4", "/d1/r3");
+    cluster.add(node4);
+
+    // Number of iterations to do the test
+    int numIterations = 100;
+
+    // Pick random nodes
+    HashMap<String,Integer> histogram = new HashMap<String,Integer>();
+    for (int i=0; i<numIterations; i++) {
+      String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
+      if (!histogram.containsKey(randomNode)) {
+        histogram.put(randomNode, 0);
+      }
+      histogram.put(randomNode, histogram.get(randomNode) + 1);
+    }
+    assertEquals("Random is not selecting all nodes", 4, histogram.size());
+
+    // Check with 99% confidence (alpha=0.01 as confidence = (100 * (1 - alpha)
+    ChiSquareTest chiSquareTest = new ChiSquareTest();
+    double[] expected = new double[histogram.size()];
+    long[] observed = new long[histogram.size()];
+    int j=0;
+    for (Integer occurrence : histogram.values()) {
+      expected[j] = 1.0 * numIterations / histogram.size();
+      observed[j] = occurrence;
+      j++;
+    }
+    boolean chiSquareTestRejected =
+        chiSquareTest.chiSquareTest(expected, observed, 0.01);
+
+    // Check that they have the proper distribution
+    assertFalse("Not choosing nodes randomly", chiSquareTestRejected);
+
+    // Pick random nodes excluding the 2 nodes in /d1/r3
+    histogram = new HashMap<String,Integer>();
+    for (int i=0; i<numIterations; i++) {
+      String randomNode = cluster.chooseRandom("~/d1/r3").getName();
+      if (!histogram.containsKey(randomNode)) {
+        histogram.put(randomNode, 0);
+      }
+      histogram.put(randomNode, histogram.get(randomNode) + 1);
+    }
+    assertEquals("Random is not selecting the nodes it should",
+        2, histogram.size());
   }
 
   private NodeElement getNewNode(String name, String rackLocation) {


[3/7] hadoop git commit: HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync multiple times (Contributed by zhihai xu)

Posted by aw...@apache.org.
HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync multiple times (Contributed by zhihai xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/233cab89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/233cab89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/233cab89

Branch: refs/heads/HADOOP-12111
Commit: 233cab89adb6bae21d7e171f2af516b92266242c
Parents: bff67df
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Jul 6 15:39:43 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Jul 6 15:39:43 2015 +0530

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/ha/ActiveStandbyElector.java  | 20 +++++++++++--
 .../hadoop/ha/TestActiveStandbyElector.java     | 31 ++++++++++++++++++++
 3 files changed, 51 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
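
The guard being added: each watch-triggering event used to re-issue an async
exists() on the lock node, so several StatCallback completions could be
outstanding at once. The patch records that a request is in flight for the
current ZooKeeper client and ignores duplicates; processResult clears the
flag. A distilled standalone sketch of that idea (class, field, and path
names are hypothetical):

    import org.apache.zookeeper.AsyncCallback.StatCallback;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    class LockNodeMonitor implements StatCallback {
      private final Watcher watcher;
      private boolean pending = false;   // an exists() call is in flight...
      private ZooKeeper pendingClient;   // ...issued against this client

      LockNodeMonitor(Watcher watcher) {
        this.watcher = watcher;
      }

      synchronized void monitorAsync(ZooKeeper zk, String lockPath) {
        if (pending && pendingClient == zk) {
          return;                        // duplicate request, ignore
        }
        pending = true;
        pendingClient = zk;
        zk.exists(lockPath, watcher, this, zk);  // answered in processResult
      }

      @Override
      public synchronized void processResult(int rc, String path,
          Object ctx, Stat stat) {
        pending = false;                 // the next monitorAsync may re-arm
      }
    }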


http://git-wip-us.apache.org/repos/asf/hadoop/blob/233cab89/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 312a996..1d737e5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -924,6 +924,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12164. Fix TestMove and TestFsShellReturnCode failed to get command
     name using reflection. (Lei (Eddy) Xu)
 
+    HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
+    multiple times (zhihai xu via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/233cab89/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index e520a16..e458181 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -173,7 +173,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private Lock sessionReestablishLockForTests = new ReentrantLock();
   private boolean wantToBeInElection;
-  
+  private boolean monitorLockNodePending = false;
+  private ZooKeeper monitorLockNodeClient;
+
   /**
    * Create a new ActiveStandbyElector object <br/>
    * The elector is created by providing to it the Zookeeper configuration, the
@@ -468,7 +470,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   public synchronized void processResult(int rc, String path, Object ctx,
       Stat stat) {
     if (isStaleClient(ctx)) return;
-    
+    monitorLockNodePending = false;
+
     assert wantToBeInElection :
         "Got a StatNode result after quitting election";
 
@@ -744,6 +747,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     return state;
   }
 
+  @VisibleForTesting
+  synchronized boolean isMonitorLockNodePending() {
+    return monitorLockNodePending;
+  }
+
   private boolean reEstablishSession() {
     int connectionRetryCount = 0;
     boolean success = false;
@@ -949,7 +957,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
 
   private void monitorLockNodeAsync() {
-    zkClient.exists(zkLockFilePath, 
+    if (monitorLockNodePending && monitorLockNodeClient == zkClient) {
+      LOG.info("Ignore duplicate monitor lock-node request.");
+      return;
+    }
+    monitorLockNodePending = true;
+    monitorLockNodeClient = zkClient;
+    zkClient.exists(zkLockFilePath,
         watcher, this,
         zkClient);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/233cab89/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
index 2e578e2..83a3a4f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
@@ -452,6 +452,10 @@ public class TestActiveStandbyElector {
         Event.KeeperState.SyncConnected);
     elector.processWatchEvent(mockZK, mockEvent);
     verifyExistCall(1);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+    elector.processResult(Code.SESSIONEXPIRED.intValue(), ZK_LOCK_NAME,
+        mockZK, new Stat());
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     // session expired should enter safe mode and initiate re-election
     // re-election checked via checking re-creation of new zookeeper and
@@ -495,6 +499,13 @@ public class TestActiveStandbyElector {
         ZK_LOCK_NAME);
     Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
     verifyExistCall(1);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+
+    Stat stat = new Stat();
+    stat.setEphemeralOwner(0L);
+    Mockito.when(mockZK.getSessionId()).thenReturn(1L);
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
     Mockito.when(mockEvent.getPath()).thenReturn(ZK_LOCK_NAME);
@@ -504,12 +515,18 @@ public class TestActiveStandbyElector {
         Event.EventType.NodeDataChanged);
     elector.processWatchEvent(mockZK, mockEvent);
     verifyExistCall(2);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     // monitoring should be setup again after event is received
     Mockito.when(mockEvent.getType()).thenReturn(
         Event.EventType.NodeChildrenChanged);
     elector.processWatchEvent(mockZK, mockEvent);
     verifyExistCall(3);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     // lock node deletion when in standby mode should create znode again
     // successful znode creation enters active state and sets monitor
@@ -524,6 +541,10 @@ public class TestActiveStandbyElector {
         ZK_LOCK_NAME);
     Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
     verifyExistCall(4);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+    stat.setEphemeralOwner(1L);
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     // lock node deletion in active mode should enter neutral mode and create
     // znode again successful znode creation enters active state and sets
@@ -538,6 +559,9 @@ public class TestActiveStandbyElector {
         ZK_LOCK_NAME);
     Mockito.verify(mockApp, Mockito.times(2)).becomeActive();
     verifyExistCall(5);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     // bad path name results in fatal error
     Mockito.when(mockEvent.getPath()).thenReturn(null);
@@ -570,6 +594,13 @@ public class TestActiveStandbyElector {
         ZK_LOCK_NAME);
     Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
     verifyExistCall(1);
+    Assert.assertTrue(elector.isMonitorLockNodePending());
+
+    Stat stat = new Stat();
+    stat.setEphemeralOwner(0L);
+    Mockito.when(mockZK.getSessionId()).thenReturn(1L);
+    elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
+    Assert.assertFalse(elector.isMonitorLockNodePending());
 
     WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
     Mockito.when(mockEvent.getPath()).thenReturn(ZK_LOCK_NAME);


[7/7] hadoop git commit: Merge branch 'trunk' into HADOOP-12111

Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60343661
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60343661
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60343661

Branch: refs/heads/HADOOP-12111
Commit: 60343661c22ed528fc848d6962cec7e8e4bfe884
Parents: dcde7e4 47a69ec
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon Jul 6 15:52:09 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon Jul 6 15:52:09 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   6 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java    |  36 ++++---
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  20 +++-
 .../org/apache/hadoop/net/NetworkTopology.java  |  59 ++++++-----
 .../org/apache/hadoop/fs/SymlinkBaseTest.java   |  45 +++++++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  26 ++++-
 .../apache/hadoop/fs/TestSymlinkLocalFS.java    |  18 ++++
 .../hadoop/fs/shell/TestCopyPreserveFlag.java   |  63 ++++++------
 .../hadoop/ha/TestActiveStandbyElector.java     |  31 ++++++
 .../apache/hadoop/net/TestClusterTopology.java  |  75 +++++++++++++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  14 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |   5 +-
 .../TestOfflineImageViewerForXAttr.java         |   3 +
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../apache/hadoop/mapred/ShuffleHandler.java    |   3 +-
 .../hadoop/mapred/TestShuffleHandler.java       | 101 +++++++++++++++++++
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../logaggregation/AggregatedLogFormat.java     |  83 ++++++++-------
 20 files changed, 465 insertions(+), 134 deletions(-)
----------------------------------------------------------------------



[4/7] hadoop git commit: HDFS-8686. WebHdfsFileSystem#getXAttr(Path p, final String name) doesn't work if namespace is in capitals (Contributed by kanaka kumar avvaru)

Posted by aw...@apache.org.
HDFS-8686. WebHdfsFileSystem#getXAttr(Path p, final String name) doesn't work if namespace is in capitals (Contributed by kanaka kumar avvaru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc92d3e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc92d3e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc92d3e6

Branch: refs/heads/HADOOP-12111
Commit: fc92d3e6515a391847cb6170244b3d911712d96a
Parents: 233cab8
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Jul 6 16:09:24 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Jul 6 16:09:24 2015 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java    | 14 ++++++++++++++
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt           |  3 +++
 .../hadoop/hdfs/server/namenode/FSXAttrBaseTest.java  |  5 ++++-
 .../TestOfflineImageViewerForXAttr.java               |  3 +++
 5 files changed, 25 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
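
Why the one-line client change works: the server canonicalizes the XAttr
namespace prefix, so the parsed response can key the attribute under a name
that differs in case from what the caller passed (the new test asks for
USER.a2 and the entry comes back as user.a2). Since the request named exactly
one XAttr, the response map holds at most one entry, and its value can be
taken directly instead of looked up by the raw name. A minimal sketch of that
idea, using a plain java.util.Map as a stand-in for the parsed JSON:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SingleXAttrDemo {
      /** Expecting at most one XAttr in the map; return its value without
       *  depending on how the server cased the key. */
      static byte[] getXAttr(Map<String, byte[]> xAttrs) {
        if (xAttrs == null || xAttrs.isEmpty()) {
          return null;
        }
        return xAttrs.values().iterator().next();
      }

      public static void main(String[] args) {
        Map<String, byte[]> parsed = new LinkedHashMap<>();
        parsed.put("user.a2", "value2".getBytes());  // server-cased key
        // a case-sensitive get("USER.a2") would miss; taking the lone
        // value does not
        System.out.println(new String(getXAttr(parsed)));  // prints value2
      }
    }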


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc92d3e6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index e025e31..713836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -413,6 +413,20 @@ class JsonUtilClient {
     return null;
   }
 
+  /** Expecting only single XAttr in the map. return its value */
+  static byte[] getXAttr(final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs != null && !xAttrs.values().isEmpty()) {
+      return xAttrs.values().iterator().next();
+    }
+
+    return null;
+  }
+
   static Map<String, byte[]> toXAttrs(final Map<?, ?> json)
       throws IOException {
     if (json == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc92d3e6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 2650dca..b661d07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -963,7 +963,7 @@ public class WebHdfsFileSystem extends FileSystem
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtilClient.getXAttr(json, name);
+        return JsonUtilClient.getXAttr(json);
       }
     }.run();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc92d3e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4f184fb..9edc2af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1005,6 +1005,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8577. Avoid retrying to recover lease on a file which does not exist
     (J.Andreina via vinayakumarb)
 
+    HDFS-8686. WebHdfsFileSystem#getXAttr(Path p, final String name) doesn't
+    work if namespace is in capitals (kanaka kumar avvaru via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc92d3e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index e21e34c..eb9053c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -395,7 +395,10 @@ public class FSXAttrBaseTest {
     FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
     fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
-    
+
+    final byte[] theValue = fs.getXAttr(path, "USER.a2");
+    Assert.assertArrayEquals(value2, theValue);
+
     /* An XAttr that was requested does not exist. */
     try {
       final byte[] value = fs.getXAttr(path, name3);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc92d3e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
index 3f23f64..6c82101 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
@@ -231,6 +231,9 @@ public class TestOfflineImageViewerForXAttr {
           "user.attr1"));
       assertEquals("value1", value);
 
+      value = new String(webhdfs.getXAttr(new Path("/dir1"), "USER.attr1"));
+      assertEquals("value1", value);
+
       Map<String, byte[]> contentMap = webhdfs.getXAttrs(new Path("/dir1"),
           names);
 


[2/7] hadoop git commit: MAPREDUCE-6425. ShuffleHandler passes wrong "base" parameter to getMapOutputInfo if mapId is not in the cache. Contributed by zhihai xu.

Posted by aw...@apache.org.
MAPREDUCE-6425. ShuffleHandler passes wrong "base" parameter to
getMapOutputInfo if mapId is not in the cache. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bff67dfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bff67dfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bff67dfe

Branch: refs/heads/HADOOP-12111
Commit: bff67dfe2f811654ffb1bbcbd87509c185f452b6
Parents: 688617d
Author: Devaraj K <de...@apache.org>
Authored: Mon Jul 6 13:46:37 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Mon Jul 6 13:46:37 2015 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../apache/hadoop/mapred/ShuffleHandler.java    |   3 +-
 .../hadoop/mapred/TestShuffleHandler.java       | 101 +++++++++++++++++++
 3 files changed, 106 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
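
The bug in one picture: on a cache miss, the handler passed outputBasePathStr
alone to getMapOutputInfo, one directory level above where the map's output
lives; the fix appends the mapId so the info is resolved under the map's own
output directory, matching the string the method expects as its "base".
Illustrated below with placeholder paths; the layout shown is a plausible
stand-in, not copied from ShuffleHandler:

    public class BaseParamDemo {
      public static void main(String[] args) {
        String outputBasePathStr =
            "usercache/randomUser/appcache/application_12345_0001/output/";
        String mapId = "attempt_12345_1_m_1_0";
        String before = outputBasePathStr;          // what the miss path sent
        String after = outputBasePathStr + mapId;   // what the fix now sends
        System.out.println("before: " + before);
        System.out.println("after:  " + after);
      }
    }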


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bff67dfe/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2f80615..2458403 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.7.2 - UNRELEASED
 
   BUG FIXES
 
+    MAPREDUCE-6425. ShuffleHandler passes wrong "base" parameter to getMapOutputInfo
+    if mapId is not in the cache. (zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bff67dfe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index eedf42b..ee1be23 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -815,7 +815,8 @@ public class ShuffleHandler extends AuxiliaryService {
         try {
           MapOutputInfo info = mapOutputInfoMap.get(mapId);
           if (info == null) {
-            info = getMapOutputInfo(outputBasePathStr, mapId, reduceId, user);
+            info = getMapOutputInfo(outputBasePathStr + mapId,
+                mapId, reduceId, user);
           }
           lastMap =
               sendMapOutput(ctx, ch, user, mapId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bff67dfe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
index 7053653..746071f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
@@ -601,6 +601,7 @@ public class TestShuffleHandler {
       Assert.assertTrue((new String(byteArr)).contains(message));
     } finally {
       shuffleHandler.stop();
+      FileUtil.fullyDelete(absLogDir);
     }
   }
 
@@ -829,4 +830,104 @@ public class TestShuffleHandler {
     conn.disconnect();
     return rc;
   }
+
+  @Test(timeout = 100000)
+  public void testGetMapOutputInfo() throws Exception {
+    final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
+    Configuration conf = new Configuration();
+    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
+    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
+    File absLogDir = new File("target", TestShuffleHandler.class.
+        getSimpleName() + "LocDir").getAbsoluteFile();
+    conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
+    ApplicationId appId = ApplicationId.newInstance(12345, 1);
+    String appAttemptId = "attempt_12345_1_m_1_0";
+    String user = "randomUser";
+    String reducerId = "0";
+    List<File> fileMap = new ArrayList<File>();
+    createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId,
+        conf, fileMap);
+    ShuffleHandler shuffleHandler = new ShuffleHandler() {
+      @Override
+      protected Shuffle getShuffle(Configuration conf) {
+        // replace the shuffle handler with one stubbed for testing
+        return new Shuffle(conf) {
+          @Override
+          protected void populateHeaders(List<String> mapIds,
+              String outputBaseStr, String user, int reduce,
+              HttpRequest request, HttpResponse response,
+              boolean keepAliveParam, Map<String, MapOutputInfo> infoMap)
+              throws IOException {
+            // Only set response headers and skip everything else
+            // send some dummy value for content-length
+            super.setResponseHeaders(response, keepAliveParam, 100);
+          }
+          @Override
+          protected void verifyRequest(String appid,
+              ChannelHandlerContext ctx, HttpRequest request,
+              HttpResponse response, URL requestUri) throws IOException {
+            // Do nothing.
+          }
+          @Override
+          protected void sendError(ChannelHandlerContext ctx, String message,
+              HttpResponseStatus status) {
+            if (failures.size() == 0) {
+              failures.add(new Error(message));
+              ctx.getChannel().close();
+            }
+          }
+          @Override
+          protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
+              Channel ch, String user, String mapId, int reduce,
+              MapOutputInfo info) throws IOException {
+            // send a shuffle header
+            ShuffleHeader header =
+                new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
+            DataOutputBuffer dob = new DataOutputBuffer();
+            header.write(dob);
+            return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
+          }
+        };
+      }
+    };
+    shuffleHandler.init(conf);
+    try {
+      shuffleHandler.start();
+      DataOutputBuffer outputBuffer = new DataOutputBuffer();
+      outputBuffer.reset();
+      Token<JobTokenIdentifier> jt =
+          new Token<JobTokenIdentifier>("identifier".getBytes(),
+          "password".getBytes(), new Text(user), new Text("shuffleService"));
+      jt.write(outputBuffer);
+      shuffleHandler
+          .initializeApplication(new ApplicationInitializationContext(user,
+          appId, ByteBuffer.wrap(outputBuffer.getData(), 0,
+          outputBuffer.getLength())));
+      URL url =
+          new URL(
+              "http://127.0.0.1:"
+                  + shuffleHandler.getConfig().get(
+                      ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+                  + "/mapOutput?job=job_12345_0001&reduce=" + reducerId
+                  + "&map=attempt_12345_1_m_1_0");
+      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
+          ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
+      conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
+          ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
+      conn.connect();
+      try {
+        DataInputStream is = new DataInputStream(conn.getInputStream());
+        ShuffleHeader header = new ShuffleHeader();
+        header.readFields(is);
+        is.close();
+      } catch (EOFException e) {
+        // ignore
+      }
+      Assert.assertEquals(failures.size(), 0);
+    } finally {
+      shuffleHandler.stop();
+      FileUtil.fullyDelete(absLogDir);
+    }
+  }
 }