Posted to common-commits@hadoop.apache.org by so...@apache.org on 2020/06/05 12:02:12 UTC

[hadoop] branch branch-3.0 updated (cb94911 -> fe89969)

This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from cb94911  Revert "HADOOP-8143. Change distcp to have -pb on by default."
     new b7b3164  Revert "HDFS-13179. TestLazyPersistReplicaRecovery#testDnRestartWithSavedReplicas fails intermittently. Contributed by Ahmed Hussein."
     new 0d33320  HADOOP-14698. Make copyFromLocals -t option available for put as well. Contributed by Andras Bokor.
     new fe89969  HDFS-15386. ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories (#2052)

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/fs/shell/CopyCommands.java   | 108 ++++++++-------------
 .../org/apache/hadoop/fs/shell/MoveCommands.java   |  20 +++-
 .../src/site/markdown/FileSystemShell.md           |   4 +-
 .../java/org/apache/hadoop/fs/shell/TestMove.java  |   7 ++
 .../hadoop-common/src/test/resources/testConf.xml  |  66 ++++---------
 .../datanode/fsdataset/impl/FsDatasetImpl.java     |  10 +-
 .../datanode/fsdataset/impl/TestFsDatasetImpl.java | 100 +++++++++++++++++--
 .../impl/TestLazyPersistReplicaRecovery.java       |  41 +-------
 8 files changed, 182 insertions(+), 174 deletions(-)




[hadoop] 02/03: HADOOP-14698. Make copyFromLocals -t option available for put as well. Contributed by Andras Bokor.

Posted by so...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0d3332037f5d10ae47aedb1ec2d71061409e5e41
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Fri May 29 10:32:37 2020 +0100

    HADOOP-14698. Make copyFromLocals -t option available for put as well. Contributed by Andras Bokor.
    
    (cherry picked from commit 1f674e26ba056ae2bffd54ddda267302e5d2519b)
---
 .../org/apache/hadoop/fs/shell/CopyCommands.java   | 108 ++++++++-------------
 .../org/apache/hadoop/fs/shell/MoveCommands.java   |  20 +++-
 .../src/site/markdown/FileSystemShell.md           |   4 +-
 .../java/org/apache/hadoop/fs/shell/TestMove.java  |   7 ++
 .../hadoop-common/src/test/resources/testConf.xml  |  66 ++++---------
 5 files changed, 82 insertions(+), 123 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index c408a4f..180f98c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -232,26 +232,35 @@ class CopyCommands {
    *  Copy local files to a remote filesystem
    */
   public static class Put extends CommandWithDestination {
+    private ThreadPoolExecutor executor = null;
+    private int numThreads = 1;
+
+    private static final int MAX_THREADS =
+        Runtime.getRuntime().availableProcessors() * 2;
+
     public static final String NAME = "put";
     public static final String USAGE =
-        "[-f] [-p] [-l] [-d] <localsrc> ... <dst>";
+        "[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files from the local file system " +
-      "into fs. Copying fails if the file already " +
-      "exists, unless the -f flag is given.\n" +
-      "Flags:\n" +
-      "  -p : Preserves access and modification times, ownership and the mode.\n" +
-      "  -f : Overwrites the destination if it already exists.\n" +
-      "  -l : Allow DataNode to lazily persist the file to disk. Forces\n" +
-      "       replication factor of 1. This flag will result in reduced\n" +
-      "       durability. Use with care.\n" +
+        "Copy files from the local file system " +
+        "into fs. Copying fails if the file already " +
+        "exists, unless the -f flag is given.\n" +
+        "Flags:\n" +
+        "  -p : Preserves timestamps, ownership and the mode.\n" +
+        "  -f : Overwrites the destination if it already exists.\n" +
+        "  -t <thread count> : Number of threads to be used, default is 1.\n" +
+        "  -l : Allow DataNode to lazily persist the file to disk. Forces" +
+        "  replication factor of 1. This flag will result in reduced" +
+        "  durability. Use with care.\n" +
         "  -d : Skip creation of temporary file(<dst>._COPYING_).\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       CommandFormat cf =
           new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
+      cf.addOptionWithValue("t");
       cf.parse(args);
+      setNumberThreads(cf.getOptValue("t"));
       setOverwrite(cf.getOpt("f"));
       setPreserve(cf.getOpt("p"));
       setLazyPersist(cf.getOpt("l"));
@@ -281,32 +290,22 @@ class CopyCommands {
         copyStreamToTarget(System.in, getTargetPath(args.get(0)));
         return;
       }
-      super.processArguments(args);
-    }
-  }
 
-  public static class CopyFromLocal extends Put {
-    private ThreadPoolExecutor executor = null;
-    private int numThreads = 1;
+      executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
+          TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
+          new ThreadPoolExecutor.CallerRunsPolicy());
+      super.processArguments(args);
 
-    private static final int MAX_THREADS =
-        Runtime.getRuntime().availableProcessors() * 2;
-    public static final String NAME = "copyFromLocal";
-    public static final String USAGE =
-        "[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
-    public static final String DESCRIPTION =
-        "Copy files from the local file system " +
-        "into fs. Copying fails if the file already " +
-        "exists, unless the -f flag is given.\n" +
-        "Flags:\n" +
-        "  -p : Preserves access and modification times, ownership and the" +
-        " mode.\n" +
-        "  -f : Overwrites the destination if it already exists.\n" +
-        "  -t <thread count> : Number of threads to be used, default is 1.\n" +
-        "  -l : Allow DataNode to lazily persist the file to disk. Forces" +
-        " replication factor of 1. This flag will result in reduced" +
-        " durability. Use with care.\n" +
-        "  -d : Skip creation of temporary file(<dst>._COPYING_).\n";
+      // issue the command and then wait for it to finish
+      executor.shutdown();
+      try {
+        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
+      } catch (InterruptedException e) {
+        executor.shutdownNow();
+        displayError(e);
+        Thread.currentThread().interrupt();
+      }
+    }
 
     private void setNumberThreads(String numberThreadsString) {
       if (numberThreadsString == null) {
@@ -323,22 +322,6 @@ class CopyCommands {
       }
     }
 
-    @Override
-    protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf =
-          new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
-      cf.addOptionWithValue("t");
-      cf.parse(args);
-      setNumberThreads(cf.getOptValue("t"));
-      setOverwrite(cf.getOpt("f"));
-      setPreserve(cf.getOpt("p"));
-      setLazyPersist(cf.getOpt("l"));
-      setDirectWrite(cf.getOpt("d"));
-      getRemoteDestination(args);
-      // should have a -r option
-      setRecursive(true);
-    }
-
     private void copyFile(PathData src, PathData target) throws IOException {
       if (isPathRecursable(src)) {
         throw new PathIsDirectoryException(src.toString());
@@ -365,25 +348,6 @@ class CopyCommands {
       executor.submit(task);
     }
 
-    @Override
-    protected void processArguments(LinkedList<PathData> args)
-        throws IOException {
-      executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
-          TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
-          new ThreadPoolExecutor.CallerRunsPolicy());
-      super.processArguments(args);
-
-      // issue the command and then wait for it to finish
-      executor.shutdown();
-      try {
-        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
-      } catch (InterruptedException e) {
-        executor.shutdownNow();
-        displayError(e);
-        Thread.currentThread().interrupt();
-      }
-    }
-
     @VisibleForTesting
     public int getNumThreads() {
       return numThreads;
@@ -394,6 +358,12 @@ class CopyCommands {
       return executor;
     }
   }
+
+  public static class CopyFromLocal extends Put {
+    public static final String NAME = "copyFromLocal";
+    public static final String USAGE = Put.USAGE;
+    public static final String DESCRIPTION = "Identical to the -put command.";
+  }
  
   public static class CopyToLocal extends Get {
     public static final String NAME = "copyToLocal";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 5ef4277..c20293e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.shell.CopyCommands.Put;
+import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
 
 /** Various commands for moving files */
 @InterfaceAudience.Private
@@ -41,12 +41,22 @@ class MoveCommands {
   /**
    *  Move local files to a remote filesystem
    */
-  public static class MoveFromLocal extends Put {
+  public static class MoveFromLocal extends CopyFromLocal {
     public static final String NAME = "moveFromLocal";
-    public static final String USAGE = "<localsrc> ... <dst>";
+    public static final String USAGE =
+        "[-f] [-p] [-l] [-d] <localsrc> ... <dst>";
     public static final String DESCRIPTION = 
-      "Same as -put, except that the source is " +
-      "deleted after it's copied.";
+        "Same as -put, except that the source is " +
+        "deleted after it's copied\n" +
+        "and -t option has not yet implemented.";
+
+    @Override
+    protected void processOptions(LinkedList<String> args) throws IOException {
+      if(args.contains("-t")) {
+        throw new CommandFormat.UnknownOptionException("-t");
+      }
+      super.processOptions(args);
+    }
 
     @Override
     protected void processPath(PathData src, PathData target) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 71eec75..c67460a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -506,7 +506,7 @@ Returns 0 on success and -1 on error.
 put
 ---
 
-Usage: `hadoop fs -put  [-f] [-p] [-l] [-d] [ - | <localsrc1>  .. ]. <dst>`
+Usage: `hadoop fs -put  [-f] [-p] [-l] [-d] [-t <thread count>] [ - | <localsrc1>  .. ]. <dst>`
 
 Copy single src, or multiple srcs from local file system to the destination file system.
 Also reads input from stdin and writes to destination file system if the source is set to "-"
@@ -518,6 +518,8 @@ Options:
 * `-p` : Preserves access and modification times, ownership and the permissions.
 (assuming the permissions can be propagated across filesystems)
 * `-f` : Overwrites the destination if it already exists.
+* `-t <thread count>` : Number of threads to be used, default is 1. Useful
+ when uploading a directory containing more than 1 file.
 * `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication
  factor of 1. This flag will result in reduced durability. Use with care.
 * `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
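
As an illustration of the documented option, a directory upload could be
issued as, for example (paths hypothetical):

    hadoop fs -put -t 4 localdir /user/hadoop/dir

Each file found under localdir is then handed to one of four copy threads;
with a single source file the option buys nothing, as the documentation notes.
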
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
index 94930e5..e2a124e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -93,6 +94,12 @@ public class TestMove {
     assertTrue("Rename should have failed with path exists exception",
                          cmd.error instanceof PathExistsException);
   }
+
+  @Test(expected = UnknownOptionException.class)
+  public void testMoveFromLocalDoesNotAllowTOption() {
+    new MoveCommands.MoveFromLocal().run("-t", "2",
+        null, null);
+  }
     
   static class MockFileSystem extends FilterFileSystem {
     Configuration conf;
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 6a3d53a..84a3200 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -496,7 +496,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-put \[-f\] \[-p\] \[-l\] \[-d\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :( )*</expected-output>
+          <expected-output>^-put \[-f\] \[-p\] \[-l\] \[-d\] \[-t &lt;thread count&gt;\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -512,15 +515,19 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-p  Preserves access and modification times, ownership and the mode.( )*</expected-output>
+          <expected-output>^\s*-p                 Preserves timestamps, ownership and the mode.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-f                 Overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-f  Overwrites the destination if it already exists.( )*</expected-output>
+          <expected-output>^\s*-t &lt;thread count&gt;  Number of threads to be used, default is 1.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-l  Allow DataNode to lazily persist the file to disk. Forces( )*</expected-output>
+          <expected-output>^\s*-l                 Allow DataNode to lazily persist the file to disk. Forces( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -532,7 +539,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-d  Skip creation of temporary file\(&lt;dst&gt;\._COPYING_\).( )*</expected-output>
+          <expected-output>^\s*-d                 Skip creation of temporary file\(&lt;dst&gt;\._COPYING_\).( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -551,47 +558,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*exists, unless the -f flag is given.( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*Flags:( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*-p                 Preserves access and modification times, ownership and the( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*mode.( )*</expected-output>
-        </comparator>
-        <comparator>
-           <type>RegexpComparator</type>
-           <expected-output>^\s*-f                 Overwrites the destination if it already exists.( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*-t &lt;thread count&gt;  Number of threads to be used, default is 1.( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*-l                 Allow DataNode to lazily persist the file to disk. Forces( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*replication factor of 1. This flag will result in reduced( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*durability. Use with care.( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^\s*-d                 Skip creation of temporary file\(&lt;dst&gt;\._COPYING_\).( )*</expected-output>
+          <expected-output>^\s*Identical to the -put command\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -606,11 +573,14 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-moveFromLocal &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+          <expected-output>^-moveFromLocal \[-f\] \[-p\] \[-l\] \[-d\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Same as -put, except that the source is deleted after it's copied.</expected-output>
+          <expected-output>^( |\t)*Same as -put, except that the source is deleted after it's copied</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s* and -t option has not yet implemented.</expected-output>
         </comparator>
       </comparators>
     </test>




[hadoop] 03/03: HDFS-15386. ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories (#2052)

Posted by so...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit fe89969a9de42be8c7b91048aeb69a8c725515ac
Author: Toshihiro Suzuki <br...@gmail.com>
AuthorDate: Fri Jun 5 19:11:49 2020 +0900

    HDFS-15386. ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories (#2052)
    
    Contributed by Toshihiro Suzuki.
    
    (cherry picked from commit 545a0a147c5256c44911ba57b4898e01d786d836)
    
     Conflicts:
    	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
    
    (cherry picked from commit e5a02a7499cb124e166fc2765c082efe3b92fe29)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java     |   5 +-
 .../datanode/fsdataset/impl/TestFsDatasetImpl.java | 100 +++++++++++++++++++--
 2 files changed, 95 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 8ad83c8..2bcb524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -557,7 +557,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           // Unlike updating the volumeMap in addVolume(), this operation does
           // not scan disks.
           for (String bpid : volumeMap.getBlockPoolList()) {
-            List<ReplicaInfo> blocks = new ArrayList<>();
+            List<ReplicaInfo> blocks = blkToInvalidate
+                .computeIfAbsent(bpid, (k) -> new ArrayList<>());
             for (Iterator<ReplicaInfo> it =
                   volumeMap.replicas(bpid).iterator(); it.hasNext();) {
               ReplicaInfo block = it.next();
@@ -570,9 +571,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
                 it.remove();
               }
             }
-            blkToInvalidate.put(bpid, blocks);
           }
-
           storageToRemove.add(sd.getStorageUuid());
           storageLocationsToRemove.remove(sdLocation);
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 865487e..db22d83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -96,6 +96,8 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import org.slf4j.Logger;
@@ -253,16 +255,24 @@ public class TestFsDatasetImpl {
   }
 
   @Test(timeout = 30000)
-  public void testRemoveVolumes() throws IOException {
+  public void testRemoveOneVolume() throws IOException {
     // Feed FsDataset with block metadata.
-    final int NUM_BLOCKS = 100;
-    for (int i = 0; i < NUM_BLOCKS; i++) {
-      String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
+    final int numBlocks = 100;
+    for (int i = 0; i < numBlocks; i++) {
+      String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length];
       ExtendedBlock eb = new ExtendedBlock(bpid, i);
-      try (ReplicaHandler replica =
-          dataset.createRbw(StorageType.DEFAULT, null, eb, false)) {
+      ReplicaHandler replica = null;
+      try {
+        replica = dataset.createRbw(StorageType.DEFAULT, null, eb,
+            false);
+      } finally {
+        if (replica != null) {
+          replica.close();
+        }
       }
     }
+
+    // Remove one volume
     final String[] dataDirs =
         conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
     final String volumePathToRemove = dataDirs[0];
@@ -285,6 +295,11 @@ public class TestFsDatasetImpl {
     assertEquals("The volume has been removed from the storageMap.",
         expectedNumVolumes, dataset.storageMap.size());
 
+    // DataNode.notifyNamenodeDeletedBlock() should be called 50 times
+    // as we deleted one volume that has 50 blocks
+    verify(datanode, times(50))
+        .notifyNamenodeDeletedBlock(any(), any());
+
     try {
       dataset.asyncDiskService.execute(volumeToRemove,
           new Runnable() {
@@ -302,10 +317,81 @@ public class TestFsDatasetImpl {
       totalNumReplicas += dataset.volumeMap.size(bpid);
     }
     assertEquals("The replica infos on this volume has been removed from the "
-                 + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES,
+                 + "volumeMap.", numBlocks / NUM_INIT_VOLUMES,
                  totalNumReplicas);
   }
 
+  @Test(timeout = 30000)
+  public void testRemoveTwoVolumes() throws IOException {
+    // Feed FsDataset with block metadata.
+    final int numBlocks = 100;
+    for (int i = 0; i < numBlocks; i++) {
+      String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length];
+      ExtendedBlock eb = new ExtendedBlock(bpid, i);
+      ReplicaHandler replica = null;
+      try {
+        replica = dataset.createRbw(StorageType.DEFAULT, null, eb,
+            false);
+      } finally {
+        if (replica != null) {
+          replica.close();
+        }
+      }
+    }
+
+    // Remove two volumes
+    final String[] dataDirs =
+        conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
+    Set<StorageLocation> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(StorageLocation.parse(dataDirs[0]));
+    volumesToRemove.add(StorageLocation.parse(dataDirs[1]));
+
+    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
+    Set<FsVolumeImpl> volumes = new HashSet<>();
+    for (FsVolumeSpi vol: volReferences) {
+      for (StorageLocation volume : volumesToRemove) {
+        if (vol.getStorageLocation().equals(volume)) {
+          volumes.add((FsVolumeImpl) vol);
+        }
+      }
+    }
+    assertEquals(2, volumes.size());
+    volReferences.close();
+
+    dataset.removeVolumes(volumesToRemove, true);
+    int expectedNumVolumes = dataDirs.length - 2;
+    assertEquals("The volume has been removed from the volumeList.",
+        expectedNumVolumes, getNumVolumes());
+    assertEquals("The volume has been removed from the storageMap.",
+        expectedNumVolumes, dataset.storageMap.size());
+
+    // DataNode.notifyNamenodeDeletedBlock() should be called 100 times
+    // as we deleted 2 volumes that have 100 blocks totally
+    verify(datanode, times(100))
+        .notifyNamenodeDeletedBlock(any(), any());
+
+    for (FsVolumeImpl volume : volumes) {
+      try {
+        dataset.asyncDiskService.execute(volume,
+            new Runnable() {
+              @Override
+              public void run() {}
+            });
+        fail("Expect RuntimeException: the volume has been removed from the "
+            + "AsyncDiskService.");
+      } catch (RuntimeException e) {
+        GenericTestUtils.assertExceptionContains("Cannot find volume", e);
+      }
+    }
+
+    int totalNumReplicas = 0;
+    for (String bpid : dataset.volumeMap.getBlockPoolList()) {
+      totalNumReplicas += dataset.volumeMap.size(bpid);
+    }
+    assertEquals("The replica infos on this volume has been removed from the "
+        + "volumeMap.", 0, totalNumReplicas);
+  }
+
   @Test(timeout = 5000)
   public void testRemoveNewlyAddedVolume() throws IOException {
     final int numExistingVolumes = getNumVolumes();
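
Stripped of the HDFS context, the invocation counting the new assertions rely
on is plain Mockito. A sketch, with a mocked List standing in for the
DataNode (names hypothetical):

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import java.util.List;

    public class VerifyCountSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        List<String> sink = mock(List.class);
        sink.add("blk_1");
        sink.add("blk_2");
        // Passes only if add(...) ran exactly twice -- the same shape as
        // verify(datanode, times(50)).notifyNamenodeDeletedBlock(any(), any())
        // in the test above.
        verify(sink, times(2)).add(anyString());
      }
    }
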




[hadoop] 01/03: Revert "HDFS-13179. TestLazyPersistReplicaRecovery#testDnRestartWithSavedReplicas fails intermittently. Contributed by Ahmed Hussein."

Posted by so...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b7b3164ce7cb547ee615a910443240e1e1f9a0ed
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Fri Jun 5 12:48:07 2020 +0100

    Revert "HDFS-13179. TestLazyPersistReplicaRecovery#testDnRestartWithSavedReplicas fails intermittently. Contributed by Ahmed Hussein."
    
    This reverts commit ed59412434027b133f2774af69228510f09a0264.
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java     |  5 ---
 .../impl/TestLazyPersistReplicaRecovery.java       | 41 +++-------------------
 2 files changed, 5 insertions(+), 41 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2928a52..8ad83c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3207,11 +3207,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @VisibleForTesting
-  public int getNonPersistentReplicas() {
-    return ramDiskReplicaTracker.numReplicasNotPersisted();
-  }
-
-  @VisibleForTesting
   public void setTimer(Timer newTimer) {
     this.timer = newTimer;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
index 5fa470c..537f9e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
@@ -19,13 +19,6 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
@@ -34,7 +27,6 @@ import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
-import static org.junit.Assert.assertTrue;
 
 public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   @Test
@@ -42,10 +34,6 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
       throws IOException, InterruptedException, TimeoutException {
 
     getClusterBuilder().build();
-    FSNamesystem fsn = cluster.getNamesystem();
-    final DataNode dn = cluster.getDataNodes().get(0);
-    DatanodeDescriptor dnd =
-        NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
 
@@ -54,17 +42,14 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
 
     // Sleep for a short time to allow the lazy writer thread to do its job.
     // However the block replica should not be evicted from RAM_DISK yet.
-    FsDatasetImpl fsDImpl = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
-    GenericTestUtils
-        .waitFor(() -> fsDImpl.getNonPersistentReplicas() == 0, 10,
-            3 * LAZY_WRITER_INTERVAL_SEC * 1000);
+    Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
     ensureFileReplicasOnStorageType(path1, RAM_DISK);
 
     LOG.info("Restarting the DataNode");
-    assertTrue("DN did not restart properly",
-        cluster.restartDataNode(0, true));
-    // wait for blockreport
-    waitForBlockReport(dn, dnd);
+    cluster.restartDataNode(0, true);
+    cluster.waitActive();
+    triggerBlockReport();
+
     // Ensure that the replica is now on persistent storage.
     ensureFileReplicasOnStorageType(path1, DEFAULT);
   }
@@ -88,20 +73,4 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
     // Ensure that the replica is still on transient storage.
     ensureFileReplicasOnStorageType(path1, RAM_DISK);
   }
-
-  private boolean waitForBlockReport(final DataNode dn,
-      final DatanodeDescriptor dnd) throws IOException, InterruptedException {
-    final DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
-    final long lastCount = storage.getBlockReportCount();
-    dn.triggerBlockReport(
-        new BlockReportOptions.Factory().setIncremental(false).build());
-    try {
-      GenericTestUtils
-          .waitFor(() -> lastCount != storage.getBlockReportCount(), 10, 10000);
-    } catch (TimeoutException te) {
-      LOG.error("Timeout waiting for block report for {}", dnd);
-      return false;
-    }
-    return true;
-  }
 }
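
For context, the change being reverted had replaced fixed sleeps with
condition polling. Stripped of the HDFS specifics, that polling pattern is
sketched below; the helper name and the condition are hypothetical, and
GenericTestUtils.waitFor() in the removed code plays the same role:

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public final class PollUtil {
      private PollUtil() {
      }

      /**
       * Re-check the condition every intervalMs until it holds, giving up
       * after timeoutMs rather than sleeping for a fixed worst-case period.
       */
      public static void waitFor(BooleanSupplier condition, long intervalMs,
          long timeoutMs) throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "Condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }
    }

A test would call, say, PollUtil.waitFor(() -> pendingReplicas() == 0, 10,
30000), with pendingReplicas() a hypothetical stand-in for the check being
polled; this revert trades that back for a fixed Thread.sleep() plus an
explicit block report trigger.
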


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org