You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-issues@hadoop.apache.org by GitBox <gi...@apache.org> on 2020/08/22 11:56:29 UTC

[GitHub] [hadoop] steveloughran commented on a change in pull request #2235: HDFS-15484 Add new method batchRename for DistributedFileSystem and W…

steveloughran commented on a change in pull request #2235:
URL: https://github.com/apache/hadoop/pull/2235#discussion_r475080490



##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BatchOpsException.java
##########
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Thrown when break during a batch operation .
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BatchOpsException extends IOException {
+  private static final long serialVersionUID = 1L;
+  private static final String TAG_INDEX = "index";
+  private static final String TAG_TOTAL = "total";
+  private static final String TAG_REASON = "reason";
+
+  /**
+   * Used by RemoteException to instantiate an BatchOpsException.
+   */
+  public BatchOpsException(String msg) {
+    super(msg);
+  }
+
+  public BatchOpsException(long index, long total, Throwable cause) {
+    this(index, total,
+        cause.getClass().getName() + ": " + cause.getMessage());
+  }
+
+  public BatchOpsException(long index, long total,
+                           String cause) {
+    super("Batch operation break! " +

Review comment:
       not sure about "break!"

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchRename.java
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BatchOperations;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BatchOpsException;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestBatchRename {
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static DistributedFileSystem dfs;
+  private static WebHdfsFileSystem webHdfs;
+  private Path root = new Path("/test/batchrename/");
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1)
+        .build();
+    dfs = cluster.getFileSystem();
+
+    webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsConstants.WEBHDFS_SCHEME);
+  }
+
+  @AfterClass
+  public static void afterClass() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testhasPathCapability() throws Exception {
+    assertTrue("DistributedFileSystem should has batch rename capbility",
+        dfs.hasPathCapability(root, "fs.capability.batch.rename"));
+  }
+
+  private List<String> generateBatchFiles(
+      int totalNum, int createNum, final Path dir, String tag)
+      throws IOException {
+    List<String> files = new ArrayList<>();
+    for (int i = 0; i < totalNum; i++) {
+      Path p = new Path(dir, tag + "_" + i);
+      if (createNum-- > 0) {
+        DFSTestUtil.createFile(dfs, p, 10, (short) 1, 0);
+        assertTrue(dfs.exists(p));
+      } else {
+        assertFalse(dfs.exists(p));
+      }
+      files.add(p.toString());
+    }
+    return files;
+  }
+
+  private void testBatchRename(BatchOperations batchFS) throws Exception {
+    Path testDir = new Path(root, "testBatchRename");
+    assertTrue(dfs.mkdirs(testDir));
+
+    List<String> srcs = generateBatchFiles(
+        2, 2, testDir, "src");
+    List<String> dsts =generateBatchFiles(
+        2, 0, testDir, "dst");
+
+    batchFS.batchRename(
+        srcs.toArray(new String[srcs.size()]),
+        dsts.toArray(new String[dsts.size()]));
+
+    for (String f : srcs) {
+      assertFalse(dfs.exists(new Path(f)));
+    }
+    for (String f : dsts) {
+      assertTrue(dfs.exists(new Path(f)));
+      dfs.delete(new Path(f), true);
+    }
+  }
+
+  @Test
+  public void testBatchRaname() throws Exception {
+    testBatchRename(dfs);
+    testBatchRename(webHdfs);
+  }
+
+  private void testInvalidInput(BatchOperations batchFS) throws Exception {
+    List<String> srcs = new ArrayList<>();
+    srcs.add("/testInvalidInput_Mismatch");
+    List<String> dsts = new ArrayList<>();
+    LambdaTestUtils.intercept(InvalidPathException.class,
+        "mismatch batch path",
+        () -> batchFS.batchRename(
+            srcs.toArray(new String[srcs.size()]),
+            dsts.toArray(new String[dsts.size()])));
+  }
+
+  @Test
+  public void testInvalidInput() throws Exception {
+    testInvalidInput(dfs);
+    testInvalidInput(webHdfs);
+  }
+
+   // rename /src_1:/src_2(not existing) to /dst_1:/dst_2
+  private void testPartialSuccess1(BatchOperations batchFS) throws Exception {
+    Path testDir = new Path(root, "partial_success");
+    assertTrue(dfs.mkdirs(testDir));
+
+    List<String> srcs =  generateBatchFiles(
+        2, 1, testDir, "src");
+    List<String> dsts = generateBatchFiles(
+        2, 0, testDir, "dst");
+    try {
+      batchFS.batchRename(
+          srcs.toArray(new String[srcs.size()]),
+          dsts.toArray(new String[dsts.size()]));
+    } catch (BatchOpsException e) {
+      long index = e.getIndex();
+      assertEquals(1, index);
+      long total = e.getTotal();
+      assertEquals(2, total);
+
+      String reason = e.getReason();
+      assertTrue(reason.contains("FileNotFoundException"));
+
+      for (int i = 0; i < index; i++) {
+        Path p = new Path(testDir, "src_" + i);
+        assertFalse(dfs.exists(p));
+      }
+      for (int i = 0; i < index; i++) {
+        Path p = new Path(testDir, "dst_" + i);
+        assertTrue(dfs.exists(p));
+        dfs.delete(p, true);
+      }
+    }
+  }
+
+   // rename src_1:src_1/subdir to /dst_1:/dst_2
+  private void testPartialSuccess2(BatchOperations batchFS) throws Exception {
+    Path testDir = new Path(root, "partial_success");
+    List<String> srcs = new ArrayList<>();
+    Path src1 = new Path(testDir, "src_1");
+    assertTrue(dfs.mkdirs(src1));
+    srcs.add(src1.toString());
+    Path src1Subdir = new Path(src1, "subdir");
+    assertTrue(dfs.mkdirs(src1Subdir));
+    srcs.add(src1Subdir.toString());
+
+    List<String> dsts = generateBatchFiles(
+        2, 0, testDir, "dst");
+    try {
+      batchFS.batchRename(
+          srcs.toArray(new String[srcs.size()]),
+          dsts.toArray(new String[dsts.size()]));
+    } catch (BatchOpsException e) {
+      long index = e.getIndex();
+      assertEquals(1, index);
+      long total = e.getTotal();
+      assertEquals(2, total);
+      String reason = e.getReason();
+      assertTrue(reason.contains("FileNotFoundException"));
+      for (int i = 0; i < index; i++) {
+        Path p = new Path(testDir, "src_" + i);
+        assertFalse(dfs.exists(p));
+      }
+      for (int i = 0; i < index; i++) {
+        Path p = new Path(testDir, "dst_" + i);
+        assertTrue(dfs.exists(p));
+        dfs.delete(p, true);
+      }
+    }
+  }
+
+  // rename src_1:src_2 /dst_1:/dst_1
+  private void testPartialSuccess3(BatchOperations batchFS) throws Exception {
+    Path testDir = new Path(root, "partial_success_3");
+    List<String> srcs =  generateBatchFiles(
+        2, 2, testDir, "src");
+    List<String> dsts = generateBatchFiles(
+        1, 0, testDir, "dst");
+    dsts.add(dsts.get(0));
+
+    try {
+      batchFS.batchRename(
+          srcs.toArray(new String[srcs.size()]),
+          dsts.toArray(new String[dsts.size()]));
+    } catch (BatchOpsException e) {
+      long index = e.getIndex();
+      assertEquals(1, index);

Review comment:
       all tests should add a message about what a failure means. Imagine you have a Yetus test-run failure and are trying to debug from the log only.
   

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
##########
@@ -1609,6 +1610,26 @@ public void rename(String src, String dst, Options.Rename... options)
     }
   }
 
+  /**
+   * Rename a batch files or directories.
+   * @see ClientProtocol#batchRename(String[] , String[], Options.Rename...)
+   */
+  public void batchRename(String[] srcs, String[] dsts,

Review comment:
       better as a list of <src, dest> pairs, so it's obvious about the mapping. Add javadocs

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchRename.java
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BatchOperations;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BatchOpsException;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestBatchRename {

Review comment:
       extend AbstractHadoopTestBase for its setup (thread naming, timeout).
   

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
##########
@@ -139,4 +139,11 @@ private CommonPathCapabilities() {
   public static final String FS_MULTIPART_UPLOADER =
       "fs.capability.multipart.uploader";
 
+  /**
+   * Does the store support multipart uploading?

Review comment:
       fix

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BatchOpsException.java
##########
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Thrown when break during a batch operation .
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BatchOpsException extends IOException {
+  private static final long serialVersionUID = 1L;
+  private static final String TAG_INDEX = "index";
+  private static final String TAG_TOTAL = "total";
+  private static final String TAG_REASON = "reason";
+
+  /**
+   * Used by RemoteException to instantiate an BatchOpsException.
+   */
+  public BatchOpsException(String msg) {
+    super(msg);
+  }
+
+  public BatchOpsException(long index, long total, Throwable cause) {
+    this(index, total,
+        cause.getClass().getName() + ": " + cause.getMessage());

Review comment:
       cause.toString(); message may be null
   

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BatchOpsException.java
##########
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Thrown when break during a batch operation .
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BatchOpsException extends IOException {
+  private static final long serialVersionUID = 1L;

Review comment:
       needs a real serial version ID; your IDE can help there
   

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchRename.java
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;

Review comment:
       separate import ordering as usual

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchRename.java
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BatchOperations;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BatchOpsException;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestBatchRename {
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static DistributedFileSystem dfs;
+  private static WebHdfsFileSystem webHdfs;
+  private Path root = new Path("/test/batchrename/");
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1)
+        .build();
+    dfs = cluster.getFileSystem();
+
+    webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsConstants.WEBHDFS_SCHEME);
+  }
+
+  @AfterClass
+  public static void afterClass() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testhasPathCapability() throws Exception {
+    assertTrue("DistributedFileSystem should has batch rename capbility",
+        dfs.hasPathCapability(root, "fs.capability.batch.rename"));

Review comment:
       refer to constant
   

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchRename.java
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BatchOperations;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BatchOpsException;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestBatchRename {
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static DistributedFileSystem dfs;
+  private static WebHdfsFileSystem webHdfs;
+  private Path root = new Path("/test/batchrename/");
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1)
+        .build();
+    dfs = cluster.getFileSystem();
+
+    webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsConstants.WEBHDFS_SCHEME);
+  }
+
+  @AfterClass
+  public static void afterClass() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testhasPathCapability() throws Exception {
+    assertTrue("DistributedFileSystem should has batch rename capbility",
+        dfs.hasPathCapability(root, "fs.capability.batch.rename"));
+  }
+
+  private List<String> generateBatchFiles(
+      int totalNum, int createNum, final Path dir, String tag)
+      throws IOException {
+    List<String> files = new ArrayList<>();
+    for (int i = 0; i < totalNum; i++) {
+      Path p = new Path(dir, tag + "_" + i);
+      if (createNum-- > 0) {
+        DFSTestUtil.createFile(dfs, p, 10, (short) 1, 0);
+        assertTrue(dfs.exists(p));

Review comment:
       ContractTestUtils assertFile() (or similar)
   

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchOperations.java
##########
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Interface filesystems MAY implement to offer a batched operations.
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface BatchOperations {

Review comment:
       BatchRename

##########
File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchOperations.java
##########
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Interface filesystems MAY implement to offer a batched operations.
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface BatchOperations {
+
+  /**
+   * Batched rename API that rename a batch of files.
+   *
+   * @param srcs source file list.
+   * @param dsts target file list.
+   * @throws IOException failure exception.
+   */
+   void batchRename(String[] srcs, String[] dsts, Options.Rename... options)

Review comment:
       Prefer list or array of pairs. We don't have any pair type in hadoop here and can't use commons-lang as we don't want that in our public API. Maybe we should add one to org.apache.hadoop.common.utils and use it here amongst other places. I could certainly use it (I may be able to add this to HADOOP-16830 for you to pick up)
   
   Return a future<RenameResult> where we define RenameResult as something (class/interface) which implements IOStatisticsSource.

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
##########
@@ -732,6 +732,14 @@ protected Response put(
         return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
       }
     }
+    case BATCH_RENAME:
+    {
+      validateOpParams(op, destination);
+      final EnumSet<Options.Rename> s = renameOptions.getValue();
+      cp.batchRename(fullpath.split(":"), destination.getValue().split(":"),

Review comment:
       what will be done here if there's a ":" in paths?
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-issues-help@hadoop.apache.org