Posted to commits@pinot.apache.org by jl...@apache.org on 2019/01/10 00:05:26 UTC

[incubator-pinot] branch pinotfs-copy-ensure-parent-dir-exist updated (5a78693 -> 737b962)

This is an automated email from the ASF dual-hosted git repository.

jlli pushed a change to branch pinotfs-copy-ensure-parent-dir-exist
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git.


 discard 5a78693  Make different PinotFSes have the same behaviors
     new 737b962  Make different PinotFSes have the same behaviors

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (5a78693)
            \
             N -- N -- N   refs/heads/pinotfs-copy-ensure-parent-dir-exist (737b962)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../linkedin/pinot/filesystem/LocalPinotFS.java    |  3 +-
 .../com/linkedin/pinot/filesystem/PinotFS.java     |  2 +-
 .../pinot/filesystem/LocalPinotFSTest.java         | 18 +++++++---
 .../linkedin/pinot/filesystem/HadoopPinotFS.java   | 42 +++++++++++++++++-----
 .../pinot/filesystem/HadoopPinotFSTest.java        | 18 +++++++---
 5 files changed, 62 insertions(+), 21 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org


[incubator-pinot] 01/01: Make different PinotFSes have the same behaviors

Posted by jl...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jlli pushed a commit to branch pinotfs-copy-ensure-parent-dir-exist
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit 737b962587c682805b6032442675916d807ef056
Author: Jack Li(Analytics Engineering) <jl...@jlli-mn1.linkedin.biz>
AuthorDate: Wed Jan 9 14:28:37 2019 -0800

    Make different PinotFSes have the same behaviors
---
 .../linkedin/pinot/filesystem/LocalPinotFS.java    |   3 +-
 .../com/linkedin/pinot/filesystem/PinotFS.java     |   6 +-
 .../pinot/filesystem/LocalPinotFSTest.java         |  18 ++-
 .../linkedin/pinot/filesystem/HadoopPinotFS.java   | 119 ++++++++++++---
 .../pinot/filesystem/HadoopPinotFSTest.java        | 167 +++++++++++----------
 5 files changed, 204 insertions(+), 109 deletions(-)

diff --git a/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/LocalPinotFS.java b/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/LocalPinotFS.java
index 7ff2c19..a10cf7c 100644
--- a/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/LocalPinotFS.java
+++ b/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/LocalPinotFS.java
@@ -94,7 +94,8 @@ public class LocalPinotFS extends PinotFS {
   public boolean copy(URI srcUri, URI dstUri) throws IOException {
     File srcFile = new File(decodeURI(srcUri.getRawPath()));
     File dstFile = new File(decodeURI(dstUri.getRawPath()));
-    if (dstFile.exists()) {
+    // delete dst only if dst isn't under src.
+    if (!dstFile.getCanonicalPath().startsWith(srcFile.getCanonicalPath()) && dstFile.exists()) {
       FileUtils.deleteQuietly(dstFile);
     }
     if (srcFile.isDirectory()) {
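
For context, a minimal standalone sketch (not part of this commit) of the nested-destination
check used above; the class and helper names are hypothetical, and plain java.io.File paths
are assumed:

    import java.io.File;
    import java.io.IOException;

    public class NestedPathCheckSketch {
      // Mirrors the canonical-path prefix test in LocalPinotFS.copy:
      // true when dst resolves to a path inside (or equal to) src.
      static boolean isDstUnderSrc(File srcFile, File dstFile) throws IOException {
        return dstFile.getCanonicalPath().startsWith(srcFile.getCanonicalPath());
      }

      public static void main(String[] args) throws IOException {
        File src = new File("/tmp/data");
        System.out.println(isDstUnderSrc(src, new File("/tmp/data/backup"))); // true: dst is kept
        System.out.println(isDstUnderSrc(src, new File("/tmp/other")));       // false: dst may be deleted before the copy
      }
    }
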
diff --git a/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/PinotFS.java b/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/PinotFS.java
index f5ca54c..b808d5e 100644
--- a/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/PinotFS.java
+++ b/pinot-filesystem/src/main/java/com/linkedin/pinot/filesystem/PinotFS.java
@@ -68,7 +68,7 @@ public abstract class PinotFS implements Closeable {
 
   /**
    * Same as move except the srcUri is not retained. For example, if x/y/z is copied to a/b/c, x/y/z will be retained
-   * and x/y/z will also be present as a/b/c
+   * and x/y/z will also be present as a/b/c; if x is copied to x/y, all the original files under x/y will be kept.
    * @param srcUri URI of the original file
    * @param dstUri URI of the final file location
    * @return true if copy is successful
@@ -95,7 +95,7 @@ public abstract class PinotFS implements Closeable {
   public abstract long length(URI fileUri) throws IOException;
 
   /**
-   * Lists all the files at the location provided. Lists recursively.
+   * Lists all the files and directories at the location provided. Lists recursively.
    * Throws exception if this abstract pathname is not valid, or if
    * an I/O error occurs.
    * @param fileUri location of file
@@ -130,7 +130,7 @@ public abstract class PinotFS implements Closeable {
    * @return true if uri is a directory, false otherwise.
    * @throws Exception if uri is not valid or present
    */
-  public abstract boolean isDirectory(URI uri);
+  public abstract boolean isDirectory(URI uri) throws IOException;
 
   /**
    * Returns the age of the file
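
As an illustration of the copy contract described in the PinotFS javadoc above (a sketch, not
part of this commit): copying a directory into one of its own subdirectories keeps the files
already present under the destination. The paths below are hypothetical, and LocalPinotFS is
used only as a convenient implementation:

    import java.io.File;
    import java.net.URI;

    public class PinotFSCopySemanticsSketch {
      public static void main(String[] args) throws Exception {
        PinotFS fs = new LocalPinotFS();
        URI src = new File("/tmp/x").toURI();            // x contains y plus other entries
        URI dstInsideSrc = new File("/tmp/x/y").toURI(); // destination nested under the source
        fs.copy(src, dstInsideSrc);                      // files already under x/y are retained
        for (String path : fs.listFiles(dstInsideSrc, false)) {
          System.out.println(path);
        }
      }
    }
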
diff --git a/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java b/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java
index 665aa2d..d53881a 100644
--- a/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java
+++ b/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java
@@ -110,8 +110,9 @@ public class LocalPinotFSTest {
     localPinotFS.move(newAbsoluteTempDirPath.toURI(), newAbsoluteTempDirPath3.toURI(), true);
     Assert.assertFalse(localPinotFS.exists(newAbsoluteTempDirPath.toURI()));
     Assert.assertTrue(localPinotFS.exists(newAbsoluteTempDirPath3.toURI()));
-    Assert.assertTrue(localPinotFS.exists(new File(newAbsoluteTempDirPath3, "testDir").toURI()));
-    Assert.assertTrue(localPinotFS.exists(new File(new File(newAbsoluteTempDirPath3, "testDir"), "testFile").toURI()));
+    File testDirUnderNewAbsoluteTempDirPath3 = new File(newAbsoluteTempDirPath3, "testDir");
+    Assert.assertTrue(localPinotFS.exists(testDirUnderNewAbsoluteTempDirPath3.toURI()));
+    Assert.assertTrue(localPinotFS.exists(new File(testDirUnderNewAbsoluteTempDirPath3, "testFile").toURI()));
 
     // Check file copy to location where something already exists still works
     localPinotFS.copy(testFileUri, thirdTestFile.toURI());
@@ -119,6 +120,14 @@ public class LocalPinotFSTest {
     Assert.assertEquals(0, localPinotFS.length(secondTestFileUri));
     Assert.assertTrue(localPinotFS.exists(thirdTestFile.toURI()));
 
+    // Check that destination being directory within the source directory still works
+    File anotherFileUnderAbsoluteThreeDir = new File(newAbsoluteTempDirPath3, "anotherFile");
+    Assert.assertFalse(localPinotFS.exists(anotherFileUnderAbsoluteThreeDir.toURI()));
+    Assert.assertTrue(anotherFileUnderAbsoluteThreeDir.createNewFile());
+    Assert.assertTrue(localPinotFS.exists(anotherFileUnderAbsoluteThreeDir.toURI()));
+    localPinotFS.copy(newAbsoluteTempDirPath3.toURI(), testDirUnderNewAbsoluteTempDirPath3.toURI());
+    Assert.assertEquals(localPinotFS.listFiles(testDirUnderNewAbsoluteTempDirPath3.toURI(), false).length, 3);
+
     // Check that method deletes dst directory during move and is successful by overwriting dir
     Assert.assertTrue(_newTmpDir.exists());
     // create a file in the dst folder
@@ -146,10 +155,9 @@ public class LocalPinotFSTest {
     Assert.assertTrue(srcFile.exists());
     Assert.assertTrue(dstFile.exists());
 
-    //Check that copying a folder to a non-existent destination folder works
+    // Check that copying a folder to a non-existent destination folder works
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
-    localPinotFS.mkdir(_absoluteTmpDirPath.toURI());
     dstFile = new File(_nonExistentTmpFolder.getPath() + "/srcFile" );
     Assert.assertFalse(dstFile.exists());
     Assert.assertTrue(localPinotFS.copy(_absoluteTmpDirPath.toURI(), _nonExistentTmpFolder.toURI()));
@@ -169,7 +177,7 @@ public class LocalPinotFSTest {
     Assert.assertFalse(srcFile.exists());
     Assert.assertTrue(dstFile.exists());
 
-    //Check that moving a folder to a non-existent destination folder works
+    // Check that moving a folder to a non-existent destination folder works
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
     srcFile = new File(_absoluteTmpDirPath, "srcFile");
diff --git a/pinot-hadoop-filesystem/src/main/java/com/linkedin/pinot/filesystem/HadoopPinotFS.java b/pinot-hadoop-filesystem/src/main/java/com/linkedin/pinot/filesystem/HadoopPinotFS.java
index a74f6f2..b0e8f26 100644
--- a/pinot-hadoop-filesystem/src/main/java/com/linkedin/pinot/filesystem/HadoopPinotFS.java
+++ b/pinot-hadoop-filesystem/src/main/java/com/linkedin/pinot/filesystem/HadoopPinotFS.java
@@ -22,12 +22,12 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import org.apache.commons.configuration.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,6 +71,9 @@ public class HadoopPinotFS extends PinotFS {
 
   @Override
   public boolean delete(URI segmentUri, boolean forceDelete) throws IOException {
+    if (!exists(segmentUri)) {
+      return true;
+    }
     // Returns false if we are moving a directory and that directory is not empty
     if (isDirectory(segmentUri)
         && listFiles(segmentUri, false).length > 0
@@ -82,9 +85,18 @@ public class HadoopPinotFS extends PinotFS {
 
   @Override
   public boolean move(URI srcUri, URI dstUri, boolean overwrite) throws IOException {
-    if (exists(dstUri) && !overwrite) {
+    if (!exists(srcUri)) {
+      LOGGER.warn("Source {} does not exist", srcUri);
       return false;
     }
+    if (exists(dstUri)) {
+      if (!overwrite) {
+        return false;
+      } else {
+        delete(dstUri, true);
+        mkdir(dstUri);
+      }
+    }
     return _hadoopFS.rename(new Path(srcUri), new Path(dstUri));
   }
 
@@ -94,28 +106,82 @@ public class HadoopPinotFS extends PinotFS {
    */
   @Override
   public boolean copy(URI srcUri, URI dstUri) throws IOException {
-    Path source = new Path(srcUri);
-    Path target = new Path(dstUri);
-    RemoteIterator<LocatedFileStatus> sourceFiles = _hadoopFS.listFiles(source, true);
-    if (sourceFiles != null) {
-      while (sourceFiles.hasNext()) {
-        boolean succeeded = FileUtil.copy(_hadoopFS, sourceFiles.next().getPath(), _hadoopFS, target, true, _hadoopConf);
-        if (!succeeded) {
-          return false;
+    if (!exists(srcUri)) {
+      LOGGER.warn("Source {} does not exist", srcUri);
+      return false;
+    }
+    if (srcUri.equals(dstUri)) {
+      LOGGER.info("Source {} and destination {} are the same.", srcUri, dstUri);
+      return true;
+    }
+    if (!dstUri.getRawPath().startsWith(srcUri.getRawPath()) && exists(dstUri)) {
+      delete(dstUri, true);
+    }
+    if (isDirectory(srcUri)) {
+      mkdir(dstUri);
+
+      List<String> exclusionList = null;
+      // Cater for destination being directory within the source directory
+      if (dstUri.getRawPath().startsWith(srcUri.getRawPath())) {
+        FileStatus[] srcFiles = listStatus(new Path(srcUri), false);
+        exclusionList = new ArrayList<>(srcFiles.length);
+        for (FileStatus srcFile : srcFiles) {
+          Path dstPath = new Path(dstUri.getRawPath(), srcFile.getPath().getName());
+          exclusionList.add(dstPath.toString());
         }
       }
+      doCopyDirectory(srcUri, dstUri, exclusionList);
+    } else {
+      doCopyFile(srcUri, dstUri);
     }
     return true;
   }
 
+  /**
+   * Does the actual copy behavior on directory.
+   */
+  private void doCopyDirectory(URI srcUri, URI dstUri, List<String> exclusionList) throws IOException {
+    FileStatus[] srcFiles = listStatus(new Path(srcUri), true);
+    for (FileStatus srcFile : srcFiles) {
+      Path srcPath = srcFile.getPath();
+      Path dstPath = new Path(dstUri.getPath(), srcFile.getPath().getName());
+      if (exclusionList == null || !exclusionList.contains(srcPath.toUri().getRawPath())) {
+        if (isDirectory(srcPath.toUri())) {
+          doCopyDirectory(srcPath.toUri(), dstPath.toUri(), exclusionList);
+        } else {
+          doCopyFile(srcPath.toUri(), dstPath.toUri());
+        }
+      }
+    }
+  }
+
+  /**
+   * Does the actual copy behavior on file.
+   */
+  private boolean doCopyFile(URI srcUri, URI dstUri) throws IOException {
+    Path source = new Path(srcUri);
+    Path target = new Path(dstUri);
+    URI parentUri = target.getParent().toUri();
+    if (!exists(parentUri)) {
+      mkdir(parentUri);
+    }
+    return FileUtil.copy(_hadoopFS, source, _hadoopFS, target, false, _hadoopConf);
+  }
+
+  /**
+   * Checks whether the file or directory at the given URI exists.
+   */
   @Override
   public boolean exists(URI fileUri) throws IOException {
-    return _hadoopFS.exists(new Path(fileUri));
+    return fileUri != null && _hadoopFS.exists(new Path(fileUri));
   }
 
   @Override
   public long length(URI fileUri) throws IOException {
-    return _hadoopFS.getLength(new Path(fileUri));
+    if (isDirectory(fileUri)) {
+      throw new IllegalArgumentException("File is directory");
+    }
+    return _hadoopFS.getFileStatus(new Path(fileUri)).getLen();
   }
 
   @Override
@@ -123,10 +189,10 @@ public class HadoopPinotFS extends PinotFS {
     ArrayList<String> filePathStrings = new ArrayList<>();
     Path path = new Path(fileUri);
     if (_hadoopFS.exists(path)) {
-      RemoteIterator<LocatedFileStatus> fileListItr = _hadoopFS.listFiles(path, recursive);
-      while (fileListItr != null && fileListItr.hasNext()) {
-        LocatedFileStatus file = fileListItr.next();
-        filePathStrings.add(file.getPath().toUri().toString());
+      // _hadoopFS.listFiles(path, false) will not return directories as files, thus use listStatus(path) here.
+      FileStatus[] files = listStatus(path, recursive);
+      for (FileStatus file : files) {
+        filePathStrings.add(file.getPath().toUri().getRawPath());
       }
     } else {
       throw new IllegalArgumentException("segmentUri is not valid");
@@ -136,6 +202,20 @@ public class HadoopPinotFS extends PinotFS {
     return retArray;
   }
 
+  private FileStatus[] listStatus(Path path, boolean recursive) throws IOException {
+    List<FileStatus> fileStatuses = new ArrayList<>();
+    FileStatus[] files = _hadoopFS.listStatus(path);
+    for (FileStatus file : files) {
+      fileStatuses.add(file);
+      if (file.isDirectory() && recursive) {
+        List<FileStatus> subFiles = Arrays.asList(listStatus(file.getPath(), true));
+        fileStatuses.addAll(subFiles);
+      }
+    }
+    FileStatus[] fileStatusesArr = new FileStatus[fileStatuses.size()];
+    return fileStatuses.toArray(fileStatusesArr);
+  }
+
   @Override
   public void copyToLocalFile(URI srcUri, File dstFile) throws Exception {
     LOGGER.debug("starting to fetch segment from hdfs");
@@ -172,9 +252,8 @@ public class HadoopPinotFS extends PinotFS {
   }
 
   @Override
-  public boolean isDirectory(URI uri) {
-    FileStatus fileStatus = new FileStatus();
-    fileStatus.setPath(new Path(uri));
+  public boolean isDirectory(URI uri) throws IOException {
+    FileStatus fileStatus = _hadoopFS.getFileStatus(new Path(uri));
     return fileStatus.isDirectory();
   }
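
For reference, a minimal usage sketch (not part of this commit) of the HadoopPinotFS behavior
standardized above, assuming an empty configuration resolves to the local file system as in the
test below; the temp paths are hypothetical:

    import java.io.File;
    import org.apache.commons.configuration.PropertiesConfiguration;

    public class HadoopPinotFSUsageSketch {
      public static void main(String[] args) throws Exception {
        HadoopPinotFS fs = new HadoopPinotFS();
        fs.init(new PropertiesConfiguration());

        File dir = new File("/tmp/hadoop-pinot-fs-demo");
        fs.mkdir(dir.toURI());

        // length() on a directory now throws IllegalArgumentException,
        // matching the LocalPinotFS behavior exercised in the tests.
        try {
          fs.length(dir.toURI());
        } catch (IllegalArgumentException expected) {
          System.out.println("length() rejects directories");
        }

        // delete() on a path that does not exist now reports success.
        System.out.println(fs.delete(new File("/tmp/does-not-exist").toURI(), true));
      }
    }
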
 
diff --git a/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java b/pinot-hadoop-filesystem/src/test/java/com/linkedin/pinot/filesystem/HadoopPinotFSTest.java
similarity index 52%
copy from pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java
copy to pinot-hadoop-filesystem/src/test/java/com/linkedin/pinot/filesystem/HadoopPinotFSTest.java
index 665aa2d..a791a00 100644
--- a/pinot-filesystem/src/test/java/com/linkedin/pinot/filesystem/LocalPinotFSTest.java
+++ b/pinot-hadoop-filesystem/src/test/java/com/linkedin/pinot/filesystem/HadoopPinotFSTest.java
@@ -1,55 +1,44 @@
-/**
- * Copyright (C) 2014-2018 LinkedIn Corp. (pinot-core@linkedin.com)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *         http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package com.linkedin.pinot.filesystem;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.PropertiesConfiguration;
 import org.apache.commons.io.FileUtils;
 import org.testng.Assert;
 import org.testng.annotations.AfterClass;
 import org.testng.annotations.BeforeClass;
 import org.testng.annotations.Test;
 
-import static org.testng.Assert.*;
 
-
-public class LocalPinotFSTest {
+public class HadoopPinotFSTest {
   private File testFile;
   private File _absoluteTmpDirPath;
   private File _newTmpDir;
   private File _nonExistentTmpFolder;
+  private HadoopPinotFS _hadoopPinotFS;
 
   @BeforeClass
-  public void setUp() {
-    _absoluteTmpDirPath = new File(System.getProperty("java.io.tmpdir"), LocalPinotFSTest.class.getSimpleName() + "first");
+  public void setup() {
+    _absoluteTmpDirPath =
+        new File(System.getProperty("java.io.tmpdir"), HadoopPinotFSTest.class.getSimpleName() + "first");
     FileUtils.deleteQuietly(_absoluteTmpDirPath);
     Assert.assertTrue(_absoluteTmpDirPath.mkdir(), "Could not make directory " + _absoluteTmpDirPath.getPath());
     try {
       testFile = new File(_absoluteTmpDirPath, "testFile");
       Assert.assertTrue(testFile.createNewFile(), "Could not create file " + testFile.getPath());
+      Assert.assertTrue(testFile.exists());
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
 
-    _newTmpDir = new File(System.getProperty("java.io.tmpdir"), LocalPinotFSTest.class.getSimpleName() + "second");
+    _newTmpDir = new File(System.getProperty("java.io.tmpdir"), HadoopPinotFSTest.class.getSimpleName() + "second");
     FileUtils.deleteQuietly(_newTmpDir);
     Assert.assertTrue(_newTmpDir.mkdir(), "Could not make directory " + _newTmpDir.getPath());
 
-    _nonExistentTmpFolder = new File(System.getProperty("java.io.tmpdir"), LocalPinotFSTest.class.getSimpleName() + "nonExistentParent/nonExistent");
+    _nonExistentTmpFolder = new File(System.getProperty("java.io.tmpdir"),
+        HadoopPinotFSTest.class.getSimpleName() + "nonExistentParent/nonExistent");
 
     _absoluteTmpDirPath.deleteOnExit();
     _newTmpDir.deleteOnExit();
@@ -63,27 +52,34 @@ public class LocalPinotFSTest {
   }
 
   @Test
-  public void testFS() throws Exception {
-    LocalPinotFS localPinotFS = new LocalPinotFS();
-    URI testFileUri = testFile.toURI();
+  public void testHadoopPinotFS() throws Exception {
+    _hadoopPinotFS = new HadoopPinotFS();
+
+    final Configuration conf = new PropertiesConfiguration();
+    _hadoopPinotFS.init(conf);
+
     // Check whether a directory exists
-    Assert.assertTrue(localPinotFS.exists(_absoluteTmpDirPath.toURI()));
-    Assert.assertTrue(localPinotFS.lastModified(_absoluteTmpDirPath.toURI()) > 0L);
-    Assert.assertTrue(localPinotFS.isDirectory(_absoluteTmpDirPath.toURI()));
+    Assert.assertTrue(_hadoopPinotFS.exists(_absoluteTmpDirPath.toURI()));
+    Assert.assertTrue(_hadoopPinotFS.lastModified(_absoluteTmpDirPath.toURI()) > 0L);
+    Assert.assertTrue(_hadoopPinotFS.isDirectory(_absoluteTmpDirPath.toURI()));
+
+    URI testFileUri = testFile.toURI();
     // Check whether a file exists
-    Assert.assertTrue(localPinotFS.exists(testFileUri));
-    Assert.assertFalse(localPinotFS.isDirectory(testFileUri));
+    Assert.assertTrue(_hadoopPinotFS.exists(testFileUri));
+    Assert.assertFalse(_hadoopPinotFS.isDirectory(testFileUri));
 
     File file = new File(_absoluteTmpDirPath, "secondTestFile");
     URI secondTestFileUri = file.toURI();
     // Check that file does not exist
-    Assert.assertTrue(!localPinotFS.exists(secondTestFileUri));
-
-    localPinotFS.copy(testFileUri, secondTestFileUri);
-    Assert.assertEquals(2, localPinotFS.listFiles(_absoluteTmpDirPath.toURI(), true).length);
+    Assert.assertFalse(_hadoopPinotFS.exists(secondTestFileUri));
 
+    String[] files = _hadoopPinotFS.listFiles(_absoluteTmpDirPath.toURI(), true);
+    Assert.assertEquals(files.length, 1);
+    _hadoopPinotFS.copy(testFileUri, secondTestFileUri);
+    files = _hadoopPinotFS.listFiles(_absoluteTmpDirPath.toURI(), true);
+    Assert.assertEquals(files.length, 2);
     // Check file copy worked when file was not created
-    Assert.assertTrue(localPinotFS.exists(secondTestFileUri));
+    Assert.assertTrue(_hadoopPinotFS.exists(secondTestFileUri));
 
     // Create another file in the same path
     File thirdTestFile = new File(_absoluteTmpDirPath, "thirdTestFile");
@@ -98,8 +94,9 @@ public class LocalPinotFSTest {
     File testDirFile = new File(testDir, "testFile");
     // Assert that recursive list files and nonrecursive list files are as expected
     Assert.assertTrue(testDirFile.createNewFile(), "Could not create file " + testDir.getAbsolutePath());
-    Assert.assertEquals(localPinotFS.listFiles(newAbsoluteTempDirPath.toURI(), false), new String[]{testDir.getAbsolutePath()});
-    Assert.assertEquals(localPinotFS.listFiles(newAbsoluteTempDirPath.toURI(), true),
+    Assert.assertEquals(_hadoopPinotFS.listFiles(newAbsoluteTempDirPath.toURI(), false),
+        new String[]{testDir.getAbsolutePath()});
+    Assert.assertEquals(_hadoopPinotFS.listFiles(newAbsoluteTempDirPath.toURI(), true),
         new String[]{testDir.getAbsolutePath(), testDirFile.getAbsolutePath()});
 
     // Create another parent dir so we can test recursive move
@@ -107,52 +104,61 @@ public class LocalPinotFSTest {
     Assert.assertTrue(newAbsoluteTempDirPath3.mkdir());
     Assert.assertEquals(newAbsoluteTempDirPath3.listFiles().length, 0);
 
-    localPinotFS.move(newAbsoluteTempDirPath.toURI(), newAbsoluteTempDirPath3.toURI(), true);
-    Assert.assertFalse(localPinotFS.exists(newAbsoluteTempDirPath.toURI()));
-    Assert.assertTrue(localPinotFS.exists(newAbsoluteTempDirPath3.toURI()));
-    Assert.assertTrue(localPinotFS.exists(new File(newAbsoluteTempDirPath3, "testDir").toURI()));
-    Assert.assertTrue(localPinotFS.exists(new File(new File(newAbsoluteTempDirPath3, "testDir"), "testFile").toURI()));
+    _hadoopPinotFS.move(newAbsoluteTempDirPath.toURI(), newAbsoluteTempDirPath3.toURI(), true);
+    Assert.assertFalse(_hadoopPinotFS.exists(newAbsoluteTempDirPath.toURI()));
+    Assert.assertTrue(_hadoopPinotFS.exists(newAbsoluteTempDirPath3.toURI()));
+    File testDirUnderNewAbsoluteTempDirPath3 = new File(newAbsoluteTempDirPath3, "testDir");
+    Assert.assertTrue(_hadoopPinotFS.exists(testDirUnderNewAbsoluteTempDirPath3.toURI()));
+    Assert.assertTrue(
+        _hadoopPinotFS.exists(new File(testDirUnderNewAbsoluteTempDirPath3, "testFile").toURI()));
 
     // Check file copy to location where something already exists still works
-    localPinotFS.copy(testFileUri, thirdTestFile.toURI());
+    _hadoopPinotFS.copy(testFileUri, thirdTestFile.toURI());
     // Check length of file
-    Assert.assertEquals(0, localPinotFS.length(secondTestFileUri));
-    Assert.assertTrue(localPinotFS.exists(thirdTestFile.toURI()));
+    Assert.assertEquals(_hadoopPinotFS.length(secondTestFileUri), 0);
+    Assert.assertTrue(_hadoopPinotFS.exists(thirdTestFile.toURI()));
+
+    // Check that destination being directory within the source directory still works
+    File anotherFileUnderAbsoluteThreeDir = new File(newAbsoluteTempDirPath3, "anotherFile");
+    Assert.assertFalse(_hadoopPinotFS.exists(anotherFileUnderAbsoluteThreeDir.toURI()));
+    Assert.assertTrue(anotherFileUnderAbsoluteThreeDir.createNewFile());
+    Assert.assertTrue(_hadoopPinotFS.exists(anotherFileUnderAbsoluteThreeDir.toURI()));
+    _hadoopPinotFS.copy(newAbsoluteTempDirPath3.toURI(), testDirUnderNewAbsoluteTempDirPath3.toURI());
+    Assert.assertEquals(_hadoopPinotFS.listFiles(testDirUnderNewAbsoluteTempDirPath3.toURI(), false).length, 3);
 
     // Check that method deletes dst directory during move and is successful by overwriting dir
     Assert.assertTrue(_newTmpDir.exists());
     // create a file in the dst folder
-    File dstFile = new File(_newTmpDir.getPath() + "/newFile" );
+    File dstFile = new File(_newTmpDir.getPath() + "/newFile");
     dstFile.createNewFile();
 
-    // Expected that a move without overwrite will not succeed
-    Assert.assertFalse(localPinotFS.move(_absoluteTmpDirPath.toURI(), _newTmpDir.toURI(), false));
+    // Expected that if the target already exists, a move without overwrite will not succeed
+    Assert.assertFalse(_hadoopPinotFS.move(_absoluteTmpDirPath.toURI(), _newTmpDir.toURI(), false));
 
-    int files = _absoluteTmpDirPath.listFiles().length;
-    Assert.assertTrue(localPinotFS.move(_absoluteTmpDirPath.toURI(), _newTmpDir.toURI(), true));
+    int numFiles = _absoluteTmpDirPath.listFiles().length;
+    Assert.assertTrue(_hadoopPinotFS.move(_absoluteTmpDirPath.toURI(), _newTmpDir.toURI(), true));
     Assert.assertEquals(_absoluteTmpDirPath.length(), 0);
-    Assert.assertEquals(_newTmpDir.listFiles().length, files);
+    Assert.assertEquals(_newTmpDir.listFiles().length, numFiles);
     Assert.assertFalse(dstFile.exists());
 
     // Check that copying a file to a non-existent destination folder will work
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
     File srcFile = new File(_absoluteTmpDirPath, "srcFile");
-    localPinotFS.mkdir(_absoluteTmpDirPath.toURI());
+    _hadoopPinotFS.mkdir(_absoluteTmpDirPath.toURI());
     Assert.assertTrue(srcFile.createNewFile());
-    dstFile = new File(_nonExistentTmpFolder.getPath() + "/newFile" );
+    dstFile = new File(_nonExistentTmpFolder.getPath() + "/newFile");
     Assert.assertFalse(dstFile.exists());
-    Assert.assertTrue(localPinotFS.copy(srcFile.toURI(), dstFile.toURI()));
+    Assert.assertTrue(_hadoopPinotFS.copy(srcFile.toURI(), dstFile.toURI()));
     Assert.assertTrue(srcFile.exists());
     Assert.assertTrue(dstFile.exists());
 
-    //Check that copying a folder to a non-existent destination folder works
+    // Check that copying a folder to a non-existent destination folder works
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
-    localPinotFS.mkdir(_absoluteTmpDirPath.toURI());
-    dstFile = new File(_nonExistentTmpFolder.getPath() + "/srcFile" );
+    dstFile = new File(_nonExistentTmpFolder.getPath() + "/srcFile");
     Assert.assertFalse(dstFile.exists());
-    Assert.assertTrue(localPinotFS.copy(_absoluteTmpDirPath.toURI(), _nonExistentTmpFolder.toURI()));
+    Assert.assertTrue(_hadoopPinotFS.copy(_absoluteTmpDirPath.toURI(), _nonExistentTmpFolder.toURI()));
     Assert.assertTrue(dstFile.exists());
     FileUtils.deleteQuietly(srcFile);
     Assert.assertFalse(srcFile.exists());
@@ -161,38 +167,38 @@ public class LocalPinotFSTest {
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
     srcFile = new File(_absoluteTmpDirPath, "srcFile");
-    localPinotFS.mkdir(_absoluteTmpDirPath.toURI());
+    _hadoopPinotFS.mkdir(_absoluteTmpDirPath.toURI());
     Assert.assertTrue(srcFile.createNewFile());
-    dstFile = new File(_nonExistentTmpFolder.getPath() + "/newFile" );
+    dstFile = new File(_nonExistentTmpFolder.getPath() + "/newFile");
     Assert.assertFalse(dstFile.exists());
-    Assert.assertTrue(localPinotFS.move(srcFile.toURI(), dstFile.toURI(), true)); // overwrite flag has no impact
+    Assert.assertTrue(_hadoopPinotFS.move(srcFile.toURI(), dstFile.toURI(), true)); // overwrite flag has no impact
     Assert.assertFalse(srcFile.exists());
     Assert.assertTrue(dstFile.exists());
 
-    //Check that moving a folder to a non-existent destination folder works
+    // Check that moving a folder to a non-existent destination folder works
     FileUtils.deleteQuietly(_nonExistentTmpFolder);
     Assert.assertFalse(_nonExistentTmpFolder.exists());
     srcFile = new File(_absoluteTmpDirPath, "srcFile");
-    localPinotFS.mkdir(_absoluteTmpDirPath.toURI());
+    _hadoopPinotFS.mkdir(_absoluteTmpDirPath.toURI());
     Assert.assertTrue(srcFile.createNewFile());
-    dstFile = new File(_nonExistentTmpFolder.getPath() + "/srcFile" );
+    dstFile = new File(_nonExistentTmpFolder.getPath() + "/srcFile");
     Assert.assertFalse(dstFile.exists());
-    Assert.assertTrue(localPinotFS.move(_absoluteTmpDirPath.toURI(), _nonExistentTmpFolder.toURI(),
+    Assert.assertTrue(_hadoopPinotFS.move(_absoluteTmpDirPath.toURI(), _nonExistentTmpFolder.toURI(),
         true)); // overwrite flag has no impact
     Assert.assertTrue(dstFile.exists());
 
-    localPinotFS.delete(secondTestFileUri, true);
+    _hadoopPinotFS.delete(secondTestFileUri, true);
     // Check deletion from final location worked
-    Assert.assertTrue(!localPinotFS.exists(secondTestFileUri));
+    Assert.assertFalse(_hadoopPinotFS.exists(secondTestFileUri));
 
     File firstTempDir = new File(_absoluteTmpDirPath, "firstTempDir");
     File secondTempDir = new File(_absoluteTmpDirPath, "secondTempDir");
-    localPinotFS.mkdir(firstTempDir.toURI());
+    _hadoopPinotFS.mkdir(firstTempDir.toURI());
     Assert.assertTrue(firstTempDir.exists(), "Could not make directory " + firstTempDir.getPath());
 
     // Check that directory only copy worked
-    localPinotFS.copy(firstTempDir.toURI(), secondTempDir.toURI());
-    Assert.assertTrue(localPinotFS.exists(secondTempDir.toURI()));
+    _hadoopPinotFS.copy(firstTempDir.toURI(), secondTempDir.toURI());
+    Assert.assertTrue(_hadoopPinotFS.exists(secondTempDir.toURI()));
 
     // Copying directory with files to directory with files
     File testFile = new File(firstTempDir, "testFile");
@@ -200,22 +206,23 @@ public class LocalPinotFSTest {
     File newTestFile = new File(secondTempDir, "newTestFile");
     Assert.assertTrue(newTestFile.createNewFile(), "Could not create file " + newTestFile.getPath());
 
-    localPinotFS.copy(firstTempDir.toURI(), secondTempDir.toURI());
-    Assert.assertEquals(localPinotFS.listFiles(secondTempDir.toURI(), true).length, 1);
+    _hadoopPinotFS.copy(firstTempDir.toURI(), secondTempDir.toURI());
+    files = _hadoopPinotFS.listFiles(secondTempDir.toURI(), true);
+    Assert.assertEquals(files.length, 1);
 
-    // len of dir = exception
+    // len of a directory should throw an exception.
     try {
-      localPinotFS.length(firstTempDir.toURI());
-      fail();
+      _hadoopPinotFS.length(firstTempDir.toURI());
+      Assert.fail();
     } catch (IllegalArgumentException e) {
 
     }
 
     Assert.assertTrue(testFile.exists());
 
-    localPinotFS.copyFromLocalFile(testFile, secondTestFileUri);
-    Assert.assertTrue(localPinotFS.exists(secondTestFileUri));
-    localPinotFS.copyToLocalFile(testFile.toURI(), new File(secondTestFileUri));
-    Assert.assertTrue(localPinotFS.exists(secondTestFileUri));
+    _hadoopPinotFS.copyFromLocalFile(testFile, secondTestFileUri);
+    Assert.assertTrue(_hadoopPinotFS.exists(secondTestFileUri));
+    _hadoopPinotFS.copyToLocalFile(testFile.toURI(), new File(secondTestFileUri));
+    Assert.assertTrue(_hadoopPinotFS.exists(secondTestFileUri));
   }
-}
\ No newline at end of file
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org