Posted to commits@ozone.apache.org by av...@apache.org on 2020/02/12 23:17:01 UTC

[hadoop-ozone] branch revert-540-HDDS-2914-master created (now 24d3111)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch revert-540-HDDS-2914-master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


      at 24d3111  Revert "HDDS-2914. Certain Hive queries started to fail on generating splits (#540)"

This branch includes the following new commits:

     new 24d3111  Revert "HDDS-2914. Certain Hive queries started to fail on generating splits (#540)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org


[hadoop-ozone] 01/01: Revert "HDDS-2914. Certain Hive queries started to fail on generating splits (#540)"

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch revert-540-HDDS-2914-master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 24d311161270555f88f53352b1f6c56f0ec5d47b
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Wed Feb 12 15:16:54 2020 -0800

    Revert "HDDS-2914. Certain Hive queries started to fail on generating splits (#540)"
    
    This reverts commit 25d2338b6753fb769d25502758991f98cc1cea37.
---
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  2 -
 .../hadoop/ozone/om/helpers/OzoneFileStatus.java   | 14 ++---
 .../src/main/proto/OzoneManagerProtocol.proto      |  1 -
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   | 21 -------
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 59 ++++++++-----------
 .../protocolPB/OzoneManagerRequestHandler.java     |  2 -
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      | 66 +---------------------
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      | 22 +-------
 .../apache/hadoop/fs/ozone/FileStatusAdapter.java  | 12 +---
 .../hadoop/fs/ozone/FilteredClassLoader.java       |  1 -
 10 files changed, 31 insertions(+), 169 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 2a48e68..2982e38 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1000,7 +1000,6 @@ public class RpcClient implements ClientProtocol {
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
-        .setRefreshPipeline(true)
         .build();
     return ozoneManagerClient.getFileStatus(keyArgs);
   }
@@ -1082,7 +1081,6 @@ public class RpcClient implements ClientProtocol {
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
-        .setRefreshPipeline(true)
         .build();
     return ozoneManagerClient
         .listStatus(keyArgs, recursive, startKey, numEntries);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
index 1f4c0e5..8717946 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
@@ -44,9 +44,8 @@ public class OzoneFileStatus extends FileStatus {
     keyInfo = key;
   }
 
-  public OzoneFileStatus(FileStatus status, OmKeyInfo key) throws IOException {
+  public OzoneFileStatus(FileStatus status) throws IOException {
     super(status);
-    keyInfo = key;
   }
 
   // Use this constructor only for directories
@@ -55,18 +54,13 @@ public class OzoneFileStatus extends FileStatus {
   }
 
   public OzoneFileStatusProto getProtobuf() throws IOException {
-    OzoneFileStatusProto.Builder builder = OzoneFileStatusProto.newBuilder()
-        .setStatus(PBHelper.convert(this));
-    if (keyInfo != null) {
-      builder.setKeyInfo(keyInfo.getProtobuf());
-    }
-    return builder.build();
+    return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this))
+        .build();
   }
 
   public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response)
       throws IOException {
-    return new OzoneFileStatus(PBHelper.convert(response.getStatus()),
-        OmKeyInfo.getFromProtobuf(response.getKeyInfo()));
+    return new OzoneFileStatus(PBHelper.convert(response.getStatus()));
   }
 
   public static Path getPath(String keyName) {
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index aabe9e4..b9315d3 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -754,7 +754,6 @@ message RepeatedKeyInfo {
 
 message OzoneFileStatusProto {
     required hadoop.fs.FileStatusProto status = 1;
-    optional KeyInfo keyInfo = 2;
 }
 
 message GetFileStatusRequest {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 984293b..2a72101 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -25,14 +25,12 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
-import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -315,25 +313,6 @@ public class TestOzoneFileInterfaces {
   }
 
   @Test
-  public void testOzoneManagerLocatedFileStatus() throws IOException {
-    String data = RandomStringUtils.randomAlphanumeric(20);
-    String filePath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + filePath);
-    try (FSDataOutputStream stream = fs.create(path)) {
-      stream.writeBytes(data);
-    }
-    FileStatus status = fs.getFileStatus(path);
-    assertTrue(status instanceof LocatedFileStatus);
-    LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status;
-    assertTrue(locatedFileStatus.getBlockLocations().length >= 1);
-
-    for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) {
-      assertTrue(blockLocation.getNames().length >= 1);
-      assertTrue(blockLocation.getHosts().length >= 1);
-    }
-  }
-
-  @Test
   public void testPathToKey() throws Exception {
 
     assertEquals("a/b/1", o3fs.pathToKey(new Path("/a/b/1")));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index a0c6b85..55e40d1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -37,7 +37,6 @@ import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
@@ -687,35 +686,31 @@ public class KeyManagerImpl implements KeyManager {
    */
   @VisibleForTesting
   protected void refreshPipeline(OmKeyInfo value) throws IOException {
-    if (value != null &&
-        CollectionUtils.isNotEmpty(value.getKeyLocationVersions())) {
-      Map<Long, ContainerWithPipeline> containerWithPipelineMap =
-          new HashMap<>();
-      for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
-        for (OmKeyLocationInfo k : key.getLocationList()) {
-          // TODO: fix Some tests that may not initialize container client
-          // The production should always have containerClient initialized.
-          if (scmClient.getContainerClient() != null) {
-            try {
-              if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
-                ContainerWithPipeline containerWithPipeline = scmClient
-                    .getContainerClient()
-                    .getContainerWithPipeline(k.getContainerID());
-                containerWithPipelineMap.put(k.getContainerID(),
-                    containerWithPipeline);
-              }
-            } catch (IOException ioEx) {
-              LOG.debug("Get containerPipeline failed for volume:{} bucket:{} "
-                      + "key:{}", value.getVolumeName(), value.getBucketName(),
-                  value.getKeyName(), ioEx);
-              throw new OMException(ioEx.getMessage(),
-                  SCM_GET_PIPELINE_EXCEPTION);
-            }
-            ContainerWithPipeline cp =
-                containerWithPipelineMap.get(k.getContainerID());
-            if (!cp.getPipeline().equals(k.getPipeline())) {
-              k.setPipeline(cp.getPipeline());
+    Map<Long, ContainerWithPipeline> containerWithPipelineMap = new HashMap<>();
+    for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
+      for (OmKeyLocationInfo k : key.getLocationList()) {
+        // TODO: fix Some tests that may not initialize container client
+        // The production should always have containerClient initialized.
+        if (scmClient.getContainerClient() != null) {
+          try {
+            if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
+              ContainerWithPipeline containerWithPipeline = scmClient
+                  .getContainerClient()
+                  .getContainerWithPipeline(k.getContainerID());
+              containerWithPipelineMap.put(k.getContainerID(),
+                  containerWithPipeline);
             }
+          } catch (IOException ioEx) {
+            LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " +
+                    "key:{}", value.getVolumeName(), value.getBucketName(),
+                value.getKeyName(), ioEx);
+            throw new OMException(ioEx.getMessage(),
+                SCM_GET_PIPELINE_EXCEPTION);
+          }
+          ContainerWithPipeline cp =
+              containerWithPipelineMap.get(k.getContainerID());
+          if (!cp.getPipeline().equals(k.getPipeline())) {
+            k.setPipeline(cp.getPipeline());
           }
         }
       }
@@ -1692,9 +1687,6 @@ public class KeyManagerImpl implements KeyManager {
           volumeName, bucketName, keyName);
       OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes);
       if (fileKeyInfo != null) {
-        if (args.getRefreshPipeline()) {
-          refreshPipeline(fileKeyInfo);
-        }
         // this is a file
         return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
       }
@@ -2032,9 +2024,6 @@ public class KeyManagerImpl implements KeyManager {
       for (Map.Entry<String, OzoneFileStatus> entry : cacheKeyMap.entrySet()) {
         // No need to check if a key is deleted or not here, this is handled
         // when adding entries to cacheKeyMap from DB.
-        if (args.getRefreshPipeline()) {
-          refreshPipeline(entry.getValue().getKeyInfo());
-        }
         fileStatusList.add(entry.getValue());
         countEntries++;
         if (countEntries >= numEntries) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 03b8715..f5e2398 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -557,7 +557,6 @@ public class OzoneManagerRequestHandler implements RequestHandler {
         .setVolumeName(keyArgs.getVolumeName())
         .setBucketName(keyArgs.getBucketName())
         .setKeyName(keyArgs.getKeyName())
-        .setRefreshPipeline(true)
         .build();
 
     GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder();
@@ -589,7 +588,6 @@ public class OzoneManagerRequestHandler implements RequestHandler {
         .setVolumeName(keyArgs.getVolumeName())
         .setBucketName(keyArgs.getBucketName())
         .setKeyName(keyArgs.getKeyName())
-        .setRefreshPipeline(true)
         .build();
     List<OzoneFileStatus> statuses =
         impl.listStatus(omKeyArgs, request.getRecursive(),
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index ab5837f..317aff0 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -25,18 +25,15 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OmUtils;
@@ -49,9 +46,6 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
@@ -80,7 +74,6 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
   private ReplicationType replicationType;
   private ReplicationFactor replicationFactor;
   private boolean securityEnabled;
-  private int configuredDnPort;
 
   /**
    * Create new OzoneClientAdapter implementation.
@@ -175,9 +168,6 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
       this.bucket = volume.getBucket(bucketStr);
       this.replicationType = ReplicationType.valueOf(replicationTypeConf);
       this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf);
-      this.configuredDnPort = conf.getInt(
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     } finally {
       Thread.currentThread().setContextClassLoader(contextClassLoader);
     }
@@ -450,61 +440,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
         status.getPermission().toShort(),
         status.getOwner(),
         status.getGroup(),
-        status.getPath(),
-        getBlockLocations(status)
+        status.getPath()
     );
   }
-
-  /**
-   * Helper method to get List of BlockLocation from OM Key info.
-   * @param fileStatus Ozone key file status.
-   * @return list of block locations.
-   */
-  private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) {
-
-    if (fileStatus == null) {
-      return new BlockLocation[0];
-    }
-
-    OmKeyInfo keyInfo = fileStatus.getKeyInfo();
-    if (keyInfo == null || CollectionUtils.isEmpty(
-        keyInfo.getKeyLocationVersions())) {
-      return new BlockLocation[0];
-    }
-    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups =
-        keyInfo.getKeyLocationVersions();
-    if (CollectionUtils.isEmpty(omKeyLocationInfoGroups)) {
-      return new BlockLocation[0];
-    }
-
-    OmKeyLocationInfoGroup omKeyLocationInfoGroup =
-        keyInfo.getLatestVersionLocations();
-    BlockLocation[] blockLocations = new BlockLocation[
-        omKeyLocationInfoGroup.getBlocksLatestVersionOnly().size()];
-
-    int i = 0;
-    for (OmKeyLocationInfo omKeyLocationInfo :
-        omKeyLocationInfoGroup.getBlocksLatestVersionOnly()) {
-      List<String> hostList = new ArrayList<>();
-      List<String> nameList = new ArrayList<>();
-      omKeyLocationInfo.getPipeline().getNodes()
-          .forEach(dn -> {
-            hostList.add(dn.getHostName());
-            int port = dn.getPort(
-                DatanodeDetails.Port.Name.STANDALONE).getValue();
-            if (port == 0) {
-              port = configuredDnPort;
-            }
-            nameList.add(dn.getHostName() + ":" + port);
-          });
-
-      String[] hosts = hostList.toArray(new String[hostList.size()]);
-      String[] names = nameList.toArray(new String[nameList.size()]);
-      BlockLocation blockLocation = new BlockLocation(
-          names, hosts, i, omKeyLocationInfo.getLength());
-      blockLocations[i++] = blockLocation;
-    }
-    return blockLocations;
-  }
-
 }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 72279c3..a51bafd 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -35,14 +35,12 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -166,7 +164,6 @@ public class BasicOzoneFileSystem extends FileSystem {
       }
       this.workingDir = new Path(OZONE_USER_DIR, this.userName)
           .makeQualified(this.uri, this.workingDir);
-
     } catch (URISyntaxException ue) {
       final String msg = "Invalid Ozone endpoint " + name;
       LOG.error(msg, ue);
@@ -646,17 +643,6 @@ public class BasicOzoneFileSystem extends FileSystem {
     return fileStatus;
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(FileStatus fileStatus,
-                                               long start, long len)
-      throws IOException {
-    if (fileStatus instanceof LocatedFileStatus) {
-      return ((LocatedFileStatus) fileStatus).getBlockLocations();
-    } else {
-      return super.getFileBlockLocations(fileStatus, start, len);
-    }
-  }
-
   /**
    * Turn a path (relative or otherwise) into an Ozone key.
    *
@@ -798,7 +784,7 @@ public class BasicOzoneFileSystem extends FileSystem {
       //NOOP: If not symlink symlink remains null.
     }
 
-    FileStatus fileStatus =  new FileStatus(
+    return new FileStatus(
         fileStatusAdapter.getLength(),
         fileStatusAdapter.isDir(),
         fileStatusAdapter.getBlockReplication(),
@@ -812,11 +798,5 @@ public class BasicOzoneFileSystem extends FileSystem {
         fileStatusAdapter.getPath()
     );
 
-    BlockLocation[] blockLocations = fileStatusAdapter.getBlockLocations();
-    if (blockLocations == null || blockLocations.length == 0) {
-      return fileStatus;
-    }
-    return new LocatedFileStatus(fileStatus, blockLocations);
   }
-
 }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
index 64e43f5..9159783 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
 /**
  * Class to hold the internal information of a FileStatus.
  * <p>
@@ -45,13 +42,12 @@ public final class FileStatusAdapter {
   private final String owner;
   private final String group;
   private final Path symlink;
-  private final BlockLocation[] blockLocations;
 
   @SuppressWarnings("checkstyle:ParameterNumber")
   public FileStatusAdapter(long length, Path path, boolean isdir,
       short blockReplication, long blocksize, long modificationTime,
       long accessTime, short permission, String owner,
-      String group, Path symlink, BlockLocation[] locations) {
+      String group, Path symlink) {
     this.length = length;
     this.path = path;
     this.isdir = isdir;
@@ -63,7 +59,6 @@ public final class FileStatusAdapter {
     this.owner = owner;
     this.group = group;
     this.symlink = symlink;
-    this.blockLocations = locations.clone();
   }
 
   public Path getPath() {
@@ -110,9 +105,4 @@ public final class FileStatusAdapter {
     return length;
   }
 
-  @SuppressFBWarnings("EI_EXPOSE_REP")
-  public BlockLocation[] getBlockLocations() {
-    return blockLocations;
-  }
-
 }
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
index e115251..a90797e 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java
@@ -63,7 +63,6 @@ public class FilteredClassLoader extends URLClassLoader {
     delegatedClasses.add("org.apache.hadoop.fs.Seekable");
     delegatedClasses.add("org.apache.hadoop.io.Text");
     delegatedClasses.add("org.apache.hadoop.fs.Path");
-    delegatedClasses.add("org.apache.hadoop.fs.BlockLocation");
     delegatedClasses.addAll(StringUtils.getTrimmedStringCollection(
         System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")));
     this.delegate = parent;


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org