Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2009/11/02 22:45:43 UTC

svn commit: r832118 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/fs/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: suresh
Date: Mon Nov  2 21:45:43 2009
New Revision: 832118

URL: http://svn.apache.org/viewvc?rev=832118&view=rev
Log:
HDFS-702. Add HDFS implementation of AbstractFileSystem. Contributed by Sanjay Radia.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
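
For readers new to the FileContext API this commit plugs into, a minimal
sketch of how the new Hdfs AbstractFileSystem is reached through
FileContext follows; the namenode address and paths are illustrative
assumptions, not part of this commit:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class FileContextHdfsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileContext resolves the "hdfs" scheme to the new Hdfs class
        // through AbstractFileSystem.createFileSystem(URI, Configuration).
        FileContext fc = FileContext.getFileContext(
            URI.create("hdfs://localhost:8020"), conf);
        fc.mkdir(new Path("/user/example"), FileContext.DEFAULT_PERM, true);
      }
    }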

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=832118&r1=832117&r2=832118&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Nov  2 21:45:43 2009
@@ -139,6 +139,9 @@
 
     HDFS-731. Support new Syncable interface in HDFS. (hairong)
 
+    HDFS-702. Add HDFS implementation of AbstractFileSystem. 
+    (Sanjay Radia via suresh)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=832118&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Mon Nov  2 21:45:43 2009
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.Progressable;
+
+public class Hdfs extends AbstractFileSystem {
+
+  DFSClient dfs;
+  private boolean verifyChecksum = true;
+
+  static {
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+  }
+
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
+   *
+   * @param theUri the URI of the file system, whose scheme must be "hdfs"
+   * @param conf the configuration for this file system
+   * @throws IOException if the namenode cannot be contacted
+   */
+  Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
+    super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+
+    if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) {
+      throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
+    }
+    String host = theUri.getHost();
+    if (host == null) {
+      throw new IOException("Incomplete HDFS URI, no host: " + theUri);
+    }
+
+    InetSocketAddress namenode = NameNode.getAddress(theUri.getAuthority());
+    this.dfs = new DFSClient(namenode, conf, getStatistics());
+  }
+
+  @Override
+  protected int getUriDefaultPort() {
+    return NameNode.DEFAULT_PORT;
+  }
+
+  @Override
+  protected FSDataOutputStream createInternal(Path f,
+      EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
+      int bufferSize, short replication, long blockSize, Progressable progress,
+      int bytesPerChecksum, boolean createParent) throws IOException {
+    return new FSDataOutputStream(dfs.primitiveCreate(getUriPath(f),
+        absolutePermission, createFlag, createParent, replication, blockSize,
+        progress, bufferSize, bytesPerChecksum), getStatistics());
+  }
+
+  @Override
+  protected boolean delete(Path f, boolean recursive) throws IOException {
+    return dfs.delete(getUriPath(f), recursive);
+  }
+
+  @Override
+  protected BlockLocation[] getFileBlockLocations(Path p, long start, long len)
+      throws IOException {
+    return dfs.getBlockLocations(p.toString(), start, len);
+  }
+
+  @Override
+  protected FileChecksum getFileChecksum(Path f) throws IOException {
+    return dfs.getFileChecksum(getUriPath(f));
+  }
+
+  @Override
+  protected FileStatus getFileStatus(Path f) throws IOException {
+    FileStatus fi = dfs.getFileInfo(getUriPath(f));
+    if (fi != null) {
+      fi.setPath(fi.getPath().makeQualified(getUri(), null));
+      return fi;
+    } else {
+      throw new FileNotFoundException("File does not exist: " + f.toString());
+    }
+  }
+
+  @Override
+  protected FsStatus getFsStatus() throws IOException {
+    return dfs.getDiskStatus();
+  }
+
+  @Override
+  protected FsServerDefaults getServerDefaults() throws IOException {
+    return dfs.getServerDefaults();
+  }
+
+  @Override
+  protected FileStatus[] listStatus(Path f) throws IOException {
+    FileStatus[] infos = dfs.listPaths(getUriPath(f));
+    if (infos == null)
+      throw new FileNotFoundException("File " + f + " does not exist.");
+
+    for (int i = 0; i < infos.length; i++) {
+      infos[i].setPath(infos[i].getPath().makeQualified(getUri(), null));
+    }
+    return infos;
+  }
+
+  @Override
+  protected void mkdir(Path dir, FsPermission permission, boolean createParent)
+    throws IOException {
+    dfs.mkdirs(getUriPath(dir), permission, createParent);
+  }
+
+  @Override
+  protected FSDataInputStream open(Path f, int bufferSize) throws IOException {
+    return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
+        bufferSize, verifyChecksum));
+  }
+
+  @Override
+  protected void renameInternal(Path src, Path dst) throws IOException {
+    dfs.rename(getUriPath(src), getUriPath(dst));
+  }
+
+  @Override
+  protected void renameInternal(Path src, Path dst, boolean overwrite)
+      throws IOException {
+    dfs.rename(getUriPath(src), getUriPath(dst),
+        overwrite ? Options.Rename.OVERWRITE : Options.Rename.NONE);
+  }
+
+  @Override
+  protected void setOwner(Path f, String username, String groupname)
+    throws IOException {
+    dfs.setOwner(getUriPath(f), username, groupname);
+  }
+
+  @Override
+  protected void setPermission(Path f, FsPermission permission)
+    throws IOException {
+    dfs.setPermission(getUriPath(f), permission);
+  }
+
+  @Override
+  protected boolean setReplication(Path f, short replication)
+    throws IOException {
+    return dfs.setReplication(getUriPath(f), replication);
+  }
+
+  @Override
+  protected void setTimes(Path f, long mtime, long atime) throws IOException {
+    dfs.setTimes(getUriPath(f), mtime, atime);
+  }
+
+  @Override
+  protected void setVerifyChecksum(boolean verifyChecksum) throws IOException {
+    this.verifyChecksum = verifyChecksum;
+  }
+}
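
The createInternal method above is what FileContext.create ultimately
delegates to. As a hedged sketch of the caller side (reusing the fc handle
from the earlier example; the flags, options, and path are illustrative
assumptions, not part of this commit):

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Options.CreateOpts;

    // FileContext fills in default permission, replication, block size,
    // and buffer size before calling Hdfs.createInternal.
    FSDataOutputStream out = fc.create(
        new Path("/user/example/file"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent());   // create missing parent directories
    out.write("hello".getBytes());
    out.close();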

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=832118&r1=832117&r2=832118&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Mon Nov  2 21:45:43 2009
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -418,10 +419,24 @@
    * namenode, and then reads from all the right places.  Creates
    * inner subclass of InputStream that does the right out-of-band
    * work.
+   * @deprecated Use {@link #open(String, int, boolean)} instead.
    */
-  DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
+  @Deprecated
+  public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
                       FileSystem.Statistics stats
       ) throws IOException {
+    return open(src, buffersize, verifyChecksum);
+  }
+
+  /**
+   * Create an input stream that obtains a nodelist from the
+   * namenode, and then reads from all the right places.  Creates
+   * inner subclass of InputStream that does the right out-of-band
+   * work.
+   */
+  public DFSInputStream open(String src, int buffersize, boolean verifyChecksum
+      ) throws IOException {
     checkOpen();
     //    Get block info from namenode
     return new DFSInputStream(src, buffersize, verifyChecksum);
@@ -594,11 +609,6 @@
                              int bytesPerChecksum)
     throws IOException {
     checkOpen();
-    if (absPermission == null) {
-      absPermission = 
-        FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
-    } 
-    LOG.debug(src + ": masked=" + absPermission);
     OutputStream result = new DFSOutputStream(src, absPermission,
         flag, createParent, replication, blockSize, progress, buffersize,
         bytesPerChecksum);
@@ -757,7 +767,7 @@
    * @return The checksum 
    * @see DistributedFileSystem#getFileChecksum(Path)
    */
-  MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
+  public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
     checkOpen();
     return getFileChecksum(src, namenode, socketFactory, socketTimeout);    
   }
@@ -2374,8 +2384,8 @@
     }
   }
     
-  static class DFSDataInputStream extends FSDataInputStream {
-    DFSDataInputStream(DFSInputStream in)
+  public static class DFSDataInputStream extends FSDataInputStream {
+    public DFSDataInputStream(DFSInputStream in)
       throws IOException {
       super(in);
     }
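
With open(String, int, boolean) and DFSDataInputStream now public, callers
such as the Hdfs class above can build input streams directly from a
DFSClient. A hedged sketch of that call pattern (the namenode address and
path are illustrative assumptions):

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.hdfs.DFSClient;

    // Mirrors Hdfs.open: obtain a DFSInputStream from the client and
    // wrap it in the now-public DFSDataInputStream.
    DFSClient dfs = new DFSClient(
        new InetSocketAddress("localhost", 8020), new Configuration());
    FSDataInputStream in = new DFSClient.DFSDataInputStream(
        dfs.open("/user/example/file", 4096, true));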

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=832118&r1=832117&r2=832118&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Nov  2 21:45:43 2009
@@ -191,6 +191,7 @@
     this.verifyChecksum = verifyChecksum;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     return new DFSClient.DFSDataInputStream(

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java?rev=832118&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java Mon Nov  2 21:45:43 2009
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+public class TestFcHdfsCreateMkdir extends
+                    FileContextCreateMkdirBaseTest {
+  
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining()
+      throws IOException, LoginException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster(conf, 2, true, null);
+    fc = FileContext.getFileContext(cluster.getURI(), conf);
+    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+        UnixUserGroupInformation.login().getUserName()));
+    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+  }
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+  
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+}

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java?rev=832118&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java Mon Nov  2 21:45:43 2009
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+public class TestFcHdfsPermission extends FileContextPermissionBase {
+  
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  
+  @BeforeClass
+  public static void clusterSetupAtBegining()
+      throws IOException, LoginException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster(conf, 2, true, null);
+    fc = FileContext.getFileContext(cluster.getURI(), conf);
+    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
+        UnixUserGroupInformation.login().getUserName()));
+    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+  }
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();   
+  }
+  
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  /*
+   * HDFS ignores the "x" bit of the permission for files.
+   */
+  static final FsPermission FILE_MASK_IGNORE_X_BIT = 
+    new FsPermission((short) ~0666);
+  FsPermission getFileMask() {
+    return FILE_MASK_IGNORE_X_BIT;
+  }
+}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=832118&r1=832117&r2=832118&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Mon Nov  2 21:45:43 2009
@@ -19,9 +19,11 @@
 package org.apache.hadoop.fs;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 
 import javax.security.auth.login.LoginException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -42,10 +44,11 @@
   private static Path defaultWorkingDirectory;
   
   @BeforeClass
-  public static void clusterSetupAtBegining()
-                                    throws IOException, LoginException  {
-    cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
-    fc = FileContext.getFileContext(cluster.getFileSystem());
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster(conf, 2, true, null);
+    fc = FileContext.getFileContext(cluster.getURI(), conf);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UnixUserGroupInformation.login().getUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=832118&r1=832117&r2=832118&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Nov  2 21:45:43 2009
@@ -22,6 +22,7 @@
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -71,7 +72,7 @@
       this.dnArgs = args;
     }
   }
-
+  private URI myUri = null;
   private Configuration conf;
   private NameNode nameNode;
   private int numDataNodes;
@@ -289,6 +290,20 @@
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
                     operation, racks, hosts, simulatedCapacities);
     waitClusterUp();
+    String myUriStr = "hdfs://localhost:" + this.getNameNodePort();
+    try {
+      this.myUri = new URI(myUriStr);
+    } catch (URISyntaxException e) {
+      NameNode.LOG.warn("unexpected URISyntaxException: " + e);
+    }
+  }
+  
+  /**
+   * @return URI of this MiniDFSCluster
+   */
+  public URI getURI() {
+    return myUri;
   }
 
   /**