Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2013/06/21 08:37:39 UTC
svn commit: r1495297 [36/46] - in /hadoop/common/branches/branch-1-win: ./
bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/
src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/
src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOIVCanReadOldVersions.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.SpotCheckImageVisitor.ImageInfo;
+import org.junit.Test;
+
+public class TestOIVCanReadOldVersions {
+ // Location of fsimage files during testing.
+ public static final String TEST_CACHE_DATA_DIR =
+ System.getProperty("test.cache.data", "build/test/cache");
+
+ // Verify that the image processor can correctly process prior Hadoop
+ // layout versions. These fsimages were previously generated and stored
+ // with the test. Test success indicates that no changes have been made
+ // to the OIV that cause older fsimages to be incorrectly processed.
+ @Test
+ public void testOldFSImages() {
+ // Define the expected values for the prior versions, as verified at the
+ // time the images were created
+ Set<String> pathNames = new HashSet<String>();
+ Collections.addAll(pathNames, "", /* root */
+ "/bar",
+ "/bar/dir0",
+ "/bar/dir0/file0",
+ "/bar/dir0/file1",
+ "/bar/dir1",
+ "/bar/dir1/file0",
+ "/bar/dir1/file1",
+ "/bar/dir2",
+ "/bar/dir2/file0",
+ "/bar/dir2/file1",
+ "/foo",
+ "/foo/dir0",
+ "/foo/dir0/file0",
+ "/foo/dir0/file1",
+ "/foo/dir0/file2",
+ "/foo/dir0/file3",
+ "/foo/dir1",
+ "/foo/dir1/file0",
+ "/foo/dir1/file1",
+ "/foo/dir1/file2",
+ "/foo/dir1/file3");
+
+ Set<String> INUCpaths = new HashSet<String>();
+ Collections.addAll(INUCpaths, "/bar/dir0/file0",
+ "/bar/dir0/file1",
+ "/bar/dir1/file0",
+ "/bar/dir1/file1",
+ "/bar/dir2/file0",
+ "/bar/dir2/file1");
+
+ ImageInfo v18Inodes = new ImageInfo(); // Hadoop version 18 inodes
+ v18Inodes.totalNumBlocks = 12;
+ v18Inodes.totalFileSize = 1069548540L;
+ v18Inodes.pathNames = pathNames;
+ v18Inodes.totalReplications = 14;
+
+ ImageInfo v18INUCs = new ImageInfo(); // Hadoop version 18 inodes under construction
+ v18INUCs.totalNumBlocks = 0;
+ v18INUCs.totalFileSize = 0;
+ v18INUCs.pathNames = INUCpaths;
+ v18INUCs.totalReplications = 6;
+
+ ImageInfo v19Inodes = new ImageInfo(); // Hadoop version 19 inodes
+ v19Inodes.totalNumBlocks = 12;
+ v19Inodes.totalFileSize = 1069548540L;
+ v19Inodes.pathNames = pathNames;
+ v19Inodes.totalReplications = 14;
+
+ ImageInfo v19INUCs = new ImageInfo(); // Hadoop version 19 inodes under construction
+ v19INUCs.totalNumBlocks = 0;
+ v19INUCs.totalFileSize = 0;
+ v19INUCs.pathNames = INUCpaths;
+ v19INUCs.totalReplications = 6;
+
+
+ spotCheck("18", TEST_CACHE_DATA_DIR + "/fsimageV18", v18Inodes, v18INUCs);
+ spotCheck("19", TEST_CACHE_DATA_DIR + "/fsimageV19", v19Inodes, v19INUCs);
+ }
+
+ // Check that running the processor now gives us the same values as before
+ private void spotCheck(String hadoopVersion, String input,
+ ImageInfo inodes, ImageInfo INUCs) {
+ SpotCheckImageVisitor v = new SpotCheckImageVisitor();
+ OfflineImageViewer oiv = new OfflineImageViewer(input, v, false);
+ try {
+ oiv.go();
+ } catch (IOException e) {
+ fail("Error processing file: " + input);
+ }
+
+ compareSpotCheck(hadoopVersion, v.getINodesInfo(), inodes);
+ compareSpotCheck(hadoopVersion, v.getINUCsInfo(), INUCs);
+ System.out.println("Successfully processed fsimage file from Hadoop version " +
+ hadoopVersion);
+ }
+
+ // Compare the spot-check results generated by the image processor
+ // with the values we expected to receive
+ private void compareSpotCheck(String hadoopVersion,
+ ImageInfo generated, ImageInfo expected) {
+ assertEquals("Version " + hadoopVersion + ": Same number of total blocks",
+ expected.totalNumBlocks, generated.totalNumBlocks);
+ assertEquals("Version " + hadoopVersion + ": Same total file size",
+ expected.totalFileSize, generated.totalFileSize);
+ assertEquals("Version " + hadoopVersion + ": Same total replication factor",
+ expected.totalReplications, generated.totalReplications);
+ assertEquals("Version " + hadoopVersion + ": One-to-one matching of path names",
+ expected.pathNames, generated.pathNames);
+ }
+}
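For reference, the processing path exercised by spotCheck() above can also be driven standalone. A minimal sketch, assuming it is placed in the same org.apache.hadoop.hdfs.tools.offlineImageViewer package (the visitor classes are package-private) and using hypothetical input/output paths:

    package org.apache.hadoop.hdfs.tools.offlineImageViewer;

    import java.io.IOException;

    public class DumpOldImage {
      public static void main(String[] args) throws IOException {
        // Render the stored v18 test image as ls-style text.
        ImageVisitor v = new LsImageVisitor("/tmp/fsimageV18.ls", true);
        OfflineImageViewer oiv =
            new OfflineImageViewer("build/test/cache/fsimageV18", v, false);
        oiv.go(); // throws IOException if the image cannot be processed
      }
    }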
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,470 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.security.token.Token;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+/**
+ * Test the OfflineImageViewer by:
+ * * confirming it can correctly process a valid fsimage file and that
+ * the processing generates a correct representation of the namespace
+ * * confirming it correctly fails to process an fsimage file with a layout
+ * version it shouldn't be able to handle
+ * * confirming it correctly bails on malformed image files, in particular, a
+ * file that ends suddenly.
+ */
+public class TestOfflineImageViewer {
+ private static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
+ private static final int NUM_DIRS = 3;
+ private static final int FILES_PER_DIR = 4;
+ private static final String TEST_RENEWER = "JobTracker";
+ private static File originalFsimage = null;
+
+ // Elements of lines of ls-file output to be compared to FileStatus instance
+ private static class LsElements {
+ public String perms;
+ public int replication;
+ public String username;
+ public String groupname;
+ public long filesize;
+ public char dir; // d if dir, - otherwise
+ }
+
+ // namespace as written to dfs, to be compared with viewer's output
+ final static HashMap<String, FileStatus> writtenFiles =
+ new HashMap<String, FileStatus>();
+
+ private static String ROOT = System.getProperty("test.build.data",
+ "build/test/data");
+
+ // Create a populated namespace for later testing. Save its contents to a
+ // data structure and store its fsimage location.
+ // We only want to generate the fsimage file once and use it for
+ // multiple tests.
+ @BeforeClass
+ public static void createOriginalFSImage() throws IOException {
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+ conf.set("hadoop.security.auth_to_local",
+ "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
+ cluster = new MiniDFSCluster(conf, 4, true, null);
+ cluster.waitActive();
+ FileSystem hdfs = cluster.getFileSystem();
+
+ int filesize = 256;
+
+ // Create a reasonable namespace
+ for(int i = 0; i < NUM_DIRS; i++) {
+ Path dir = new Path("/dir" + i);
+ hdfs.mkdirs(dir);
+ writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
+ for(int j = 0; j < FILES_PER_DIR; j++) {
+ Path file = new Path(dir, "file" + j);
+ FSDataOutputStream o = hdfs.create(file);
+ o.write(new byte[ filesize++ ]);
+ o.close();
+
+ writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
+ }
+ }
+
+ // Get delegation tokens so we log the delegation token op
+ Token<?> delegationToken = hdfs.getDelegationToken(TEST_RENEWER);
+ LOG.debug("got token " + delegationToken);
+
+ // Write results to the fsimage file
+ cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ cluster.getNameNode().saveNamespace();
+
+ // Determine location of fsimage file
+ File [] files = cluster.getNameDirs().toArray(new File[0]);
+ originalFsimage = new File(files[0], "current/fsimage");
+
+ if(!originalFsimage.exists())
+ fail("Didn't generate or can't find fsimage.");
+
+ } finally {
+ if(cluster != null)
+ cluster.shutdown();
+ }
+ }
+
+ @AfterClass
+ public static void deleteOriginalFSImage() throws IOException {
+ if(originalFsimage != null && originalFsimage.exists()) {
+ originalFsimage.delete();
+ }
+ }
+
+ // Convenience method to generate a file status from file system for
+ // later comparison
+ private static FileStatus pathToFileEntry(FileSystem hdfs, String file)
+ throws IOException {
+ return hdfs.getFileStatus(new Path(file));
+ }
+
+ // Verify that we can correctly generate an ls-style output for a valid
+ // fsimage
+ @Test
+ public void outputOfLSVisitor() throws IOException {
+ File testFile = new File(ROOT, "/basicCheck");
+ File outputFile = new File(ROOT, "/basicCheckOutput");
+
+ try {
+ copyFile(originalFsimage, testFile);
+
+ ImageVisitor v = new LsImageVisitor(outputFile.getPath(), true);
+ OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, false);
+
+ oiv.go();
+
+ HashMap<String, LsElements> fileOutput = readLsfile(outputFile);
+
+ compareNamespaces(writtenFiles, fileOutput);
+ } finally {
+ if(testFile.exists()) testFile.delete();
+ if(outputFile.exists()) outputFile.delete();
+ }
+ LOG.debug("Correctly generated ls-style output.");
+ }
+
+ // Confirm that attempting to read an fsimage file with an unsupported
+ // layout results in an error
+ @Test
+ public void unsupportedFSLayoutVersion() throws IOException {
+ File testFile = new File(ROOT, "/invalidLayoutVersion");
+ File outputFile = new File(ROOT, "invalidLayoutVersionOutput");
+
+ try {
+ int badVersionNum = -432;
+ changeLayoutVersion(originalFsimage, testFile, badVersionNum);
+ ImageVisitor v = new LsImageVisitor(outputFile.getPath(), true);
+ OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, false);
+
+ try {
+ oiv.go();
+ fail("Shouldn't be able to read invalid laytout version");
+ } catch(IOException e) {
+ if(!e.getMessage().contains(Integer.toString(badVersionNum)))
+ throw e; // wasn't the error we were expecting
+ LOG.debug("Correctly failed at reading bad image version.");
+ }
+ } finally {
+ if(testFile.exists()) testFile.delete();
+ if(outputFile.exists()) outputFile.delete();
+ }
+ }
+
+ // Verify that image viewer will bail on a file that ends unexpectedly
+ @Test
+ public void truncatedFSImage() throws IOException {
+ File testFile = new File(ROOT, "/truncatedFSImage");
+ File outputFile = new File(ROOT, "/truncatedFSImageOutput");
+ try {
+ copyPartOfFile(originalFsimage, testFile);
+ assertTrue("Created truncated fsimage", testFile.exists());
+
+ ImageVisitor v = new LsImageVisitor(outputFile.getPath(), true);
+ OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, false);
+
+ try {
+ oiv.go();
+ fail("Managed to process a truncated fsimage file");
+ } catch (EOFException e) {
+ LOG.debug("Correctly handled EOF");
+ }
+
+ } finally {
+ if(testFile.exists()) testFile.delete();
+ if(outputFile.exists()) outputFile.delete();
+ }
+ }
+
+ // Test that our ls file has all the same components as the original namespace
+ private void compareNamespaces(HashMap<String, FileStatus> written,
+ HashMap<String, LsElements> fileOutput) {
+ assertEquals( "Should be the same number of files in both, plus one for root"
+ + " in fileoutput", fileOutput.keySet().size(),
+ written.keySet().size() + 1);
+ Set<String> inFile = fileOutput.keySet();
+
+ // For each line in the output file, verify that the namespace had a
+ // filestatus counterpart
+ for (String path : inFile) {
+ if (path.equals("/")) // root's not included in output from system call
+ continue;
+
+ assertTrue("Path in file (" + path + ") was written to fs", written
+ .containsKey(path));
+
+ compareFiles(written.get(path), fileOutput.get(path));
+
+ written.remove(path);
+ }
+
+ assertEquals("No more files were written to fs", 0, written.size());
+ }
+
+ // Compare two files as listed in the original namespace FileStatus and
+ // the output of the ls file from the image processor
+ private void compareFiles(FileStatus fs, LsElements elements) {
+ assertEquals("directory listed as such",
+ fs.isDir() ? 'd' : '-', elements.dir);
+ assertEquals("perms string equal",
+ fs.getPermission().toString(), elements.perms);
+ assertEquals("replication equal", fs.getReplication(), elements.replication);
+ assertEquals("owner equal", fs.getOwner(), elements.username);
+ assertEquals("group equal", fs.getGroup(), elements.groupname);
+ assertEquals("lengths equal", fs.getLen(), elements.filesize);
+ }
+
+ // Read the contents of the file created by the Ls processor
+ private HashMap<String, LsElements> readLsfile(File lsFile) throws IOException {
+ BufferedReader br = new BufferedReader(new FileReader(lsFile));
+ String line = null;
+ HashMap<String, LsElements> fileContents = new HashMap<String, LsElements>();
+
+ while((line = br.readLine()) != null)
+ readLsLine(line, fileContents);
+
+ return fileContents;
+ }
+
+ // Parse a line from the ls output. Store permissions, replication,
+ // username, groupname and filesize in hashmap keyed to the path name
+ private void readLsLine(String line, HashMap<String, LsElements> fileContents) {
+ String elements [] = line.split("\\s+");
+
+ assertEquals("Not enough elements in ls output", 8, elements.length);
+
+ LsElements lsLine = new LsElements();
+
+ lsLine.dir = elements[0].charAt(0);
+ lsLine.perms = elements[0].substring(1);
+ lsLine.replication = elements[1].equals("-")
+ ? 0 : Integer.valueOf(elements[1]);
+ lsLine.username = elements[2];
+ lsLine.groupname = elements[3];
+ lsLine.filesize = Long.valueOf(elements[4]);
+ // skipping date and time
+
+ String path = elements[7];
+
+ // Check that each file in the ls output was listed once
+ assertFalse("LS file had duplicate file entries",
+ fileContents.containsKey(path));
+
+ fileContents.put(path, lsLine);
+ }
+
+ // Copy one fsimage to another, changing the layout version in the process
+ private void changeLayoutVersion(File src, File dest, int newVersion)
+ throws IOException {
+ DataInputStream in = null;
+ DataOutputStream out = null;
+
+ try {
+ in = new DataInputStream(new FileInputStream(src));
+ out = new DataOutputStream(new FileOutputStream(dest));
+
+ in.readInt();
+ out.writeInt(newVersion);
+
+ byte [] b = new byte[1024];
+ for(int n; (n = in.read(b)) > 0; ) {
+ out.write(b, 0, n); // write only the bytes actually read
+ }
+ } finally {
+ if(in != null) in.close();
+ if(out != null) out.close();
+ }
+ }
+
+ // Copy only part of one file into another; used for testing truncated fsimages
+ private void copyPartOfFile(File src, File dest) throws IOException {
+ InputStream in = null;
+ OutputStream out = null;
+
+ byte [] b = new byte[256];
+ int bytesWritten = 0;
+ int count;
+ int maxBytes = 700;
+
+ try {
+ in = new FileInputStream(src);
+ out = new FileOutputStream(dest);
+
+ while( (count = in.read(b)) > 0 && bytesWritten < maxBytes ) {
+ out.write(b, 0, count); // write only the bytes actually read
+ bytesWritten += count;
+ }
+ } finally {
+ if(in != null) in.close();
+ if(out != null) out.close();
+ }
+ }
+
+ // Copy one file's contents into the other
+ private void copyFile(File src, File dest) throws IOException {
+ InputStream in = null;
+ OutputStream out = null;
+
+ try {
+ in = new FileInputStream(src);
+ out = new FileOutputStream(dest);
+
+ byte [] b = new byte[1024];
+ for(int n; (n = in.read(b)) > 0; ) {
+ out.write(b, 0, n); // write only the bytes actually read
+ }
+ } finally {
+ if(in != null) in.close();
+ if(out != null) out.close();
+ }
+ }
+
+ @Test
+ public void outputOfFileDistributionVisitor() throws IOException {
+ File testFile = new File(ROOT, "/basicCheck");
+ File outputFile = new File(ROOT, "/fileDistributionCheckOutput");
+
+ int totalFiles = 0;
+ try {
+ copyFile(originalFsimage, testFile);
+ ImageVisitor v = new FileDistributionVisitor(outputFile.getPath(), 0, 0);
+ OfflineImageViewer oiv =
+ new OfflineImageViewer(testFile.getPath(), v, false);
+
+ oiv.go();
+
+ BufferedReader reader = new BufferedReader(new FileReader(outputFile));
+ String line = reader.readLine();
+ assertEquals("Size\tNumFiles", line);
+ while((line = reader.readLine()) != null) {
+ String[] row = line.split("\t");
+ assertEquals(2, row.length);
+ totalFiles += Integer.parseInt(row[1]);
+ }
+ } finally {
+ if(testFile.exists()) testFile.delete();
+ if(outputFile.exists()) outputFile.delete();
+ }
+ assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
+ }
+
+ private static class TestImageVisitor extends ImageVisitor {
+ private List<String> delegationTokenRenewers = new LinkedList<String>();
+ TestImageVisitor() {
+ }
+
+ List<String> getDelegationTokenRenewers() {
+ return delegationTokenRenewers;
+ }
+
+ @Override
+ void start() throws IOException {
+ }
+
+ @Override
+ void finish() throws IOException {
+ }
+
+ @Override
+ void finishAbnormally() throws IOException {
+ }
+
+ @Override
+ void visit(ImageElement element, String value) throws IOException {
+ if (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER) {
+ delegationTokenRenewers.add(value);
+ }
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element) throws IOException {
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element, ImageElement key,
+ String value) throws IOException {
+ }
+
+ @Override
+ void leaveEnclosingElement() throws IOException {
+ }
+ }
+
+ @Test
+ public void outputOfTestVisitor() throws IOException {
+ File testFile = new File(ROOT, "/basicCheck");
+
+ try {
+ copyFile(originalFsimage, testFile);
+ TestImageVisitor v = new TestImageVisitor();
+ OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, true);
+ oiv.go();
+
+ // Validate stored delegation token identifiers.
+ List<String> dtrs = v.getDelegationTokenRenewers();
+ assertEquals(1, dtrs.size());
+ assertEquals(TEST_RENEWER, dtrs.get(0));
+ } finally {
+ if(testFile.exists()) testFile.delete();
+ }
+ LOG.debug("Passed TestVisitor validation.");
+ }
+}
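readLsLine() above assumes the Ls processor writes eight whitespace-separated columns per path. A sketch of that parsing against a hypothetical output line (the real format is defined by LsImageVisitor):

    String sample = "drwxr-xr-x  -  jdoe supergroup  0 2013-06-21 06:37 /dir0";
    String[] cols = sample.split("\\s+");  // 8 columns, as asserted above
    char type = cols[0].charAt(0);         // 'd' for a directory, '-' otherwise
    int replication = cols[1].equals("-") ? 0 : Integer.valueOf(cols[1]);
    long size = Long.valueOf(cols[4]);     // cols[5], cols[6] (date, time) are skipped
    String path = cols[7];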
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV18
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV18?rev=1495297&view=auto
==============================================================================
Binary file - no diff available.
Propchange: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV18
------------------------------------------------------------------------------
svn:mime-type = application/octet-stream
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV19
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV19?rev=1495297&view=auto
==============================================================================
Binary file - no diff available.
Propchange: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/fsimageV19
------------------------------------------------------------------------------
svn:mime-type = application/octet-stream
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/util/TestGSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/util/TestGSet.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/util/TestGSet.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/util/TestGSet.java Fri Jun 21 06:37:27 2013
@@ -386,6 +386,11 @@ public class TestGSet {
return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
iterate_count, contain_count, t/1000, t%1000);
}
+
+ @Override
+ public void clear() {
+ gset.clear();
+ }
}
/** Test data set */
@@ -451,4 +456,81 @@ public class TestGSet {
next = e;
}
}
+
+ /**
+ * Test for {@link LightWeightGSet#computeCapacity(double, String)}
+ * with invalid percent less than 0.
+ */
+ @Test(expected=IllegalArgumentException.class)
+ public void testComputeCapacityNegativePercent() {
+ LightWeightGSet.computeCapacity(1024, -1.0, "testMap");
+ }
+
+ /**
+ * Test for {@link LightWeightGSet#computeCapacity(double, String)}
+ * with invalid percent greater than 100.
+ */
+ @Test(expected=IllegalArgumentException.class)
+ public void testComputeCapacityInvalidPercent() {
+ LightWeightGSet.computeCapacity(1024, 101.0, "testMap");
+ }
+
+ /**
+ * Test for {@link LightWeightGSet#computeCapacity(double, String)}
+ * with invalid negative max memory
+ */
+ @Test(expected=IllegalArgumentException.class)
+ public void testComputeCapacityInvalidMemory() {
+ LightWeightGSet.computeCapacity(-1, 50.0, "testMap");
+ }
+
+ private static boolean isPowerOfTwo(int num) {
+ return num == 0 || (num > 0 && Integer.bitCount(num) == 1);
+ }
+
+ /** Return capacity as percentage of total memory */
+ private static int getPercent(long total, int capacity) {
+ // Reference size in bytes
+ double referenceSize =
+ System.getProperty("sun.arch.data.model").equals("32") ? 4.0 : 8.0;
+ return (int)(((capacity * referenceSize)/total) * 100.0);
+ }
+
+ /** Validate the capacity computed for the given max memory and percentage */
+ private static void testCapacity(long maxMemory, double percent) {
+ int capacity = LightWeightGSet.computeCapacity(maxMemory, percent, "map");
+ LightWeightGSet.LOG.info("Validating - total memory " + maxMemory + " percent "
+ + percent + " returned capacity " + capacity);
+ // Returned capacity is zero or power of two
+ Assert.assertTrue(isPowerOfTwo(capacity));
+
+ // Ensure the capacity returned is the nearest to the requested percentage
+ int capacityPercent = getPercent(maxMemory, capacity);
+ if (capacityPercent == percent) {
+ return;
+ } else if (capacityPercent > percent) {
+ Assert.assertTrue(getPercent(maxMemory, capacity * 2) > percent);
+ } else {
+ Assert.assertTrue(getPercent(maxMemory, capacity / 2) < percent);
+ }
+ }
+
+ /**
+ * Test for {@link LightWeightGSet#computeCapacity(double, String)}
+ */
+ @Test
+ public void testComputeCapacity() {
+ // Tests for boundary conditions where percent or memory are zero
+ testCapacity(0, 0.0);
+ testCapacity(100, 0.0);
+ testCapacity(0, 100.0);
+
+ // Compute capacity for 100 random max-memory and percentage combinations
+ Random r = new Random();
+ for (int i = 0; i < 100; i++) {
+ long maxMemory = r.nextInt(Integer.MAX_VALUE);
+ double percent = r.nextInt(101);
+ testCapacity(maxMemory, percent);
+ }
+ }
}
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java Fri Jun 21 06:37:27 2013
@@ -28,11 +28,13 @@ import java.security.PrivilegedException
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
@@ -61,6 +63,7 @@ public class TestFSMainOperationsWebHdfs
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
try {
cluster = new MiniDFSCluster(conf, 2, true, null);
cluster.waitActive();
@@ -102,6 +105,30 @@ public class TestFSMainOperationsWebHdfs
return defaultWorkingDirectory;
}
+ @Test
+ public void testConcat() throws Exception {
+ Path[] paths = {new Path("/test/hadoop/file1"),
+ new Path("/test/hadoop/file2"),
+ new Path("/test/hadoop/file3")};
+
+ DFSTestUtil.createFile(fSys, paths[0], 1024, (short) 3, 0);
+ DFSTestUtil.createFile(fSys, paths[1], 1024, (short) 3, 0);
+ DFSTestUtil.createFile(fSys, paths[2], 1024, (short) 3, 0);
+
+ Path catPath = new Path("/test/hadoop/catFile");
+ DFSTestUtil.createFile(fSys, catPath, 1024, (short) 3, 0);
+ Assert.assertTrue(exists(fSys, catPath));
+
+ fSys.concat(catPath, paths);
+
+ Assert.assertFalse(exists(fSys, paths[0]));
+ Assert.assertFalse(exists(fSys, paths[1]));
+ Assert.assertFalse(exists(fSys, paths[2]));
+
+ FileStatus fileStatus = fSys.getFileStatus(catPath);
+ Assert.assertEquals(1024*4, fileStatus.getLen());
+ }
+
//copied from trunk.
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
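The block size lowered to 1024 in the setup hunk above is what makes testConcat() work: HDFS concat has historically required the sources to match the target's block size and replication, with every source but the last ending on a block boundary, so each 1024-byte file here is exactly one full block. A sketch of the call pattern, using the paths from the test:

    // After concat, the sources are gone and the target holds all the data.
    Path target = new Path("/test/hadoop/catFile");
    Path[] srcs = { new Path("/test/hadoop/file1"),
                    new Path("/test/hadoop/file2"),
                    new Path("/test/hadoop/file3") };
    fSys.concat(target, srcs);
    assert fSys.getFileStatus(target).getLen() == 1024 * 4;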
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java Fri Jun 21 06:37:27 2013
@@ -18,22 +18,10 @@
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
import java.io.IOException;
-import java.net.URI;
import java.net.URL;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.TestByteRangeInputStream.MockHttpURLConnection;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.OffsetUrlInputStream;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.OffsetUrlOpener;
import org.junit.Test;
public class TestOffsetUrlInputStream {
@@ -74,64 +62,4 @@ public class TestOffsetUrlInputStream {
}
}
- @Test
- public void testByteRange() throws Exception {
- final Configuration conf = new Configuration();
- final String uri = WebHdfsFileSystem.SCHEME + "://localhost:50070/";
- final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
-
- OffsetUrlOpener ospy = spy(webhdfs.new OffsetUrlOpener(new URL("http://test/")));
- doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy)
- .openConnection();
- OffsetUrlOpener rspy = spy(webhdfs.new OffsetUrlOpener((URL) null));
- doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy)
- .openConnection();
- final OffsetUrlInputStream is = new OffsetUrlInputStream(ospy, rspy);
-
- assertEquals("getPos wrong", 0, is.getPos());
-
- is.read();
-
- assertNull("Initial call made incorrectly (Range Check)", ospy
- .openConnection().getRequestProperty("Range"));
-
- assertEquals("getPos should be 1 after reading one byte", 1, is.getPos());
-
- is.read();
-
- assertEquals("getPos should be 2 after reading two bytes", 2, is.getPos());
-
- // No additional connections should have been made (no seek)
-
- rspy.setURL(new URL("http://resolvedurl/"));
-
- is.seek(100);
- is.read();
-
- assertEquals("getPos should be 101 after reading one byte", 101,
- is.getPos());
-
- verify(rspy, times(1)).openConnection();
-
- is.seek(101);
- is.read();
-
- verify(rspy, times(1)).openConnection();
-
- // Seek to 101 should not result in another request"
-
- is.seek(2500);
- is.read();
-
- ((MockHttpURLConnection) rspy.openConnection()).setResponseCode(206);
- is.seek(0);
-
- try {
- is.read();
- fail("Exception should be thrown when 206 response is given "
- + "but 200 is expected");
- } catch (IOException e) {
- WebHdfsFileSystem.LOG.info(e.toString());
- }
- }
}
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHDFS.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHDFS.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Test WebHDFS */
+public class TestWebHDFS {
+ static final Log LOG = LogFactory.getLog(TestWebHDFS.class);
+
+ static final Random RANDOM = new Random();
+
+ static final long systemStartTime = System.nanoTime();
+
+ /** A timer for measuring performance. */
+ static class Ticker {
+ final String name;
+ final long startTime = System.nanoTime();
+ private long previousTick = startTime;
+
+ Ticker(final String name, String format, Object... args) {
+ this.name = name;
+ LOG.info(String.format("\n\n%s START: %s\n",
+ name, String.format(format, args)));
+ }
+
+ void tick(final long nBytes, String format, Object... args) {
+ final long now = System.nanoTime();
+ if (now - previousTick > 10000000000L) {
+ previousTick = now;
+ final double minutes = (now - systemStartTime)/60000000000.0;
+ LOG.info(String.format("\n\n%s (%.2f min) %s %s\n", name, minutes,
+ String.format(format, args), toMpsString(nBytes, now)));
+ }
+ }
+
+ void end(final long nBytes) {
+ final long now = System.nanoTime();
+ final double seconds = (now - startTime)/1000000000.0;
+ LOG.info(String.format("\n\n%s END: duration=%.2fs %s\n",
+ name, seconds, toMpsString(nBytes, now)));
+ }
+
+ String toMpsString(final long nBytes, final long now) {
+ final double mb = nBytes/(double)(1<<20);
+ final double mps = mb*1000000000.0/(now - startTime);
+ return String.format("[nBytes=%.2fMB, speed=%.2fMB/s]", mb, mps);
+ }
+ }
+
+ @Test
+ public void testLargeFile() throws Exception {
+ largeFileTest(200L << 20); //200MB file length
+ }
+
+ /** Test read and write large files. */
+ static void largeFileTest(final long fileLength) throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+
+ final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+ try {
+ cluster.waitActive();
+
+ final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+ final Path dir = new Path("/test/largeFile");
+ Assert.assertTrue(fs.mkdirs(dir));
+
+ final byte[] data = new byte[1 << 20];
+ RANDOM.nextBytes(data);
+
+ final byte[] expected = new byte[2 * data.length];
+ System.arraycopy(data, 0, expected, 0, data.length);
+ System.arraycopy(data, 0, expected, data.length, data.length);
+
+ final Path p = new Path(dir, "file");
+ final Ticker t = new Ticker("WRITE", "fileLength=" + fileLength);
+ final FSDataOutputStream out = fs.create(p);
+ try {
+ long remaining = fileLength;
+ for(; remaining > 0;) {
+ t.tick(fileLength - remaining, "remaining=%d", remaining);
+
+ final int n = (int)Math.min(remaining, data.length);
+ out.write(data, 0, n);
+ remaining -= n;
+ }
+ } finally {
+ out.close();
+ }
+ t.end(fileLength);
+
+ Assert.assertEquals(fileLength, fs.getFileStatus(p).getLen());
+
+ final long smallOffset = RANDOM.nextInt(1 << 20) + (1 << 20);
+ final long largeOffset = fileLength - smallOffset;
+ final byte[] buf = new byte[data.length];
+
+ verifySeek(fs, p, largeOffset, fileLength, buf, expected);
+ verifySeek(fs, p, smallOffset, fileLength, buf, expected);
+
+ verifyPread(fs, p, largeOffset, fileLength, buf, expected);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ static void checkData(long offset, long remaining, int n,
+ byte[] actual, byte[] expected) {
+ if (RANDOM.nextInt(100) == 0) {
+ int j = (int)(offset % actual.length);
+ for(int i = 0; i < n; i++) {
+ if (expected[j] != actual[i]) {
+ Assert.fail("expected[" + j + "]=" + expected[j]
+ + " != actual[" + i + "]=" + actual[i]
+ + ", offset=" + offset + ", remaining=" + remaining + ", n=" + n);
+ }
+ j++;
+ }
+ }
+ }
+
+ /** test seek */
+ static void verifySeek(FileSystem fs, Path p, long offset, long length,
+ byte[] buf, byte[] expected) throws IOException {
+ long remaining = length - offset;
+ long checked = 0;
+ LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
+
+ final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
+ offset, remaining);
+ final FSDataInputStream in = fs.open(p, 64 << 10);
+ in.seek(offset);
+ for(; remaining > 0; ) {
+ t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
+ final int n = (int)Math.min(remaining, buf.length);
+ in.readFully(buf, 0, n);
+ checkData(offset, remaining, n, buf, expected);
+
+ offset += n;
+ remaining -= n;
+ checked += n;
+ }
+ in.close();
+ t.end(checked);
+ }
+
+ static void verifyPread(FileSystem fs, Path p, long offset, long length,
+ byte[] buf, byte[] expected) throws IOException {
+ long remaining = length - offset;
+ long checked = 0;
+ LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);
+
+ final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d",
+ offset, remaining);
+ final FSDataInputStream in = fs.open(p, 64 << 10);
+ for(; remaining > 0; ) {
+ t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
+ final int n = (int)Math.min(remaining, buf.length);
+ in.readFully(offset, buf, 0, n);
+ checkData(offset, remaining, n, buf, expected);
+
+ offset += n;
+ remaining -= n;
+ checked += n;
+ }
+ in.close();
+ t.end(checked);
+ }
+
+ /** Test client retry with namenode restarting. */
+ @Test
+ public void testNamenodeRestart() throws Exception {
+ ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ TestDFSClientRetries.namenodeRestartTest(conf, true);
+ }
+}
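The Ticker above throttles its own logging: tick() only prints when more than ten seconds (10^10 ns) have passed since the previous message, so tight I/O loops stay quiet. A minimal usage sketch with hypothetical sizes:

    final long total = 1L << 20;
    final Ticker t = new Ticker("COPY", "total=%d bytes", total);
    for (long done = 0; done < total; done += 4096) {
      t.tick(done, "done=%d", done);  // logs at most once per ten seconds
    }
    t.end(total);                     // logs total duration and MB/s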
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Fri Jun 21 06:37:27 2013
@@ -25,6 +25,7 @@ import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Map;
+import java.util.Random;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
@@ -212,15 +213,20 @@ public class TestWebHdfsFileSystemContra
assertEquals(0, count);
}
+ final byte[] mydata = new byte[1 << 20];
+ new Random().nextBytes(mydata);
+
final Path p = new Path(dir, "file");
- createFile(p);
+ FSDataOutputStream out = fs.create(p, false, 4096, (short)3, 1L << 17);
+ out.write(mydata);
+ out.close();
- final int one_third = data.length/3;
+ final int one_third = mydata.length/3;
final int two_third = one_third*2;
{ //test seek
final int offset = one_third;
- final int len = data.length - offset;
+ final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
@@ -232,13 +238,13 @@ public class TestWebHdfsFileSystemContra
for (int i = 0; i < buf.length; i++) {
assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
- data[i + offset], buf[i]);
+ mydata[i + offset], buf[i]);
}
}
{ //test position read (read the data after the two_third location)
final int offset = two_third;
- final int len = data.length - offset;
+ final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
@@ -247,7 +253,7 @@ public class TestWebHdfsFileSystemContra
for (int i = 0; i < buf.length; i++) {
assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
- data[i + offset], buf[i]);
+ mydata[i + offset], buf[i]);
}
}
}
@@ -293,6 +299,10 @@ public class TestWebHdfsFileSystemContra
final Path root = new Path("/");
final Path dir = new Path("/test/testUrl");
assertTrue(webhdfs.mkdirs(dir));
+ final Path file = new Path("/test/file");
+ final FSDataOutputStream out = webhdfs.create(file);
+ out.write(1);
+ out.close();
{//test GETHOMEDIRECTORY
final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
@@ -354,5 +364,21 @@ public class TestWebHdfsFileSystemContra
assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
conn.disconnect();
}
+
+ {//test jsonParse with non-json type.
+ final HttpOpParam.Op op = GetOpParam.Op.OPEN;
+ final URL url = webhdfs.toUrl(op, file);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod(op.getType().toString());
+ conn.connect();
+
+ try {
+ WebHdfsFileSystem.jsonParse(conn, false);
+ fail();
+ } catch(IOException ioe) {
+ WebHdfsFileSystem.LOG.info("GOOD", ioe);
+ }
+ conn.disconnect();
+ }
}
}
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Fri Jun 21 06:37:27 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.security.UserGroupInformation;
@@ -39,6 +40,12 @@ import org.junit.Assert;
public class WebHdfsTestUtil {
public static final Log LOG = LogFactory.getLog(WebHdfsTestUtil.class);
+ public static Configuration createConf() {
+ final Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+ return conf;
+ }
+
public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
) throws IOException, URISyntaxException {
final String uri = WebHdfsFileSystem.SCHEME + "://"
@@ -48,7 +55,7 @@ public class WebHdfsTestUtil {
public static WebHdfsFileSystem getWebHdfsFileSystemAs(
final UserGroupInformation ugi, final Configuration conf
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws Exception {
@@ -69,16 +76,12 @@ public class WebHdfsTestUtil {
final int expectedResponseCode) throws IOException {
conn.connect();
Assert.assertEquals(expectedResponseCode, conn.getResponseCode());
- return WebHdfsFileSystem.jsonParse(conn.getInputStream());
+ return WebHdfsFileSystem.jsonParse(conn, false);
}
- public static HttpURLConnection twoStepWrite(HttpURLConnection conn,
- final HttpOpParam.Op op) throws IOException {
- conn.setRequestMethod(op.getType().toString());
- conn = WebHdfsFileSystem.twoStepWrite(conn, op);
- conn.setDoOutput(true);
- conn.connect();
- return conn;
+ public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
+ final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
+ return webhdfs.new Runner(op, conn).twoStepWrite();
}
public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestHttpServer.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestHttpServer.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestHttpServer.java Fri Jun 21 06:37:27 2013
@@ -37,6 +37,7 @@ import java.util.TreeSet;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
@@ -52,12 +53,16 @@ import org.apache.hadoop.fs.CommonConfig
import org.apache.hadoop.http.resource.JerseyResource;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mockito;
import org.mortbay.util.ajax.JSON;
+
public class TestHttpServer {
static final Log LOG = LogFactory.getLog(TestHttpServer.class);
@@ -305,6 +310,9 @@ public class TestHttpServer {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
+ conf.setBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ true);
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
@@ -349,4 +357,72 @@ public class TestHttpServer {
assertEquals("bar", m.get(JerseyResource.OP));
LOG.info("END testJersey()");
}
+
+ @Test
+ public void testHasAdministratorAccess() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
+ ServletContext context = Mockito.mock(ServletContext.class);
+ Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+ Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null);
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ Mockito.when(request.getRemoteUser()).thenReturn(null);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+ //authorization OFF
+ Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+ //authorization ON & user NULL
+ response = Mockito.mock(HttpServletResponse.class);
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+ Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+ Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+ //authorization ON & user NOT NULL & ACLs NULL
+ response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getRemoteUser()).thenReturn("foo");
+ Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+ //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
+ response = Mockito.mock(HttpServletResponse.class);
+ AccessControlList acls = Mockito.mock(AccessControlList.class);
+ Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+ Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+ Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+ Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
+
+ //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs
+ response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
+ Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+ Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+
+ }
+
+ @Test
+ public void testRequiresAuthorizationAccess() throws Exception {
+ Configuration conf = new Configuration();
+ ServletContext context = Mockito.mock(ServletContext.class);
+ Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE))
+ .thenReturn(conf);
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+ // requires admin access to instrumentation, FALSE by default
+ Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context,
+ request, response));
+
+ // requires admin access to instrumentation, TRUE
+ conf.setBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ true);
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+ AccessControlList acls = Mockito.mock(AccessControlList.class);
+ Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation> any()))
+ .thenReturn(false);
+ Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+ Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context,
+ request, response));
+ }
+
}
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestServletFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestServletFilter.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestServletFilter.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/http/TestServletFilter.java Fri Jun 21 06:37:27 2013
@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletReq
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
public class TestServletFilter extends junit.framework.TestCase {
static final Log LOG = LogFactory.getLog(HttpServer.class);
@@ -44,7 +45,7 @@ public class TestServletFilter extends j
static public class SimpleFilter implements Filter {
private FilterConfig filterConfig = null;
- public void init(FilterConfig filterConfig) {
+ public void init(FilterConfig filterConfig) throws ServletException {
this.filterConfig = filterConfig;
}
@@ -135,4 +136,53 @@ public class TestServletFilter extends j
http.stop();
}
}
+
+ static private class ErrorFilter extends SimpleFilter {
+ @Override
+ public void init(FilterConfig arg0) throws ServletException {
+ throw new ServletException("Throwing the exception from Filter init");
+ }
+
+ /** Configuration for the filter */
+ static public class Initializer extends FilterInitializer {
+ public void initFilter(FilterContainer container, Configuration conf) {
+ container.addFilter("simple", ErrorFilter.class.getName(), null);
+ }
+ }
+ }
+
+ @Test
+ public void testServletFilterWhenInitThrowsException() throws Exception {
+ Configuration conf = new Configuration();
+ // start a http server with ErrorFilter
+ conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+ ErrorFilter.Initializer.class.getName());
+ HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
+ try {
+ http.start();
+ fail("expecting exception");
+ } catch (IOException e) {
+ assertTrue(e.getMessage().contains(
+ "Problem in starting http server. Server handlers failed"));
+ }
+ }
+
+ /**
+ * Similar to the above test case, except that it uses a different API to add
+ * the filter. Regression test for HADOOP-8786.
+ */
+ @Test
+ public void testContextSpecificServletFilterWhenInitThrowsException()
+ throws Exception {
+ Configuration conf = new Configuration();
+ HttpServer http = new HttpServer("datanode", "localhost", 0, true, conf);
+ http.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class
+ .getName(), null, null);
+ try {
+ http.start();
+ fail("expecting exception");
+ } catch (IOException e) {
+ assertTrue(e.getMessage().contains("Unable to initialize WebAppContext"));
+ }
+ }
}
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestBloomMapFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestBloomMapFile.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestBloomMapFile.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestBloomMapFile.java Fri Jun 21 06:37:27 2013
@@ -18,6 +18,10 @@
package org.apache.hadoop.io;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -67,4 +71,41 @@ public class TestBloomMapFile extends Te
assertTrue(falsePos < 2);
}
+ private void checkMembershipVaryingSizedKeys(String name, List<Text> keys) throws Exception {
+ Path dirName = new Path(System.getProperty("test.build.data",".") +
+ name + ".bloommapfile");
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path qualifiedDirName = fs.makeQualified(dirName);
+ BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
+ qualifiedDirName.toString(), Text.class, NullWritable.class);
+ for (Text key : keys) {
+ writer.append(key, NullWritable.get());
+ }
+ writer.close();
+
+ // will check for membership in the opposite order of how keys were inserted
+ BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
+ qualifiedDirName.toString(), conf);
+ Collections.reverse(keys);
+ for (Text key : keys) {
+ assertTrue("False negative for existing key " + key, reader.probablyHasKey(key));
+ }
+ reader.close();
+ fs.delete(qualifiedDirName, true);
+ }
+
+ public void testMembershipVaryingSizedKeysTest1() throws Exception {
+ ArrayList<Text> list = new ArrayList<Text>();
+ list.add(new Text("A"));
+ list.add(new Text("BB"));
+ checkMembershipVaryingSizedKeys(getName(), list);
+ }
+
+ public void testMembershipVaryingSizedKeysTest2() throws Exception {
+ ArrayList<Text> list = new ArrayList<Text>();
+ list.add(new Text("AA"));
+ list.add(new Text("B"));
+ checkMembershipVaryingSizedKeys(getName(), list);
+ }
+
}
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestIOUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestIOUtils.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestIOUtils.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestIOUtils.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestIOUtils {
+
+ @Test
+ public void testWrappedReadForCompressedData() throws IOException {
+ byte[] buf = new byte[2];
+ InputStream mockStream = Mockito.mock(InputStream.class);
+ Mockito.when(mockStream.read(buf, 0, 1)).thenReturn(1);
+ Mockito.when(mockStream.read(buf, 0, 2)).thenThrow(
+ new java.lang.InternalError());
+
+ try {
+ assertEquals("Check expected value", 1,
+ IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 1));
+ } catch (IOException ioe) {
+ fail("Unexpected error while reading");
+ }
+ try {
+ IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 2);
+ fail("Expected an IOException to be thrown");
+ } catch (IOException ioe) {
+ assertTrue("Unexpected exception message",
+ ioe.getMessage().contains("Error while reading compressed data"));
+ }
+ }
+}
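The method under test is exercised only through its observable behavior. A minimal sketch consistent with these assertions (an assumption about the implementation, not the committed source): delegate to InputStream.read() and convert non-IOException Throwables into IOExceptions.

    // Reads via InputStream.read() and converts Throwables thrown by broken
    // native decompressors (e.g. InternalError) into IOExceptions, so callers
    // only have to handle one failure type.
    public static int wrappedReadForCompressedData(InputStream is,
        byte[] buf, int off, int len) throws IOException {
      try {
        return is.read(buf, off, len);
      } catch (IOException ioe) {
        throw ioe; // already the expected type
      } catch (Throwable t) {
        throw new IOException("Error while reading compressed data", t);
      }
    }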
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestSortedMapWritable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestSortedMapWritable.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestSortedMapWritable.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestSortedMapWritable.java Fri Jun 21 06:37:27 2013
@@ -19,15 +19,21 @@
*/
package org.apache.hadoop.io;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
import java.util.Map;
-import junit.framework.TestCase;
+import org.junit.Test;
/**
* Tests SortedMapWritable
*/
-public class TestSortedMapWritable extends TestCase {
+public class TestSortedMapWritable {
/** the test */
+ @Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable() {
Text[] keys = {
@@ -92,6 +98,7 @@ public class TestSortedMapWritable exten
/**
* Test that number of "unknown" classes is propagated across multiple copies.
*/
+ @Test
@SuppressWarnings("deprecation")
public void testForeignClass() {
SortedMapWritable inMap = new SortedMapWritable();
@@ -101,4 +108,74 @@ public class TestSortedMapWritable exten
SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
assertEquals(1, copyOfCopy.getNewClasses());
}
+
+ /**
+ * Tests that the equals and hashCode methods still honor their contract.
+ */
+ @Test
+ public void testEqualsAndHashCode() {
+ String failureReason;
+ SortedMapWritable mapA = new SortedMapWritable();
+ SortedMapWritable mapB = new SortedMapWritable();
+
+ // Sanity checks
+ failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
+ assertNotNull(failureReason, mapA);
+ assertNotNull(failureReason, mapB);
+
+ // Basic null check
+ assertFalse("equals method returns true when passed null",
+ mapA.equals(null));
+
+ // When the entry sets are empty, the maps should be equal
+ assertTrue("Two empty SortedMapWritables are no longer equal",
+ mapA.equals(mapB));
+
+ // Setup
+ Text[] keys = { new Text("key1"), new Text("key2") };
+
+ BytesWritable[] values = { new BytesWritable("value1".getBytes()),
+ new BytesWritable("value2".getBytes()) };
+
+ mapA.put(keys[0], values[0]);
+ mapB.put(keys[1], values[1]);
+
+ // entrySets are different
+ failureReason = "Two SortedMapWritables with different data are now equal";
+ assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
+ assertTrue(failureReason, !mapA.equals(mapB));
+ assertTrue(failureReason, !mapB.equals(mapA));
+
+ mapA.put(keys[1], values[1]);
+ mapB.put(keys[0], values[0]);
+
+ // entrySets are now the same
+ failureReason = "Two SortedMapWritables with the same entry sets, formed in different order, are now different";
+ assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
+ assertTrue(failureReason, mapA.equals(mapB));
+ assertTrue(failureReason, mapB.equals(mapA));
+
+ // Check entry sets with the same keys but different values
+ mapA.put(keys[0], values[1]);
+ mapA.put(keys[1], values[0]);
+
+ failureReason = "Two SortedMapWritables with different content are now equal";
+ assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
+ assertTrue(failureReason, !mapA.equals(mapB));
+ assertTrue(failureReason, !mapB.equals(mapA));
+ }
+
+ @Test(timeout = 1000)
+ public void testPutAll() {
+ SortedMapWritable map1 = new SortedMapWritable();
+ SortedMapWritable map2 = new SortedMapWritable();
+ map1.put(new Text("key"), new Text("value"));
+ map2.putAll(map1);
+
+ assertEquals("map1 entries don't match map2 entries", map1, map2);
+ assertTrue(
+ "map2 doesn't have class information from map1",
+ map2.classToIdMap.containsKey(Text.class)
+ && map2.idToClassMap.containsValue(Text.class));
+ }
}
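A short usage sketch (illustrative only, assuming JUnit's assertTrue in scope as above) of why the contract matters: a hashed collection locates a logically equal map only when hashCode() agrees with equals(), which is exactly what testEqualsAndHashCode pins down.

    SortedMapWritable a = new SortedMapWritable();
    a.put(new Text("key1"), new BytesWritable("value1".getBytes()));
    SortedMapWritable b = new SortedMapWritable();
    b.put(new Text("key1"), new BytesWritable("value1".getBytes()));
    java.util.Set<SortedMapWritable> set =
        new java.util.HashSet<SortedMapWritable>();
    set.add(a);
    // Succeeds only if equals() and hashCode() agree on logical equality.
    assertTrue("hashed lookup depends on the equals/hashCode contract",
        set.contains(b));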
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestUTF8.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestUTF8.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestUTF8.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/TestUTF8.java Fri Jun 21 06:37:27 2013
@@ -19,16 +19,22 @@
package org.apache.hadoop.io;
import junit.framework.TestCase;
+import java.io.IOException;
+import java.io.UTFDataFormatException;
import java.util.Random;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+
/** Unit tests for UTF8. */
+@SuppressWarnings("deprecation")
public class TestUTF8 extends TestCase {
public TestUTF8(String name) { super(name); }
private static final Random RANDOM = new Random();
public static String getTestString() throws Exception {
- StringBuffer buffer = new StringBuffer();
+ StringBuilder buffer = new StringBuilder();
int length = RANDOM.nextInt(100);
for (int i = 0; i < length; i++) {
buffer.append((char)(RANDOM.nextInt(Character.MAX_VALUE)));
@@ -37,13 +43,13 @@ public class TestUTF8 extends TestCase {
}
public void testWritable() throws Exception {
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 10000; i++) {
TestWritable.testWritable(new UTF8(getTestString()));
}
}
public void testGetBytes() throws Exception {
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 10000; i++) {
// generate a random string
String before = getTestString();
@@ -57,7 +63,7 @@ public class TestUTF8 extends TestCase {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 10000; i++) {
// generate a random string
String before = getTestString();
@@ -68,19 +74,96 @@ public class TestUTF8 extends TestCase {
// test that it reads correctly
in.reset(out.getData(), out.getLength());
String after = UTF8.readString(in);
- assertTrue(before.equals(after));
+ assertEquals(before, after);
// test that it reads correctly with DataInput
in.reset(out.getData(), out.getLength());
String after2 = in.readUTF();
- assertTrue(before.equals(after2));
+ assertEquals(before, after2);
// test that it is compatible with Java's other decoder
String after3 = new String(out.getData(), 2, out.getLength()-2, "UTF-8");
- assertTrue(before.equals(after3));
+ assertEquals(before, after3);
}
}
-
+
+ public void testNullEncoding() throws Exception {
+ String s = new String(new char[] { 0 });
+
+ DataOutputBuffer dob = new DataOutputBuffer();
+ new UTF8(s).write(dob);
+
+ assertEquals(s, new String(dob.getData(), 2, dob.getLength()-2, "UTF-8"));
+ }
+
+ /**
+ * Test encoding and decoding of UTF8 outside the basic multilingual plane.
+ *
+ * This is a regression test for HADOOP-9103.
+ */
+ public void testNonBasicMultilingualPlane() throws Exception {
+ // Test using the "CAT FACE" character (U+1F431)
+ // See http://www.fileformat.info/info/unicode/char/1f431/index.htm
+ String catFace = "\uD83D\uDC31";
+
+ // This encodes to 4 bytes in UTF-8:
+ byte[] encoded = catFace.getBytes("UTF-8");
+ assertEquals(4, encoded.length);
+ assertEquals("f09f90b1", StringUtils.byteToHexString(encoded));
+
+ // Decode back to String using our own decoder
+ String roundTrip = UTF8.fromBytes(encoded);
+ assertEquals(catFace, roundTrip);
+ }
+
+ /**
+ * Test that decoding invalid UTF8 throws an appropriate error message.
+ */
+ public void testInvalidUTF8() throws Exception {
+ byte[] invalid = new byte[] {
+ 0x01, 0x02, (byte)0xff, (byte)0xff, 0x01, 0x02, 0x03, 0x04, 0x05 };
+ try {
+ UTF8.fromBytes(invalid);
+ fail("did not throw an exception");
+ } catch (UTFDataFormatException utfde) {
+ GenericTestUtils.assertExceptionContains(
+ "Invalid UTF8 at ffff01020304", utfde);
+ }
+ }
+
+ /**
+ * Test for a 5-byte UTF8 sequence, which is now considered illegal.
+ */
+ public void test5ByteUtf8Sequence() throws Exception {
+ byte[] invalid = new byte[] {
+ 0x01, 0x02, (byte)0xf8, (byte)0x88, (byte)0x80,
+ (byte)0x80, (byte)0x80, 0x04, 0x05 };
+ try {
+ UTF8.fromBytes(invalid);
+ fail("did not throw an exception");
+ } catch (UTFDataFormatException utfde) {
+ GenericTestUtils.assertExceptionContains(
+ "Invalid UTF8 at f88880808004", utfde);
+ }
+ }
+
+ /**
+ * Test that decoding invalid UTF8 due to truncation yields the correct
+ * exception type.
+ */
+ public void testInvalidUTF8Truncated() throws Exception {
+ // Truncated CAT FACE character -- this is a 4-byte sequence, but we
+ // only have the first three bytes.
+ byte[] truncated = new byte[] {
+ (byte)0xF0, (byte)0x9F, (byte)0x90 };
+ try {
+ UTF8.fromBytes(truncated);
+ fail("did not throw an exception");
+ } catch (UTFDataFormatException utfde) {
+ GenericTestUtils.assertExceptionContains(
+ "Truncated UTF8 at f09f90", utfde);
+ }
+ }
}
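For readers tracing the f09f90b1 assertion in testNonBasicMultilingualPlane, this is the standard UTF-8 bit arithmetic that produces those four bytes from U+1F431 (a worked illustration, not part of the patch):

    // Code points above U+FFFF encode as 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
    int cp = 0x1F431;                               // CAT FACE code point
    byte b0 = (byte) (0xF0 | (cp >>> 18));          // 11110xxx -> 0xF0
    byte b1 = (byte) (0x80 | ((cp >>> 12) & 0x3F)); // 10xxxxxx -> 0x9F
    byte b2 = (byte) (0x80 | ((cp >>> 6) & 0x3F));  // 10xxxxxx -> 0x90
    byte b3 = (byte) (0x80 | (cp & 0x3F));          // 10xxxxxx -> 0xB1
    // In Java source the same code point is the surrogate pair \uD83D\uDC31,
    // which String.getBytes("UTF-8") recombines before encoding.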
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/compress/TestCodec.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/compress/TestCodec.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/compress/TestCodec.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/io/compress/TestCodec.java Fri Jun 21 06:37:27 2013
@@ -21,6 +21,7 @@ import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
@@ -37,16 +38,17 @@ import java.util.Random;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
-import junit.framework.TestCase;
-
+import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
@@ -59,32 +61,39 @@ import org.apache.hadoop.io.compress.zli
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
-public class TestCodec extends TestCase {
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestCodec {
private static final Log LOG= LogFactory.getLog(TestCodec.class);
private Configuration conf = new Configuration();
private int count = 10000;
private int seed = new Random().nextInt();
-
+
+ @Test
public void testDefaultCodec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DefaultCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DefaultCodec");
}
-
+
+ @Test
public void testGzipCodec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
-
+
+ @Test
public void testBZip2Codec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.BZip2Codec");
}
-
+ @Test
public void testSnappyCodec() throws IOException {
if (LoadSnappy.isAvailable()) {
if (LoadSnappy.isLoaded()) {
@@ -97,6 +106,7 @@ public class TestCodec extends TestCase
}
}
+ @Test
public void testGzipCodecWithParam() throws IOException {
Configuration conf = new Configuration(this.conf);
ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
@@ -130,10 +140,7 @@ public class TestCodec extends TestCase
key.write(data);
value.write(data);
}
- DataInputBuffer originalData = new DataInputBuffer();
- DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
- originalData.reset(data.getData(), 0, data.getLength());
-
+
LOG.info("Generated " + count + " records");
// Compress data
@@ -157,6 +164,9 @@ public class TestCodec extends TestCase
new DataInputStream(new BufferedInputStream(inflateFilter));
// Check
+ DataInputBuffer originalData = new DataInputBuffer();
+ originalData.reset(data.getData(), 0, data.getLength());
+ DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
for(int i=0; i < count; ++i) {
RandomDatum k1 = new RandomDatum();
RandomDatum v1 = new RandomDatum();
@@ -170,9 +180,129 @@ public class TestCodec extends TestCase
assertTrue("original and compressed-then-decompressed-output not equal",
k1.equals(k2) && v1.equals(v2));
}
+
+ // Decompress the data one byte at a time
+ originalData.reset(data.getData(), 0, data.getLength());
+ deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
+ compressedDataBuffer.getLength());
+ inflateFilter =
+ codec.createInputStream(deCompressedDataBuffer);
+
+ // Check
+ originalIn = new DataInputStream(new BufferedInputStream(originalData));
+ int expected;
+ do {
+ expected = originalIn.read();
+ assertEquals("Inflated stream read by byte does not match",
+ expected, inflateFilter.read());
+ } while (expected != -1);
+
LOG.info("SUCCESS! Completed checking " + count + " records");
}
+ @Test
+ public void testSplitableCodecs() throws Exception {
+ testSplitableCodec(BZip2Codec.class);
+ }
+
+ private void testSplitableCodec(
+ Class<? extends SplittableCompressionCodec> codecClass)
+ throws IOException {
+ final long DEFLBYTES = 2 * 1024 * 1024;
+ final Configuration conf = new Configuration();
+ final Random rand = new Random();
+ final long seed = rand.nextLong();
+ LOG.info("seed: " + seed);
+ rand.setSeed(seed);
+ SplittableCompressionCodec codec =
+ ReflectionUtils.newInstance(codecClass, conf);
+ final FileSystem fs = FileSystem.getLocal(conf);
+ final FileStatus infile =
+ fs.getFileStatus(writeSplitTestFile(fs, rand, codec, DEFLBYTES));
+ if (infile.getLen() > Integer.MAX_VALUE) {
+ fail("Unexpected compression: " + DEFLBYTES + " -> " + infile.getLen());
+ }
+ final int flen = (int) infile.getLen();
+ final Text line = new Text();
+ final Decompressor dcmp = CodecPool.getDecompressor(codec);
+ try {
+ for (int pos = 0; pos < infile.getLen(); pos += rand.nextInt(flen / 8)) {
+ // read from random positions, verifying that there exist two sequential
+ // lines as written in writeSplitTestFile
+ final SplitCompressionInputStream in =
+ codec.createInputStream(fs.open(infile.getPath()), dcmp,
+ pos, flen, SplittableCompressionCodec.READ_MODE.BYBLOCK);
+ if (in.getAdjustedStart() >= flen) {
+ break;
+ }
+ LOG.info("SAMPLE " + in.getAdjustedStart() + "," + in.getAdjustedEnd());
+ final LineReader lreader = new LineReader(in);
+ lreader.readLine(line); // ignore; likely partial
+ if (in.getPos() >= flen) {
+ break;
+ }
+ lreader.readLine(line);
+ final int seq1 = readLeadingInt(line);
+ lreader.readLine(line);
+ if (in.getPos() >= flen) {
+ break;
+ }
+ final int seq2 = readLeadingInt(line);
+ assertEquals("Mismatched lines", seq1 + 1, seq2);
+ }
+ } finally {
+ CodecPool.returnDecompressor(dcmp);
+ }
+ // remove on success
+ fs.delete(infile.getPath().getParent(), true);
+ }
+
+ private static int readLeadingInt(Text txt) throws IOException {
+ DataInputStream in =
+ new DataInputStream(new ByteArrayInputStream(txt.getBytes()));
+ return in.readInt();
+ }
+
+ /** Write infLen bytes (deflated) to file in test dir using codec.
+ * Records are of the form
+ * <i><b64 rand><i+1><b64 rand>, i.e. each line begins with a 4-byte
+ * sequence number written over the head of its base64 payload.
+ */
+ private static Path writeSplitTestFile(FileSystem fs, Random rand,
+ CompressionCodec codec, long infLen) throws IOException {
+ final int REC_SIZE = 1024;
+ final Path wd = new Path(new Path(
+ System.getProperty("test.build.data", "/tmp")).makeQualified(fs),
+ codec.getClass().getSimpleName());
+ final Path file = new Path(wd, "test" + codec.getDefaultExtension());
+ final byte[] b = new byte[REC_SIZE];
+ final Base64 b64 = new Base64(0, null);
+ DataOutputStream fout = null;
+ Compressor cmp = CodecPool.getCompressor(codec);
+ try {
+ fout = new DataOutputStream(codec.createOutputStream(
+ fs.create(file, true), cmp));
+ final DataOutputBuffer dob = new DataOutputBuffer(REC_SIZE * 4 / 3 + 4);
+ int seq = 0;
+ while (infLen > 0) {
+ rand.nextBytes(b);
+ final byte[] b64enc = b64.encode(b); // ensures rand printable, no LF
+ dob.reset();
+ dob.writeInt(seq);
+ System.arraycopy(dob.getData(), 0, b64enc, 0, dob.getLength());
+ fout.write(b64enc);
+ fout.write('\n');
+ ++seq;
+ infLen -= b64enc.length;
+ }
+ LOG.info("Wrote " + seq + " records to " + file);
+ } finally {
+ IOUtils.cleanup(LOG, fout);
+ CodecPool.returnCompressor(cmp);
+ }
+ return file;
+ }
+
+ @Test
public void testCodecPoolGzipReuse() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", true);
@@ -257,6 +387,7 @@ public class TestCodec extends TestCase
outbytes.length >= b.length);
}
+ @Test
public void testCodecInitWithCompressionLevel() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean("io.native.lib.available", true);
@@ -276,6 +407,7 @@ public class TestCodec extends TestCase
"org.apache.hadoop.io.compress.DefaultCodec");
}
+ @Test
public void testCodecPoolCompressorReinit() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", true);
@@ -289,13 +421,15 @@ public class TestCodec extends TestCase
DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
gzipReinitTest(conf, dfc);
}
-
+
+ @Test
public void testSequenceFileDefaultCodec() throws IOException, ClassNotFoundException,
InstantiationException, IllegalAccessException {
sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DefaultCodec", 100);
sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DefaultCodec", 1000000);
}
-
+
+ @Test
public void testSequenceFileBZip2Codec() throws IOException, ClassNotFoundException,
InstantiationException, IllegalAccessException {
sequenceFileCodecTest(conf, 0, "org.apache.hadoop.io.compress.BZip2Codec", 100);
@@ -383,6 +517,7 @@ public class TestCodec extends TestCase
}
+ @Test
public void testGzipCompatibility() throws IOException {
Random r = new Random();
long seed = r.nextLong();
@@ -450,12 +585,14 @@ public class TestCodec extends TestCase
assertTrue(java.util.Arrays.equals(chk, dflchk));
}
+ @Test
public void testBuiltInGzipConcat() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", false);
GzipConcatTest(conf, BuiltInGzipDecompressor.class);
}
+ @Test
public void testNativeGzipConcat() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", true);
@@ -466,10 +603,7 @@ public class TestCodec extends TestCase
GzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
}
- public TestCodec(String name) {
- super(name);
- }
-
+ @Test
public void testCodecPoolAndGzipDecompressor() {
// BuiltInZlibInflater should not be used as the GzipCodec decompressor.
// Assert that this is the case.
@@ -520,6 +654,7 @@ public class TestCodec extends TestCase
}
}
+ @Test
public void testGzipCodecRead() throws IOException {
// Create a gzipped file and try to read it back, using a decompressor
// from the CodecPool.
@@ -572,6 +707,7 @@ public class TestCodec extends TestCase
}
}
+ @Test
public void testGzipCodecWrite() throws IOException {
// Create a gzipped file using a compressor from the CodecPool,
// and try to read it back via the regular GZIPInputStream.
@@ -623,6 +759,60 @@ public class TestCodec extends TestCase
verifyGzipFile(fileName, msg);
}
+ /**
+ * Regression test for HADOOP-8423: seeking in a block-compressed
+ * stream would not properly reset the block decompressor state.
+ */
+ @Test
+ public void testSnappyMapFile() throws Exception {
+ if (SnappyCodec.isNativeSnappyLoaded(conf)) {
+ codecTestMapFile(SnappyCodec.class, CompressionType.BLOCK, 100);
+ } else {
+ System.err.println(
+ "Could not find the snappy codec to test MapFiles with!");
+ }
+ }
+
+ private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
+ CompressionType type, int records) throws Exception {
+ FileSystem fs = FileSystem.get(conf);
+ LOG.info("Creating MapFiles with " + records +
+ " records using codec " + clazz.getSimpleName());
+ Path path = new Path(new Path(
+ System.getProperty("test.build.data", "/tmp")),
+ clazz.getSimpleName() + "-" + type + "-" + records);
+
+ LOG.info("Writing " + path);
+ createMapFile(conf, fs, path, clazz.newInstance(), type, records);
+ MapFile.Reader reader = new MapFile.Reader(fs, path.toString(), conf);
+ Text key1 = new Text("002");
+ assertNotNull(reader.get(key1, new Text()));
+ Text key2 = new Text("004");
+ assertNotNull(reader.get(key2, new Text()));
+ reader.close();
+ }
+
+ private static void createMapFile(Configuration conf, FileSystem fs,
+ Path path, CompressionCodec codec, CompressionType type, int records)
+ throws IOException {
+ MapFile.Writer writer =
+ new MapFile.Writer(
+ conf,
+ fs,
+ path.toString(),
+ Text.class,
+ Text.class,
+ type,
+ codec,
+ null);
+ Text key = new Text();
+ for (int j = 0; j < records; j++) {
+ key.set(String.format("%03d", j));
+ writer.append(key, key);
+ }
+ writer.close();
+ }
+
+ @Test
public void testGzipLongOverflow() throws IOException {
LOG.info("testGzipLongOverflow");