Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/31 19:34:57 UTC
svn commit: r1404285 - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./
src/contrib/libwebhdfs/src/ src/main/java/
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/apache/hadoop/hdfs/server/namenode...
Author: szetszwo
Date: Wed Oct 31 18:34:51 2012
New Revision: 1404285
URL: http://svn.apache.org/viewvc?rev=1404285&view=rev
Log:
Merge r1403306 through r1404284 from trunk.
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
- copied unchanged from r1404284, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
Removed:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1403306-1404284
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Oct 31 18:34:51 2012
@@ -103,18 +103,11 @@ Trunk (Unreleased)
HDFS-3510. Editlog pre-allocation is performed prior to writing edits
to avoid partial edits in case the disk runs out of space. (Colin McCabe via suresh)
- HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
-
HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
HDFS-3768. Exception in TestJettyHelper is incorrect.
(Eli Reisman via jghoman)
- HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
-
- HDFS-3789. JournalManager#format() should be able to throw IOException
- (Ivan Kelly via todd)
-
HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
suresh)
@@ -152,12 +145,12 @@ Trunk (Unreleased)
HDFS-4110. Refine a log printed in JNStorage. (Liang Xie via suresh)
- HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
- (suresh)
-
HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
returning more than INode array. (Jing Zhao via suresh)
+ HDFS-4129. Add utility methods to dump NameNode in memory tree for
+ testing. (szetszwo via suresh)
+
OPTIMIZATIONS
BUG FIXES
@@ -356,6 +349,9 @@ Release 2.0.3-alpha - Unreleased
INCOMPATIBLE CHANGES
+ HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
+ (suresh)
+
NEW FEATURES
HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@@ -440,6 +436,15 @@ Release 2.0.3-alpha - Unreleased
HDFS-4121. Add namespace declarations in hdfs .proto files for languages
other than java. (Binglin Chang via suresh)
+ HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
+
+ HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
+
+ HDFS-3789. JournalManager#format() should be able to throw IOException
+ (Ivan Kelly via todd)
+
+ HDFS-3916. libwebhdfs testing code cleanup. (Jing Zhao via suresh)
+
OPTIMIZATIONS
BUG FIXES
@@ -522,6 +527,15 @@ Release 2.0.3-alpha - Unreleased
HDFS-3616. Fix a ConcurrentModificationException bug that BP actor threads
may not be shutdown properly in DataNode. (Jing Zhao via szetszwo)
+ HDFS-4127. Log message is not correct in case of short of replica.
+ (Junping Du via suresh)
+
+ HADOOP-8994. TestDFSShell creates file named "noFileHere", making further
+ tests hard to understand (Andy Isaacson via daryn)
+
+ HDFS-3809. Make BKJM use protobufs for all serialization with ZK.
+ (Ivan Kelly via umamahesh)
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -1048,8 +1062,6 @@ Release 2.0.2-alpha - 2012-09-07
HDFS-3828. Block Scanner rescans blocks too frequently.
(Andy Isaacson via eli)
- HDFS-3809. Make BKJM use protobufs for all serialization with ZK.(Ivan Kelly via umamahesh)
-
HDFS-3895. hadoop-client must include commons-cli (tucu)
HDFS-2757. Cannot read a local block that's being written to when
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c Wed Oct 31 18:34:51 2012
@@ -17,6 +17,7 @@
*/
#include "hdfs.h"
+#include "native_mini_dfs.h"
#include <inttypes.h>
#include <jni.h>
@@ -26,228 +27,254 @@
#include <time.h>
#include <unistd.h>
-void permission_disp(short permissions, char *rtr) {
+static struct NativeMiniDfsCluster *cluster;
+
+void permission_disp(short permissions, char *rtr)
+{
rtr[9] = '\0';
int i;
- for(i=2;i>=0;i--)
+ short perm;
+ for(i = 2; i >= 0; i--)
{
- short permissionsId = permissions >> (i * 3) & (short)7;
- char* perm;
- switch(permissionsId) {
- case 7:
- perm = "rwx"; break;
- case 6:
- perm = "rw-"; break;
- case 5:
- perm = "r-x"; break;
- case 4:
- perm = "r--"; break;
- case 3:
- perm = "-wx"; break;
- case 2:
- perm = "-w-"; break;
- case 1:
- perm = "--x"; break;
- case 0:
- perm = "---"; break;
- default:
- perm = "???";
- }
- strncpy(rtr, perm, 3);
- rtr+=3;
+ perm = permissions >> (i * 3);
+ rtr[0] = perm & 4 ? 'r' : '-';
+ rtr[1] = perm & 2 ? 'w' : '-';
+ rtr[2] = perm & 1 ? 'x' : '-';
+ rtr += 3;
}
}
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
+ char buffer[32];
+ tSize num_written_bytes;
+ const char* slashTmp = "/tmp";
+ int nnPort;
+ char *rwTemplate, *rwTemplate2, *newDirTemplate,
+ *appendTemplate, *userTemplate, *rwPath = NULL;
+ const char* fileContents = "Hello, World!";
+ const char* nnHost = NULL;
+
if (argc != 2) {
fprintf(stderr, "usage: test_libwebhdfs_ops <username>\n");
- return -1;
+ exit(1);
}
- char buffer[32];
- tSize num_written_bytes;
+ struct NativeMiniDfsConf conf = {
+ .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+ };
+ cluster = nmdCreate(&conf);
+ if (!cluster) {
+ fprintf(stderr, "Failed to create the NativeMiniDfsCluster.\n");
+ exit(1);
+ }
+ if (nmdWaitClusterUp(cluster)) {
+ fprintf(stderr, "Error when waiting for cluster to be ready.\n");
+ exit(1);
+ }
+ if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+ fprintf(stderr, "Error when retrieving namenode host address.\n");
+ exit(1);
+ }
- hdfsFS fs = hdfsConnectAsUserNewInstance("default", 50070, argv[1]);
+ hdfsFS fs = hdfsConnectAsUserNewInstance(nnHost, nnPort, argv[1]);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
- const char* writePath = "/tmp/testfile.txt";
- const char* fileContents = "Hello, World!";
-
{
- //Write tests
-
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ // Write tests
+ rwTemplate = strdup("/tmp/helloWorldXXXXXX");
+ if (!rwTemplate) {
+ fprintf(stderr, "Failed to create rwTemplate!\n");
+ exit(1);
+ }
+ rwPath = mktemp(rwTemplate);
+ // hdfsOpenFile
+ hdfsFile writeFile = hdfsOpenFile(fs, rwPath,
+ O_WRONLY|O_CREAT, 0, 0, 0);
+
if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for writing!\n", rwPath);
+ exit(1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
- num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents) + 1);
+ fprintf(stderr, "Opened %s for writing successfully...\n", rwPath);
+ // hdfsWrite
+ num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents,
+ (int) strlen(fileContents) + 1);
if (num_written_bytes != strlen(fileContents) + 1) {
- fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
- (int)(strlen(fileContents) + 1), (int)num_written_bytes);
- exit(-1);
+ fprintf(stderr, "Failed to write correct number of bytes - "
+ "expected %d, got %d\n",
+ (int)(strlen(fileContents) + 1), (int) num_written_bytes);
+ exit(1);
}
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+ // hdfsTell
tOffset currentPos = -1;
if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
fprintf(stderr,
- "Failed to get current file position correctly! Got %lld!\n",
- currentPos);
- exit(-1);
- }
- fprintf(stderr, "Current position: %lld\n", currentPos);
-
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
- exit(-1);
+ "Failed to get current file position correctly. Got %"
+ PRId64 "!\n", currentPos);
+ exit(1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
-
- if (hdfsHFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
- exit(-1);
- }
- fprintf(stderr, "HFlushed %s successfully!\n", writePath);
+ fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
hdfsCloseFile(fs, writeFile);
+ // Done test write
}
+ sleep(1);
+
{
//Read tests
- sleep(1);
- const char* readPath = "/tmp/testfile.txt";
- int exists = hdfsExists(fs, readPath);
+ int available = 0, exists = 0;
+ // hdfsExists
+ exists = hdfsExists(fs, rwPath);
if (exists) {
- fprintf(stderr, "Failed to validate existence of %s\n", readPath);
- exists = hdfsExists(fs, readPath);
+ fprintf(stderr, "Failed to validate existence of %s\n", rwPath);
+ exists = hdfsExists(fs, rwPath);
if (exists) {
- fprintf(stderr, "Still failed to validate existence of %s\n", readPath);
- exit(-1);
+ fprintf(stderr,
+ "Still failed to validate existence of %s\n", rwPath);
+ exit(1);
}
}
- hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+ hdfsFile readFile = hdfsOpenFile(fs, rwPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
- fprintf(stderr, "Failed to open %s for reading!\n", readPath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for reading!\n", rwPath);
+ exit(1);
}
-
if (!hdfsFileIsOpenForRead(readFile)) {
fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
"with O_RDONLY, and it did not show up as 'open for "
"read'\n");
- exit(-1);
+ exit(1);
}
- fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
+ available = hdfsAvailable(fs, readFile);
+ fprintf(stderr, "hdfsAvailable: %d\n", available);
+ // hdfsSeek, hdfsTell
tOffset seekPos = 1;
if(hdfsSeek(fs, readFile, seekPos)) {
- fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
- exit(-1);
+ fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+ exit(1);
}
tOffset currentPos = -1;
if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
fprintf(stderr,
- "Failed to get current file position correctly! Got %lld!\n",
- currentPos);
- exit(-1);
- }
- fprintf(stderr, "Current position: %lld\n", currentPos);
-
- if (!hdfsFileUsesDirectRead(readFile)) {
- fprintf(stderr, "Direct read support incorrectly not detected "
- "for HDFS filesystem\n");
- exit(-1);
+ "Failed to get current file position correctly! Got %"
+ PRId64 "!\n", currentPos);
+
+ exit(1);
}
+ fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
- fprintf(stderr, "Direct read support detected for HDFS\n");
-
- // Test the direct read path
if(hdfsSeek(fs, readFile, 0)) {
- fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
- exit(-1);
+ fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
+ exit(1);
}
+
+ // hdfsRead
memset(buffer, 0, sizeof(buffer));
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
- sizeof(buffer));
+ tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
- fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
+ fprintf(stderr, "Failed to read (direct). "
+ "Expected %s but got %s (%d bytes)\n",
fileContents, buffer, num_read_bytes);
- exit(-1);
+ exit(1);
}
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, buffer);
+
if (hdfsSeek(fs, readFile, 0L)) {
fprintf(stderr, "Failed to seek to file start!\n");
- exit(-1);
+ exit(1);
}
- // Disable the direct read path so that we really go through the slow
- // read path
- hdfsFileDisableDirectRead(readFile);
-
- num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
- sizeof(buffer));
- fprintf(stderr, "Read following %d bytes:\n%s\n",
- num_read_bytes, buffer);
-
+ // hdfsPread
memset(buffer, 0, strlen(fileContents + 1));
-
- num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer,
- sizeof(buffer));
+ num_read_bytes = hdfsPread(fs, readFile, 0, buffer, sizeof(buffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, buffer);
hdfsCloseFile(fs, readFile);
+ // Done test read
}
int totalResult = 0;
int result = 0;
{
//Generic file-system operations
-
- const char* srcPath = "/tmp/testfile.txt";
- const char* dstPath = "/tmp/testfile2.txt";
- const char* copyPath = "/tmp/testfile_copy.txt";
- const char* movePath = "/tmp/testfile_move.txt";
-
- fprintf(stderr, "hdfsCopy: %s\n", ((result = hdfsCopy(fs, srcPath, fs, copyPath)) ? "Failed!" : "Success!"));
- totalResult += result;
- fprintf(stderr, "hdfsMove: %s\n", ((result = hdfsMove(fs, copyPath, fs, movePath)) ? "Failed!" : "Success!"));
- totalResult += result;
-
- fprintf(stderr, "hdfsGetDefaultBlockSize: %lld\n", hdfsGetDefaultBlockSize(fs));
-
- fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, srcPath, dstPath)) ? "Failed!" : "Success!"));
- totalResult += result;
- fprintf(stderr, "hdfsRename back: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
- totalResult += result;
-
- const char* slashTmp = "/tmp";
- const char* newDirectory = "/tmp/newdir";
- fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
- totalResult += result;
-
- fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 1)) ? "Failed!" : "Success!"));
- totalResult += result;
-
+ char *srcPath = rwPath;
char buffer[256];
const char *resp;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
- totalResult += (resp ? 0 : 1);
- fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+ rwTemplate2 = strdup("/tmp/helloWorld2XXXXXX");
+ if (!rwTemplate2) {
+ fprintf(stderr, "Failed to create rwTemplate2!\n");
+ exit(1);
+ }
+ char *dstPath = mktemp(rwTemplate2);
+ newDirTemplate = strdup("/tmp/newdirXXXXXX");
+ if (!newDirTemplate) {
+ fprintf(stderr, "Failed to create newDirTemplate!\n");
+ exit(1);
+ }
+ char *newDirectory = mktemp(newDirTemplate);
+
+ // hdfsRename
+ fprintf(stderr, "hdfsRename: %s\n",
+ ((result = hdfsRename(fs, rwPath, dstPath)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsRename back: %s\n",
+ ((result = hdfsRename(fs, dstPath, srcPath)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+
+ // hdfsCreateDirectory
+ fprintf(stderr, "hdfsCreateDirectory: %s\n",
+ ((result = hdfsCreateDirectory(fs, newDirectory)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+
+ // hdfsSetReplication
+ fprintf(stderr, "hdfsSetReplication: %s\n",
+ ((result = hdfsSetReplication(fs, srcPath, 1)) ?
+ "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+
+ // hdfsGetWorkingDirectory, hdfsSetWorkingDirectory
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+ ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+ buffer : "Failed!"));
totalResult += (resp ? 0 : 1);
-
+
+ const char* path[] = {"/foo", "/foo/bar", "foobar", "//foo/bar//foobar",
+ "foo//bar", "foo/bar///", "/", "////"};
+ for (int i = 0; i < 8; i++) {
+ fprintf(stderr, "hdfsSetWorkingDirectory: %s, %s\n",
+ ((result = hdfsSetWorkingDirectory(fs, path[i])) ?
+ "Failed!" : "Success!"),
+ hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)));
+ totalResult += result;
+ }
+
+ fprintf(stderr, "hdfsSetWorkingDirectory: %s\n",
+ ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
+ ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
+ buffer : "Failed!"));
+ totalResult += (resp ? 0 : 1);
+
+ // hdfsGetPathInfo
hdfsFileInfo *fileInfo = NULL;
if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
@@ -261,13 +288,15 @@ int main(int argc, char **argv) {
fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
char permissions[10];
permission_disp(fileInfo->mPermissions, permissions);
- fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
+ fprintf(stderr, "Permissions: %d (%s)\n",
+ fileInfo->mPermissions, permissions);
hdfsFreeFileInfo(fileInfo, 1);
} else {
totalResult++;
- fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
+ fprintf(stderr, "hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
}
+ // hdfsListDirectory
hdfsFileInfo *fileList = 0;
int numEntries = 0;
if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
@@ -283,7 +312,8 @@ int main(int argc, char **argv) {
fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
char permissions[10];
permission_disp(fileList[i].mPermissions, permissions);
- fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
+ fprintf(stderr, "Permissions: %d (%s)\n",
+ fileList[i].mPermissions, permissions);
}
hdfsFreeFileInfo(fileList, numEntries);
} else {
@@ -295,203 +325,220 @@ int main(int argc, char **argv) {
}
}
- // char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
- // if(hosts) {
- // fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
- // int i=0;
- // while(hosts[i]) {
- // int j = 0;
- // while(hosts[i][j]) {
- // fprintf(stderr,
- // "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
- // ++j;
- // }
- // ++i;
- // }
- // } else {
- // totalResult++;
- // fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
- // }
-
char *newOwner = "root";
- // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
+ // Setting tmp dir to 777 so later when connectAsUser nobody,
+ // we can write to it
short newPerm = 0666;
- // chown write
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
- totalResult += result;
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+ // hdfsChown
+ fprintf(stderr, "hdfsChown: %s\n",
+ ((result = hdfsChown(fs, rwPath, NULL, "users")) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsChown: %s\n",
+ ((result = hdfsChown(fs, rwPath, newOwner, NULL)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ // hdfsChmod
+ fprintf(stderr, "hdfsChmod: %s\n",
+ ((result = hdfsChmod(fs, rwPath, newPerm)) ?
+ "Failed!" : "Success!"));
totalResult += result;
- // chmod write
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
- totalResult += result;
-
-
sleep(2);
tTime newMtime = time(NULL);
tTime newAtime = time(NULL);
// utime write
- fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
-
+ fprintf(stderr, "hdfsUtime: %s\n",
+ ((result = hdfsUtime(fs, rwPath, newMtime, newAtime)) ?
+ "Failed!" : "Success!"));
totalResult += result;
// chown/chmod/utime read
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+ hdfsFileInfo *finfo = hdfsGetPathInfo(fs, rwPath);
- fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown read: %s\n",
+ ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ?
+ "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod read: %s\n",
+ ((result = (finfo->mPermissions != newPerm)) ?
+ "Failed!" : "Success!"));
totalResult += result;
// will later use /tmp/ as a different user so enable it
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod: %s\n",
+ ((result = hdfsChmod(fs, slashTmp, 0777)) ?
+ "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr,"newMTime=%ld\n",newMtime);
fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
- fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime / 1000)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsUtime read (mtime): %s\n",
+ ((result = (finfo->mLastMod != newMtime / 1000)) ?
+ "Failed!" : "Success!"));
totalResult += result;
- hdfsFreeFileInfo(finfo, 1);
-
// Clean up
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
- totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
- totalResult += result;
-// fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, movePath, 1)) ? "Failed!" : "Success!"));
-// totalResult += result;
- fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+ hdfsFreeFileInfo(finfo, 1);
+ fprintf(stderr, "hdfsDelete: %s\n",
+ ((result = hdfsDelete(fs, newDirectory, 1)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsDelete: %s\n",
+ ((result = hdfsDelete(fs, srcPath, 1)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsExists: %s\n",
+ ((result = hdfsExists(fs, newDirectory)) ?
+ "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
+ // Done test generic operations
}
{
- // TEST APPENDS
- const char *writePath = "/tmp/appends";
+ // Test Appends
+ appendTemplate = strdup("/tmp/appendsXXXXXX");
+ if (!appendTemplate) {
+ fprintf(stderr, "Failed to create appendTemplate!\n");
+ exit(1);
+ }
+ char *appendPath = mktemp(appendTemplate);
+ const char* helloBuffer = "Hello,";
+ hdfsFile writeFile = NULL;
- // CREATE
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
+ // Create
+ writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+ exit(1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
- const char* buffer = "Hello,";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+ num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+ (int) strlen(helloBuffer));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
- exit(-1);
- }
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
-
hdfsCloseFile(fs, writeFile);
- fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, writePath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsSetReplication: %s\n",
+ ((result = hdfsSetReplication(fs, appendPath, 1)) ?
+ "Failed!" : "Success!"));
totalResult += result;
- // RE-OPEN
- writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
+ // Re-Open for Append
+ writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY | O_APPEND, 0, 0, 0);
if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+ exit(1);
}
- fprintf(stderr, "Opened %s for appending successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for appending successfully...\n",
+ appendPath);
- buffer = " World";
- num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+ helloBuffer = " World";
+ num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
+ (int)strlen(helloBuffer) + 1);
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
- exit(-1);
- }
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
-
hdfsCloseFile(fs, writeFile);
- // CHECK size
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+ // Check size
+ hdfsFileInfo *finfo = hdfsGetPathInfo(fs, appendPath);
+ fprintf(stderr, "fileinfo->mSize: == total %s\n",
+ ((result = (finfo->mSize == strlen("Hello, World") + 1)) ?
+ "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
- // READ and check data
- hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+ // Read and check data
+ hdfsFile readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
- fprintf(stderr, "Failed to open %s for reading!\n", writePath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
+ exit(1);
}
- char rdbuffer[32];
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+ tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
- num_read_bytes, rdbuffer);
-
- fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
-
+ num_read_bytes, buffer);
+ fprintf(stderr, "read == Hello, World %s\n",
+ (result = (strcmp(buffer, "Hello, World") == 0)) ?
+ "Success!" : "Failed!");
hdfsCloseFile(fs, readFile);
- // DONE test appends
+ // Cleanup
+ fprintf(stderr, "hdfsDelete: %s\n",
+ ((result = hdfsDelete(fs, appendPath, 1)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ // Done test appends
}
-
totalResult += (hdfsDisconnect(fs) != 0);
{
//
// Now test as connecting as a specific user
- // This is only meant to test that we connected as that user, not to test
+ // This only meant to test that we connected as that user, not to test
// the actual fs user capabilities. Thus just create a file and read
// the owner is correct.
-
const char *tuser = "nobody";
- const char* writePath = "/tmp/usertestfile.txt";
+ userTemplate = strdup("/tmp/usertestXXXXXX");
+ if (!userTemplate) {
+ fprintf(stderr, "Failed to create userTemplate!\n");
+ exit(1);
+ }
+ char* userWritePath = mktemp(userTemplate);
+ hdfsFile writeFile = NULL;
fs = hdfsConnectAsUserNewInstance("default", 50070, tuser);
if(!fs) {
- fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
- exit(-1);
+ fprintf(stderr,
+ "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+ exit(1);
}
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ writeFile = hdfsOpenFile(fs, userWritePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
- exit(-1);
+ fprintf(stderr, "Failed to open %s for writing!\n", userWritePath);
+ exit(1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n",
+ userWritePath);
- char* buffer = "Hello, World!";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+ num_written_bytes = hdfsWrite(fs, writeFile, fileContents,
+ (int)strlen(fileContents) + 1);
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
- exit(-1);
- }
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
-
hdfsCloseFile(fs, writeFile);
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+ hdfsFileInfo *finfo = hdfsGetPathInfo(fs, userWritePath);
if (finfo) {
- fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfs new file user is correct: %s\n",
+ ((result = (strcmp(finfo->mOwner, tuser) != 0)) ?
+ "Failed!" : "Success!"));
} else {
- fprintf(stderr, "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
+ fprintf(stderr,
+ "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
result = -1;
}
totalResult += result;
+
+ // Cleanup
+ fprintf(stderr, "hdfsDelete: %s\n",
+ ((result = hdfsDelete(fs, userWritePath, 1)) ?
+ "Failed!" : "Success!"));
+ totalResult += result;
+ // Done test specific user
}
-
+
totalResult += (hdfsDisconnect(fs) != 0);
- fprintf(stderr, "totalResult == %d\n", totalResult);
+ // Shutdown the native minidfscluster
+ nmdShutdown(cluster);
+ nmdFree(cluster);
+
+ fprintf(stderr, "totalResult == %d\n", totalResult);
if (totalResult != 0) {
return -1;
} else {
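
For context, the rewritten permission_disp() above drops the old switch table and renders each octal digit of the mode with direct bit tests. A minimal standalone sketch of the same idea (not part of this commit; the names here are illustrative only):

    #include <stdio.h>

    /* Render a 9-bit permission mode as "rwx"/"-" characters, one octal
     * digit per triple, using the same bit tests as the updated
     * permission_disp(). */
    static void perm_string(short permissions, char out[10])
    {
        int i;
        out[9] = '\0';
        for (i = 2; i >= 0; i--) {
            short perm = permissions >> (i * 3);
            out[(2 - i) * 3 + 0] = (perm & 4) ? 'r' : '-';
            out[(2 - i) * 3 + 1] = (perm & 2) ? 'w' : '-';
            out[(2 - i) * 3 + 2] = (perm & 1) ? 'x' : '-';
        }
    }

    int main(void)
    {
        char buf[10];
        perm_string(0754, buf);
        printf("%s\n", buf);    /* prints rwxr-xr-- */
        return 0;
    }
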
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c Wed Oct 31 18:34:51 2012
@@ -22,43 +22,52 @@
#include <stdlib.h>
int main(int argc, char **argv) {
+
+ const char* rfile;
+ tSize fileTotalSize, bufferSize, curSize, totalReadSize;
+ hdfsFS fs;
+ hdfsFile readFile;
+ char *buffer = NULL;
if (argc != 4) {
- fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
- exit(-1);
+ fprintf(stderr, "Usage: test_libwebhdfs_read"
+ " <filename> <filesize> <buffersize>\n");
+ exit(1);
}
- hdfsFS fs = hdfsConnect("0.0.0.0", 50070);
+ fs = hdfsConnect("localhost", 50070);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
- exit(-1);
+ exit(1);
}
- const char* rfile = argv[1];
- tSize fileTotalSize = strtoul(argv[2], NULL, 10);
- tSize bufferSize = strtoul(argv[3], NULL, 10);
+ rfile = argv[1];
+ fileTotalSize = strtoul(argv[2], NULL, 10);
+ bufferSize = strtoul(argv[3], NULL, 10);
- hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+ readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for writing!\n", rfile);
- exit(-2);
+ exit(1);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize);
+ buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
- return -2;
+ fprintf(stderr, "Failed to allocate buffer.\n");
+ exit(1);
}
// read from the file
- tSize curSize = bufferSize;
- tSize totalReadSize = 0;
- for (; (curSize = hdfsRead(fs, readFile, (void*)buffer, bufferSize)) == bufferSize ;) {
+ curSize = bufferSize;
+ totalReadSize = 0;
+ for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) {
totalReadSize += curSize;
}
totalReadSize += curSize;
- fprintf(stderr, "size of the file: %d; reading size: %d\n", fileTotalSize, totalReadSize);
+ fprintf(stderr, "size of the file: %d; reading size: %d\n",
+ fileTotalSize, totalReadSize);
free(buffer);
hdfsCloseFile(fs, readFile);
@@ -67,7 +76,3 @@ int main(int argc, char **argv) {
return 0;
}
-/**
- * vim: ts=4: sw=4: et:
- */
-
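
The read loop in the test above pulls fixed-size chunks until hdfsRead() returns a short count, then folds that final short read into the total. A hedged sketch of the same pattern as a helper (not part of this commit; readWholeFile is an illustrative name):

    #include "hdfs.h"

    /* Read a file in bufSize chunks and return the total number of bytes
     * read; the last, short read (0 at end-of-file) is added after the
     * loop, mirroring the loop in test_libwebhdfs_read.c. */
    static tSize readWholeFile(hdfsFS fs, hdfsFile f, char *buf, tSize bufSize)
    {
        tSize n, total = 0;
        while ((n = hdfsRead(fs, f, buf, bufSize)) == bufSize) {
            total += n;
        }
        total += n;
        return total;
    }
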
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c Wed Oct 31 18:34:51 2012
@@ -18,6 +18,7 @@
#include "expect.h"
#include "hdfs.h"
+#include "native_mini_dfs.h"
#include <errno.h>
#include <semaphore.h>
@@ -28,11 +29,9 @@
#define TLH_MAX_THREADS 100
-static sem_t *tlhSem;
+static struct NativeMiniDfsCluster* cluster;
-static const char *nn;
static const char *user;
-static int port;
struct tlhThreadInfo {
/** Thread index */
@@ -43,19 +42,24 @@ struct tlhThreadInfo {
pthread_t thread;
};
-static int hdfsSingleNameNodeConnect(const char *nn, int port, const char *user, hdfsFS *fs)
+static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cluster,
+ hdfsFS *fs)
{
+ int nnPort;
+ const char *nnHost;
hdfsFS hdfs;
- if (port < 0) {
- fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
- "returned error %d\n", port);
- return port;
+
+ if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
+ fprintf(stderr, "Error when retrieving namenode host address.\n");
+ return 1;
}
- hdfs = hdfsConnectAsUserNewInstance(nn, port, user);
- if (!hdfs) {
- return -errno;
+ hdfs = hdfsConnectAsUser(nnHost, nnPort, user);
+ if(!hdfs) {
+ fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+ return 1;
}
+
*fs = hdfs;
return 0;
}
@@ -65,6 +69,7 @@ static int doTestHdfsOperations(struct t
char prefix[256], tmp[256];
hdfsFile file;
int ret, expected;
+ hdfsFileInfo *fileInfo;
snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
@@ -74,18 +79,13 @@ static int doTestHdfsOperations(struct t
EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
snprintf(tmp, sizeof(tmp), "%s/file", prefix);
- /*
- * Although there should not be any file to open for reading,
- * the right now implementation only construct a local
- * information struct when opening file
- */
EXPECT_NONNULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
EXPECT_NONNULL(file);
/* TODO: implement writeFully and use it here */
- expected = strlen(prefix);
+ expected = (int)strlen(prefix);
ret = hdfsWrite(fs, file, prefix, expected);
if (ret < 0) {
ret = errno;
@@ -118,9 +118,28 @@ static int doTestHdfsOperations(struct t
}
EXPECT_ZERO(memcmp(prefix, tmp, expected));
EXPECT_ZERO(hdfsCloseFile(fs, file));
-
- // TODO: Non-recursive delete should fail?
- //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
+
+ snprintf(tmp, sizeof(tmp), "%s/file", prefix);
+ EXPECT_NONZERO(hdfsChown(fs, tmp, NULL, NULL));
+ EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
+ fileInfo = hdfsGetPathInfo(fs, tmp);
+ EXPECT_NONNULL(fileInfo);
+ EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
+ hdfsFreeFileInfo(fileInfo, 1);
+
+ EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
+ fileInfo = hdfsGetPathInfo(fs, tmp);
+ EXPECT_NONNULL(fileInfo);
+ EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
+ EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+ hdfsFreeFileInfo(fileInfo, 1);
+
+ EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
+ fileInfo = hdfsGetPathInfo(fs, tmp);
+ EXPECT_NONNULL(fileInfo);
+ EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
+ EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
+ hdfsFreeFileInfo(fileInfo, 1);
EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
return 0;
@@ -134,7 +153,7 @@ static void *testHdfsOperations(void *v)
fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
ti->threadIdx);
- ret = hdfsSingleNameNodeConnect(nn, port, user, &fs);
+ ret = hdfsSingleNameNodeConnect(cluster, &fs);
if (ret) {
fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
"hdfsSingleNameNodeConnect failed with error %d.\n",
@@ -181,19 +200,23 @@ static int checkFailures(struct tlhThrea
*/
int main(int argc, const char *args[])
{
- if (argc != 4) {
- fprintf(stderr, "usage: test_libhdfs_threaded <namenode> <port> <username>");
- return -1;
- }
-
- nn = args[1];
- port = atoi(args[2]);
- user = args[3];
-
int i, tlhNumThreads;
const char *tlhNumThreadsStr;
struct tlhThreadInfo ti[TLH_MAX_THREADS];
+ if (argc != 2) {
+ fprintf(stderr, "usage: test_libwebhdfs_threaded <username>\n");
+ exit(1);
+ }
+ user = args[1];
+
+ struct NativeMiniDfsConf conf = {
+ .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
+ };
+ cluster = nmdCreate(&conf);
+ EXPECT_NONNULL(cluster);
+ EXPECT_ZERO(nmdWaitClusterUp(cluster));
+
tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
if (!tlhNumThreadsStr) {
tlhNumThreadsStr = "3";
@@ -210,8 +233,6 @@ int main(int argc, const char *args[])
ti[i].threadIdx = i;
}
-// tlhSem = sem_open("sem", O_CREAT, 0644, tlhNumThreads);
-
for (i = 0; i < tlhNumThreads; i++) {
EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
testHdfsOperations, &ti[i]));
@@ -220,6 +241,7 @@ int main(int argc, const char *args[])
EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
}
-// EXPECT_ZERO(sem_close(tlhSem));
+ EXPECT_ZERO(nmdShutdown(cluster));
+ nmdFree(cluster);
return checkFailures(ti, tlhNumThreads);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c Wed Oct 31 18:34:51 2012
@@ -22,97 +22,90 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <inttypes.h>
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char* writeFileName;
+ off_t fileTotalSize;
+ long long tmpBufferSize;
+ tSize bufferSize = 0, totalWriteSize = 0, toWrite = 0, written = 0;
+ hdfsFile writeFile = NULL;
+ int append, i = 0;
+ char* buffer = NULL;
if (argc != 6) {
- fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize> <username> <append>\n");
- exit(-1);
+ fprintf(stderr, "Usage: test_libwebhdfs_write <filename> <filesize> "
+ "<buffersize> <username> <append>\n");
+ exit(1);
}
- hdfsFS fs = hdfsConnectAsUser("0.0.0.0", 50070, argv[4]);
+ fs = hdfsConnectAsUser("default", 50070, argv[4]);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
- exit(-1);
+ exit(1);
}
- const char* writeFileName = argv[1];
- off_t fileTotalSize = strtoul(argv[2], NULL, 10);
- long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+ writeFileName = argv[1];
+ fileTotalSize = strtoul(argv[2], NULL, 10);
+ tmpBufferSize = strtoul(argv[3], NULL, 10);
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
- fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX);
- exit(-3);
+ fprintf(stderr, "invalid file size %s - must be <= %lu\n",
+ argv[2], ULONG_MAX);
+ exit(1);
}
// currently libhdfs writes are of tSize which is int32
if(tmpBufferSize > INT_MAX) {
- fprintf(stderr, "invalid buffer size libhdfs API write chunks must be <= %d\n",INT_MAX);
- exit(-3);
+ fprintf(stderr,
+ "invalid buffer size libhdfs API write chunks must be <= %d\n",
+ INT_MAX);
+ exit(1);
}
- tSize bufferSize = tmpBufferSize;
-
- hdfsFile writeFile = NULL;
- int append = atoi(argv[5]);
+ bufferSize = (tSize) tmpBufferSize;
+ append = atoi(argv[5]);
if (!append) {
writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 2, 0);
} else {
- writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, bufferSize, 2, 0);
+ writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND,
+ bufferSize, 2, 0);
}
if (!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
- exit(-2);
+ exit(1);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize + 1);
+ buffer = malloc(sizeof(char) * bufferSize + 1);
if(buffer == NULL) {
fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
- return -2;
+ exit(1);
}
- int i = 0;
- for (i=0; i < bufferSize; ++i) {
+ for (i = 0; i < bufferSize; ++i) {
buffer[i] = 'a' + (i%26);
}
buffer[bufferSize] = '\0';
- size_t totalWriteSize = 0;
+ // write to the file
+ totalWriteSize = 0;
for (; totalWriteSize < fileTotalSize; ) {
- tSize toWrite = bufferSize < (fileTotalSize - totalWriteSize) ? bufferSize : (fileTotalSize - totalWriteSize);
- size_t written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
- fprintf(stderr, "written size %ld, to write size %d\n", written, toWrite);
+ toWrite = bufferSize < (fileTotalSize - totalWriteSize) ?
+ bufferSize : (fileTotalSize - totalWriteSize);
+ written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
+ fprintf(stderr, "written size %d, to write size %d\n",
+ written, toWrite);
totalWriteSize += written;
- //sleep(1);
}
+ // cleanup
free(buffer);
hdfsCloseFile(fs, writeFile);
-
- fprintf(stderr, "file total size: %lld, total write size: %ld\n", fileTotalSize, totalWriteSize);
-
- hdfsFile readFile = hdfsOpenFile(fs, writeFileName, O_RDONLY, 0, 0, 0);
- //sleep(1);
- fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
-
- hdfsFile writeFile2 = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, 0, 2, 0);
- fprintf(stderr, "Opened %s for writing successfully...\n", writeFileName);
- const char *content = "Hello, World!";
- size_t num_written_bytes = hdfsWrite(fs, writeFile2, content, strlen(content) + 1);
- if (num_written_bytes != strlen(content) + 1) {
- fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
- (int)(strlen(content) + 1), (int)num_written_bytes);
- exit(-1);
- }
- fprintf(stderr, "Wrote %zd bytes\n", num_written_bytes);
-
+ fprintf(stderr, "file total size: %" PRId64 ", total write size: %d\n",
+ fileTotalSize, totalWriteSize);
hdfsDisconnect(fs);
return 0;
}
-
-/**
- * vim: ts=4: sw=4: et:
- */
-
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1403306-1404284
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Wed Oct 31 18:34:51 2012
@@ -185,7 +185,7 @@ public class BlockPlacementPolicyDefault
if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
return writer;
}
- int totalReplicasExpected = numOfReplicas;
+ int totalReplicasExpected = numOfReplicas + results.size();
int numOfResults = results.size();
boolean newBlock = (numOfResults==0);
@@ -231,7 +231,8 @@ public class BlockPlacementPolicyDefault
maxNodesPerRack, results, avoidStaleNodes);
} catch (NotEnoughReplicasException e) {
LOG.warn("Not able to place enough replicas, still in need of "
- + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+ + (totalReplicasExpected - results.size()) + " to reach "
+ + totalReplicasExpected + "\n"
+ e.getMessage());
if (avoidStaleNodes) {
// ecxludedNodes now has - initial excludedNodes, any nodes that were
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Oct 31 18:34:51 2012
@@ -5466,7 +5466,7 @@ public class FSNamesystem implements Nam
public FSDirectory getFSDirectory() {
return dir;
}
-
+
/**
* Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Wed Oct 31 18:34:51 2012
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.io.PrintWriter;
+import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.SignedBytes;
/**
@@ -225,11 +228,10 @@ public abstract class INode implements C
abstract DirCounts spaceConsumedInTree(DirCounts counts);
/**
- * Get local file name
- * @return local file name
+ * @return null if the local name is null; otherwise, return the local name.
*/
public String getLocalName() {
- return DFSUtil.bytes2String(name);
+ return name == null? null: DFSUtil.bytes2String(name);
}
@@ -239,8 +241,8 @@ public abstract class INode implements C
}
/**
- * Get local file name
- * @return local file name
+ * @return null if the local name is null;
+ * otherwise, return the local name byte array.
*/
byte[] getLocalNameBytes() {
return name;
@@ -463,4 +465,30 @@ public abstract class INode implements C
return new INodeFile(permissions, blocks, replication,
modificationTime, atime, preferredBlockSize);
}
+
+ /**
+ * Dump the subtree starting from this inode.
+ * @return a text representation of the tree.
+ */
+ @VisibleForTesting
+ public StringBuffer dumpTreeRecursively() {
+ final StringWriter out = new StringWriter();
+ dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder());
+ return out.getBuffer();
+ }
+
+ /**
+ * Dump tree recursively.
+ * @param prefix The prefix string that each line should print.
+ */
+ @VisibleForTesting
+ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+ out.print(prefix);
+ out.print(" ");
+ out.print(getLocalName());
+ out.print(" (");
+ final String s = super.toString();
+ out.print(s.substring(s.lastIndexOf(getClass().getSimpleName())));
+ out.println(")");
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Wed Oct 31 18:34:51 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -32,6 +33,8 @@ import org.apache.hadoop.hdfs.protocol.U
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Directory INode class.
*/
@@ -568,4 +571,52 @@ public class INodeDirectory extends INod
return size;
}
}
+
+ /*
+ * The following code is to dump the tree recursively for testing.
+ *
+ * \- foo (INodeDirectory@33dd2717)
+ * \- sub1 (INodeDirectory@442172)
+ * +- file1 (INodeFile@78392d4)
+ * +- file2 (INodeFile@78392d5)
+ * +- sub11 (INodeDirectory@8400cff)
+ * \- file3 (INodeFile@78392d6)
+ * \- z_file4 (INodeFile@45848712)
+ */
+ static final String DUMPTREE_EXCEPT_LAST_ITEM = "+-";
+ static final String DUMPTREE_LAST_ITEM = "\\-";
+ @VisibleForTesting
+ @Override
+ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+ super.dumpTreeRecursively(out, prefix);
+ if (prefix.length() >= 2) {
+ prefix.setLength(prefix.length() - 2);
+ prefix.append(" ");
+ }
+ dumpTreeRecursively(out, prefix, children);
+ }
+
+ /**
+ * Dump the given subtrees.
+ * @param prefix The prefix string that each line should print.
+ * @param subs The subtrees.
+ */
+ @VisibleForTesting
+ protected static void dumpTreeRecursively(PrintWriter out,
+ StringBuilder prefix, List<? extends INode> subs) {
+ prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+ if (subs != null && subs.size() != 0) {
+ int i = 0;
+ for(; i < subs.size() - 1; i++) {
+ subs.get(i).dumpTreeRecursively(out, prefix);
+ prefix.setLength(prefix.length() - 2);
+ prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+ }
+
+ prefix.setLength(prefix.length() - 2);
+ prefix.append(DUMPTREE_LAST_ITEM);
+ subs.get(i).dumpTreeRecursively(out, prefix);
+ }
+ prefix.setLength(prefix.length() - 2);
+ }
}
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1403306-1404284
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Wed Oct 31 18:34:51 2012
@@ -24,10 +24,15 @@
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
#define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
+#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
+#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
+
+#define DFS_WEBHDFS_ENABLED_KEY "dfs.webhdfs.enabled"
struct NativeMiniDfsCluster {
/**
@@ -43,6 +48,7 @@ struct NativeMiniDfsCluster* nmdCreate(s
jvalue val;
JNIEnv *env = getJNIEnv();
jthrowable jthr;
+ jstring jconfStr;
if (!env) {
fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
@@ -59,6 +65,22 @@ struct NativeMiniDfsCluster* nmdCreate(s
"nmdCreate: new Configuration");
goto error_free_cl;
}
+ if (conf->webhdfsEnabled) {
+ jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr);
+ if (jthr) {
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "nmdCreate: new String");
+ goto error_dlr_cobj;
+ }
+ jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
+ "setBoolean", "(Ljava/lang/String;Z)V",
+ jconfStr, conf->webhdfsEnabled);
+ if (jthr) {
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ "nmdCreate: Configuration::setBoolean");
+ goto error_dlr_cobj;
+ }
+ }
jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
"(L"HADOOP_CONF";)V", cobj);
if (jthr) {
@@ -74,6 +96,16 @@ struct NativeMiniDfsCluster* nmdCreate(s
goto error_dlr_bld;
}
bld2 = val.l;
+ if (conf->webhdfsEnabled) {
+ jthr = invokeMethod(env, &val, INSTANCE, bld2, MINIDFS_CLUSTER_BUILDER,
+ "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
+ conf->namenodeHttpPort);
+ if (jthr) {
+ printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
+ "Builder::nameNodeHttpPort");
+ goto error_dlr_bld2;
+ }
+ }
jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
"build", "()L" MINIDFS_CLUSTER ";");
if (jthr) {
@@ -91,6 +123,7 @@ struct NativeMiniDfsCluster* nmdCreate(s
(*env)->DeleteLocalRef(env, bld2);
(*env)->DeleteLocalRef(env, bld);
(*env)->DeleteLocalRef(env, cobj);
+ (*env)->DeleteLocalRef(env, jconfStr);
return cl;
error_dlr_val:
@@ -101,6 +134,7 @@ error_dlr_bld:
(*env)->DeleteLocalRef(env, bld);
error_dlr_cobj:
(*env)->DeleteLocalRef(env, cobj);
+ (*env)->DeleteLocalRef(env, jconfStr);
error_free_cl:
free(cl);
error:
@@ -177,3 +211,69 @@ int nmdGetNameNodePort(const struct Nati
}
return jVal.i;
}
+
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                              int *port, const char **hostName)
+{
+    JNIEnv *env = getJNIEnv();
+    jvalue jVal;
+    jobject jNameNode, jAddress;
+    jthrowable jthr;
+    int ret = 0;
+    const char *host;
+
+    if (!env) {
+        fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n");
+        return -EIO;
+    }
+    // First get the (first) NameNode of the cluster
+    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+                        "getNameNode", "()L" HADOOP_NAMENODE ";");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdGetNameNodeHttpAddress: "
+                              "MiniDFSCluster#getNameNode");
+        return -EIO;
+    }
+    jNameNode = jVal.l;
+
+    // Then get the http address (InetSocketAddress) of the NameNode
+    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "NameNode#getHttpAddress");
+        goto error_dlr_nn;
+    }
+    jAddress = jVal.l;
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getPort");
+        goto error_dlr_addr;
+    }
+    *port = jVal.i;
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+                        "getHostName", "()Ljava/lang/String;");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getHostName");
+        goto error_dlr_addr;
+    }
+    host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
+    *hostName = strdup(host);
+    (*env)->ReleaseStringUTFChars(env, jVal.l, host);
+
+error_dlr_addr:
+    (*env)->DeleteLocalRef(env, jAddress);
+error_dlr_nn:
+    (*env)->DeleteLocalRef(env, jNameNode);
+
+    return ret;
+}
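For reference, the new accessor copies the host name with strdup(), so the caller owns the returned string and is responsible for releasing it; a zero return value indicates success, per the header comment below. A minimal caller sketch, assuming a NativeMiniDfsCluster that was already created elsewhere (the helper name here is purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include "native_mini_dfs.h"

    /* Hypothetical helper: print the NameNode http endpoint of a running cluster. */
    static int printHttpAddress(struct NativeMiniDfsCluster *cluster)
    {
        int port = 0;
        const char *host = NULL;   /* receives a strdup'd copy owned by the caller */

        if (nmdGetNameNodeHttpAddress(cluster, &port, &host)) {
            fprintf(stderr, "failed to get NameNode http address\n");
            return -1;
        }
        printf("NameNode http address: %s:%d\n", host, port);
        free((void *) host);       /* release the copy made by strdup() */
        return 0;
    }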
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h Wed Oct 31 18:34:51 2012
@@ -31,6 +31,14 @@ struct NativeMiniDfsConf {
* Nonzero if the cluster should be formatted prior to startup
*/
jboolean doFormat;
+    /**
+     * Whether or not to enable webhdfs in MiniDfsCluster
+     */
+    jboolean webhdfsEnabled;
+    /**
+     * The http port of the namenode in MiniDfsCluster
+     */
+    jint namenodeHttpPort;
};
/**
@@ -76,5 +84,21 @@ void nmdFree(struct NativeMiniDfsCluster
*
* @return the port, or a negative error code
*/
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl);
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl);
+
+/**
+ * Get the http address that's in use by the given (non-HA) nativeMiniDfs
+ *
+ * @param cl        The initialized NativeMiniDfsCluster
+ * @param port      Used to capture the http port of the NameNode
+ *                  of the NativeMiniDfsCluster
+ * @param hostName  Used to capture the http hostname of the NameNode
+ *                  of the NativeMiniDfsCluster
+ *
+ * @return 0 on success; a non-zero error code if failing to
+ *         get the information.
+ */
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                              int *port, const char **hostName);
+
#endif
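Taken together, the two new NativeMiniDfsConf fields and the new accessor let a native test bring up a webhdfs-enabled mini cluster end to end. A rough sketch under the existing entry points declared in this header (nmdCreate, nmdGetNameNodePort, nmdFree); the http port value is illustrative only, and shutdown/error handling is trimmed:

    #include <stdio.h>
    #include <stdlib.h>
    #include "native_mini_dfs.h"

    int main(void)
    {
        struct NativeMiniDfsConf conf = {
            .doFormat = 1,             /* format the cluster before startup */
            .webhdfsEnabled = 1,       /* sets dfs.webhdfs.enabled = true */
            .namenodeHttpPort = 50070, /* illustrative; any free port works */
        };
        struct NativeMiniDfsCluster *cluster;
        int rpcPort, httpPort = 0;
        const char *httpHost = NULL;

        cluster = nmdCreate(&conf);
        if (!cluster) {
            fprintf(stderr, "nmdCreate failed\n");
            return 1;
        }
        rpcPort = nmdGetNameNodePort(cluster);
        if (nmdGetNameNodeHttpAddress(cluster, &httpPort, &httpHost)) {
            fprintf(stderr, "failed to get NameNode http address\n");
        } else {
            printf("NameNode rpc port %d, webhdfs at %s:%d\n",
                   rpcPort, httpHost, httpPort);
            free((void *) httpHost);   /* caller owns the strdup'd host name */
        }
        nmdFree(cluster);
        return 0;
    }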
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1403306-1404284
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1403306-1404284
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1403306-1404284
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1403306-1404284
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Wed Oct 31 18:34:51 2012
@@ -1145,7 +1145,7 @@ public class TestDFSShell {
args = new String[2];
args[0] = "-touchz";
- args[1] = "/test/mkdirs/noFileHere";
+ args[1] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
@@ -1157,7 +1157,7 @@ public class TestDFSShell {
args = new String[2];
args[0] = "-touchz";
- args[1] = "/test/mkdirs/thisDirNotExists/noFileHere";
+ args[1] = "/test/mkdirs/thisDirNotExists/isFileHere";
val = -1;
try {
val = shell.run(args);
@@ -1171,7 +1171,7 @@ public class TestDFSShell {
args = new String[3];
args[0] = "-test";
args[1] = "-e";
- args[2] = "/test/mkdirs/noFileHere";
+ args[2] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Wed Oct 31 18:34:51 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
@@ -44,6 +45,10 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -375,7 +380,71 @@ public class TestReplicationPolicy {
new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
-    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+  }
+
+  /**
+   * In this testcase, it tries to choose more targets than available nodes and
+   * check the result.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+    // make data node 0 & 1 to be not qualified to choose: not enough disk space
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+    }
+
+    final TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
+    // try to choose NUM_OF_DATANODES which is more than actually available
+    // nodes.
+    DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
+        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+        BLOCK_SIZE);
+    assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+    assertEquals(lastLogEntry.getLevel(), Level.WARN);
+    // Suppose to place replicas on each node but two data nodes are not
+    // available for placing replica, so here we expect a short of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+    }
+  }
+
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<LoggingEvent>(log);
+    }
}
private boolean containsWithinRange(DatanodeDescriptor target,
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1404285&r1=1404284&r2=1404285&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Wed Oct 31 18:34:51 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -60,17 +59,11 @@ public class TestFsLimits {
return fsn;
}
- private static class TestFSDirectory extends FSDirectory {
- public TestFSDirectory() throws IOException {
+ private static class MockFSDirectory extends FSDirectory {
+ public MockFSDirectory() throws IOException {
super(new FSImage(conf), getMockNamesystem(), conf);
setReady(fsIsReady);
}
-
- @Override
- public <T extends INode> void verifyFsLimits(INode[] pathComponents,
- int pos, T child) throws FSLimitException {
- super.verifyFsLimits(pathComponents, pos, child);
- }
}
@Before
@@ -157,7 +150,7 @@ public class TestFsLimits {
private void addChildWithName(String name, Class<?> expected)
throws Exception {
// have to create after the caller has had a chance to set conf values
- if (fs == null) fs = new TestFSDirectory();
+ if (fs == null) fs = new MockFSDirectory();
INode child = new INodeDirectory(name, perms);
child.setLocalName(name);