Posted to hdfs-dev@hadoop.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2016/05/11 07:52:27 UTC
Hadoop-Hdfs-trunk - Build # 3126 - Still Failing
See https://builds.apache.org/job/Hadoop-Hdfs-trunk/3126/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 8204 lines...]
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ hadoop-hdfs-project ---
[INFO] Deleting /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target
[INFO]
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks
main:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO]
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-site-plugin:3.5:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [06:27 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [ 01:29 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [ 0.146 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:36 h
[INFO] Finished at: 2016-05-11T07:52:14+00:00
[INFO] Final Memory: 73M/876M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: ExecutionException: java.lang.RuntimeException: java.lang.RuntimeException: java.io.IOException: Stream Closed -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR] mvn <goals> -rf :hadoop-hdfs
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: hdfs-dev@hadoop.apache.org
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
###################################################################################
############################## FAILED TESTS (if any) ##############################
17 tests failed.
FAILED: org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncRenameWithOverwrite
Error Message:
test timed out after 120000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 120000 milliseconds
at java.lang.Object.wait(Native Method)
at org.apache.hadoop.hdfs.DataStreamer.waitForAckedSeqno(DataStreamer.java:768)
at org.apache.hadoop.hdfs.DFSOutputStream.flushInternal(DFSOutputStream.java:697)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:778)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:755)
at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:430)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:379)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:372)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:365)
at org.apache.hadoop.hdfs.TestAsyncDFSRename.internalTestConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:220)
at org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:184)
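[Note: the "test timed out after N milliseconds" message above is the wording JUnit 4 emits when a per-test timeout is exceeded. A minimal sketch of how such a timeout is typically declared (illustrative only, not the actual TestAsyncDFSRename source):]

    import org.junit.Test;

    public class TimeoutExample {
      // JUnit 4 interrupts the test thread and reports
      // "test timed out after 120000 milliseconds" when this limit is exceeded.
      @Test(timeout = 120000)
      public void testSomethingSlow() throws Exception {
        // long-running work under test goes here
      }
    }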
FAILED: org.apache.hadoop.hdfs.TestAsyncDFSRename.testConservativeConcurrentAsyncRenameWithOverwrite
Error Message:
test timed out after 60000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
at java.lang.Thread.sleep(Native Method)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:825)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:784)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:755)
at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:430)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:379)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:372)
at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:365)
at org.apache.hadoop.hdfs.TestAsyncDFSRename.internalTestConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:220)
at org.apache.hadoop.hdfs.TestAsyncDFSRename.testConservativeConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:191)
FAILED: org.apache.hadoop.hdfs.TestAsyncDFSRename.testAsyncRenameWithOverwrite
Error Message:
Cannot remove data directory: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace':
absolute:/home/jenkins/jenkins-slave/workspace
permissions: drwx
path '/home/jenkins/jenkins-slave':
absolute:/home/jenkins/jenkins-slave
permissions: drwx
path '/home/jenkins':
absolute:/home/jenkins
permissions: drwx
path '/home':
absolute:/home
permissions: dr-x
path '/':
absolute:/
permissions: dr-x
Stack Trace:
java.io.IOException: Cannot remove data directory: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs/data
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2/dfs
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data/2
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test/data
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/test
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk':
absolute:/home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk
permissions: drwx
path '/home/jenkins/jenkins-slave/workspace':
absolute:/home/jenkins/jenkins-slave/workspace
permissions: drwx
path '/home/jenkins/jenkins-slave':
absolute:/home/jenkins/jenkins-slave
permissions: drwx
path '/home/jenkins':
absolute:/home/jenkins
permissions: drwx
path '/home':
absolute:/home
permissions: dr-x
path '/':
absolute:/
permissions: dr-x
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:834)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:482)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:441)
at org.apache.hadoop.hdfs.TestAsyncDFSRename.testAsyncRenameWithOverwrite(TestAsyncDFSRename.java:56)
FAILED: org.apache.hadoop.hdfs.TestFileAppend.testMultipleAppends
Error Message:
Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:38570,DS-54a1c12d-2b26-4663-8f6c-7dfe036f20e3,DISK], DatanodeInfoWithStorage[127.0.0.1:51391,DS-0036cd19-7989-45eb-9069-77880e87f426,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:38570,DS-54a1c12d-2b26-4663-8f6c-7dfe036f20e3,DISK], DatanodeInfoWithStorage[127.0.0.1:51391,DS-0036cd19-7989-45eb-9069-77880e87f426,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
Stack Trace:
java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:38570,DS-54a1c12d-2b26-4663-8f6c-7dfe036f20e3,DISK], DatanodeInfoWithStorage[127.0.0.1:51391,DS-0036cd19-7989-45eb-9069-77880e87f426,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:38570,DS-54a1c12d-2b26-4663-8f6c-7dfe036f20e3,DISK], DatanodeInfoWithStorage[127.0.0.1:51391,DS-0036cd19-7989-45eb-9069-77880e87f426,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
at org.apache.hadoop.hdfs.DataStreamer.findNewDatanode(DataStreamer.java:1166)
at org.apache.hadoop.hdfs.DataStreamer.addDatanode2ExistingPipeline(DataStreamer.java:1236)
at org.apache.hadoop.hdfs.DataStreamer.handleDatanodeReplacement(DataStreamer.java:1427)
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1342)
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1325)
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:603)
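[Note: the error message above names the client-side property that governs datanode replacement on pipeline failure. A minimal sketch of how a client or test might set it; the property name and the DEFAULT policy are taken from the message, the surrounding code is illustrative:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ReplaceDatanodePolicyExample {
      public static Configuration clientConf() {
        Configuration conf = new HdfsConfiguration();
        // Property named in the error message; "NEVER" disables datanode
        // replacement on pipeline failure instead of the DEFAULT policy.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        return conf;
      }
    }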
FAILED: org.apache.hadoop.hdfs.TestRenameWhileOpen.testWhileOpenRenameParentToNonexistentDir
Error Message:
Problem binding to [localhost:43431] java.net.BindException: Address already in use; For more details see: http://wiki.apache.org/hadoop/BindException
Stack Trace:
java.net.BindException: Problem binding to [localhost:43431] java.net.BindException: Address already in use; For more details see: http://wiki.apache.org/hadoop/BindException
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:444)
at sun.nio.ch.Net.bind(Net.java:436)
at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
at org.apache.hadoop.ipc.Server.bind(Server.java:530)
at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:793)
at org.apache.hadoop.ipc.Server.<init>(Server.java:2592)
at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:958)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server.<init>(ProtobufRpcEngine.java:563)
at org.apache.hadoop.ipc.ProtobufRpcEngine.getServer(ProtobufRpcEngine.java:538)
at org.apache.hadoop.ipc.RPC$Builder.build(RPC.java:800)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.<init>(NameNodeRpcServer.java:426)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createRpcServer(NameNode.java:783)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:710)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:924)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:903)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1620)
at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNode(MiniDFSCluster.java:1247)
at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1016)
at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:891)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:823)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:482)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:441)
at org.apache.hadoop.hdfs.TestRenameWhileOpen.testWhileOpenRenameParentToNonexistentDir(TestRenameWhileOpen.java:201)
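[Note: bind failures like the one above generally mean a fixed NameNode port collided with another process on the build slave (see the wiki link in the message). MiniDFSCluster-based tests commonly avoid this by letting the OS pick an ephemeral port. A minimal sketch, not the test's actual setup:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class EphemeralPortClusterExample {
      public static MiniDFSCluster startCluster() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Port 0 asks the OS for any free port, avoiding "Address already in use".
        return new MiniDFSCluster.Builder(conf)
            .nameNodePort(0)
            .numDataNodes(1)
            .build();
      }
    }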
FAILED: org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.testSecondaryNameNodeHttpAddressNotNeeded
Error Message:
Error replaying edit log at offset 0. Expected transaction ID was 1
Stack Trace:
org.apache.hadoop.hdfs.server.namenode.EditLogInputException: Error replaying edit log at offset 0. Expected transaction ID was 1
at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:194)
at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85)
at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151)
at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:178)
at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85)
at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:196)
at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:149)
at org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:837)
at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:694)
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:290)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:989)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:659)
at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:646)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:708)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:924)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:903)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1620)
at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:2020)
at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:1985)
at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.restartNameNode(TestSecureNNWithQJM.java:205)
at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.doNNWithQJMTest(TestSecureNNWithQJM.java:193)
at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.testSecondaryNameNodeHttpAddressNotNeeded(TestSecureNNWithQJM.java:173)
FAILED: org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser.testWebHdfsDoAs
Error Message:
test timed out after 5000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 5000 milliseconds
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.read(SocketInputStream.java:152)
at java.net.SocketInputStream.read(SocketInputStream.java:122)
at java.io.BufferedInputStream.fill(BufferedInputStream.java:235)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:275)
at java.io.BufferedInputStream.read(BufferedInputStream.java:334)
at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:687)
at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:633)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1323)
at java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:468)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$AbstractRunner.connect(WebHdfsFileSystem.java:625)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$AbstractRunner.runWithRetry(WebHdfsFileSystem.java:709)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$AbstractRunner.access$100(WebHdfsFileSystem.java:555)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$AbstractRunner$1.run(WebHdfsFileSystem.java:586)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem$AbstractRunner.run(WebHdfsFileSystem.java:582)
at org.apache.hadoop.hdfs.web.WebHdfsFileSystem.append(WebHdfsFileSystem.java:1306)
at org.apache.hadoop.fs.FileSystem.append(FileSystem.java:1172)
at org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser.testWebHdfsDoAs(TestDelegationTokenForProxyUser.java:178)
FAILED: org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerWithStripedFile
Error Message:
expected:<9> but was:<8>
Stack Trace:
java.lang.AssertionError: expected:<9> but was:<8>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:743)
at org.junit.Assert.assertEquals(Assert.java:118)
at org.junit.Assert.assertEquals(Assert.java:555)
at org.junit.Assert.assertEquals(Assert.java:542)
at org.apache.hadoop.hdfs.StripedFileTestUtil.verifyLocatedStripedBlocks(StripedFileTestUtil.java:343)
at org.apache.hadoop.hdfs.server.balancer.TestBalancer.doTestBalancerWithStripedFile(TestBalancer.java:1915)
at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerWithStripedFile(TestBalancer.java:1878)
FAILED: org.apache.hadoop.hdfs.server.datanode.TestDataNodeUUID.testUUIDRegeneration
Error Message:
test timed out after 10000 milliseconds
Stack Trace:
java.lang.Exception: test timed out after 10000 milliseconds
at java.util.zip.Inflater.inflateBytes(Native Method)
at java.util.zip.Inflater.inflate(Inflater.java:259)
at java.util.zip.InflaterInputStream.read(InflaterInputStream.java:152)
at sun.misc.Resource.getBytes(Resource.java:124)
at java.net.URLClassLoader.defineClass(URLClassLoader.java:444)
at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at io.netty.buffer.PooledByteBufAllocator.<init>(PooledByteBufAllocator.java:168)
at io.netty.buffer.PooledByteBufAllocator.<init>(PooledByteBufAllocator.java:143)
at io.netty.buffer.PooledByteBufAllocator.<init>(PooledByteBufAllocator.java:135)
at io.netty.buffer.PooledByteBufAllocator.<clinit>(PooledByteBufAllocator.java:119)
at io.netty.buffer.ByteBufUtil.<clinit>(ByteBufUtil.java:71)
at io.netty.buffer.ByteBufAllocator.<clinit>(ByteBufAllocator.java:24)
at io.netty.channel.DefaultChannelConfig.<init>(DefaultChannelConfig.java:53)
at io.netty.channel.socket.DefaultServerSocketChannelConfig.<init>(DefaultServerSocketChannelConfig.java:45)
at io.netty.channel.socket.nio.NioServerSocketChannel$NioServerSocketChannelConfig.<init>(NioServerSocketChannel.java:189)
at io.netty.channel.socket.nio.NioServerSocketChannel$NioServerSocketChannelConfig.<init>(NioServerSocketChannel.java:187)
at io.netty.channel.socket.nio.NioServerSocketChannel.<init>(NioServerSocketChannel.java:85)
at io.netty.channel.socket.nio.NioServerSocketChannel.<init>(NioServerSocketChannel.java:70)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
at java.lang.Class.newInstance(Class.java:374)
at io.netty.channel.ReflectiveChannelFactory.newChannel(ReflectiveChannelFactory.java:38)
at io.netty.bootstrap.AbstractBootstrap.initAndRegister(AbstractBootstrap.java:315)
at io.netty.bootstrap.AbstractBootstrap.doBind(AbstractBootstrap.java:280)
at io.netty.bootstrap.AbstractBootstrap.bind(AbstractBootstrap.java:276)
at org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer.start(DatanodeHttpServer.java:209)
at org.apache.hadoop.hdfs.server.datanode.DataNode.startInfoServer(DataNode.java:892)
at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:1285)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:479)
at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:2584)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:2472)
at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1592)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:844)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:482)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:441)
at org.apache.hadoop.hdfs.server.datanode.TestDataNodeUUID.testUUIDRegeneration(TestDataNodeUUID.java:86)
FAILED: TEST-org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.xml.<init>
Error Message:
Stack Trace:
Failed to read test report file /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports/TEST-org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.xml
org.dom4j.DocumentException: Error on line 22188 of document file:///home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports/TEST-org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.xml : XML document structures must start and end within the same entity. Nested exception: XML document structures must start and end within the same entity.
at org.dom4j.io.SAXReader.read(SAXReader.java:482)
at org.dom4j.io.SAXReader.read(SAXReader.java:264)
at hudson.tasks.junit.SuiteResult.parse(SuiteResult.java:123)
at hudson.tasks.junit.TestResult.parse(TestResult.java:282)
at hudson.tasks.junit.TestResult.parsePossiblyEmpty(TestResult.java:228)
at hudson.tasks.junit.TestResult.parse(TestResult.java:163)
at hudson.tasks.junit.TestResult.parse(TestResult.java:146)
at hudson.tasks.junit.TestResult.<init>(TestResult.java:122)
at hudson.tasks.junit.JUnitParser$ParseResultCallable.invoke(JUnitParser.java:119)
at hudson.tasks.junit.JUnitParser$ParseResultCallable.invoke(JUnitParser.java:93)
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2719)
at hudson.remoting.UserRequest.perform(UserRequest.java:120)
at hudson.remoting.UserRequest.perform(UserRequest.java:48)
at hudson.remoting.Request$2.run(Request.java:326)
at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:68)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.xml.sax.SAXParseException; systemId: file:///home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports/TEST-org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.xml; lineNumber: 22188; columnNumber: 730; XML document structures must start and end within the same entity.
at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.createSAXParseException(ErrorHandlerWrapper.java:198)
at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.fatalError(ErrorHandlerWrapper.java:177)
at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(XMLErrorReporter.java:441)
at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(XMLErrorReporter.java:368)
at com.sun.org.apache.xerces.internal.impl.XMLScanner.reportFatalError(XMLScanner.java:1436)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.endEntity(XMLDocumentFragmentScannerImpl.java:903)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentScannerImpl.endEntity(XMLDocumentScannerImpl.java:563)
at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.endEntity(XMLEntityManager.java:1384)
at com.sun.org.apache.xerces.internal.impl.XMLEntityScanner.load(XMLEntityScanner.java:1774)
at com.sun.org.apache.xerces.internal.impl.XMLEntityScanner.scanData(XMLEntityScanner.java:1252)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.scanCDATASection(XMLDocumentFragmentScannerImpl.java:1654)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl$FragmentContentDriver.next(XMLDocumentFragmentScannerImpl.java:3020)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentScannerImpl.next(XMLDocumentScannerImpl.java:606)
at com.sun.org.apache.xerces.internal.impl.XMLNSDocumentScannerImpl.next(XMLNSDocumentScannerImpl.java:117)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.scanDocument(XMLDocumentFragmentScannerImpl.java:510)
at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:848)
at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:777)
at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:141)
at com.sun.org.apache.xerces.internal.parsers.AbstractSAXParser.parse(AbstractSAXParser.java:1213)
at com.sun.org.apache.xerces.internal.jaxp.SAXParserImpl$JAXPSAXParser.parse(SAXParserImpl.java:648)
at org.dom4j.io.SAXReader.read(SAXReader.java:465)
... 18 more
Nested exception:
org.xml.sax.SAXParseException; systemId: file:///home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports/TEST-org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.xml; lineNumber: 22188; columnNumber: 730; XML document structures must start and end within the same entity.
at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.createSAXParseException(ErrorHandlerWrapper.java:198)
at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.fatalError(ErrorHandlerWrapper.java:177)
at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(XMLErrorReporter.java:441)
at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(XMLErrorReporter.java:368)
at com.sun.org.apache.xerces.internal.impl.XMLScanner.reportFatalError(XMLScanner.java:1436)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.endEntity(XMLDocumentFragmentScannerImpl.java:903)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentScannerImpl.endEntity(XMLDocumentScannerImpl.java:563)
at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.endEntity(XMLEntityManager.java:1384)
at com.sun.org.apache.xerces.internal.impl.XMLEntityScanner.load(XMLEntityScanner.java:1774)
at com.sun.org.apache.xerces.internal.impl.XMLEntityScanner.scanData(XMLEntityScanner.java:1252)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.scanCDATASection(XMLDocumentFragmentScannerImpl.java:1654)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl$FragmentContentDriver.next(XMLDocumentFragmentScannerImpl.java:3020)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentScannerImpl.next(XMLDocumentScannerImpl.java:606)
at com.sun.org.apache.xerces.internal.impl.XMLNSDocumentScannerImpl.next(XMLNSDocumentScannerImpl.java:117)
at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImpl.scanDocument(XMLDocumentFragmentScannerImpl.java:510)
at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:848)
at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:777)
at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:141)
at com.sun.org.apache.xerces.internal.parsers.AbstractSAXParser.parse(AbstractSAXParser.java:1213)
at com.sun.org.apache.xerces.internal.jaxp.SAXParserImpl$JAXPSAXParser.parse(SAXParserImpl.java:648)
at org.dom4j.io.SAXReader.read(SAXReader.java:465)
at org.dom4j.io.SAXReader.read(SAXReader.java:264)
at hudson.tasks.junit.SuiteResult.parse(SuiteResult.java:123)
at hudson.tasks.junit.TestResult.parse(TestResult.java:282)
at hudson.tasks.junit.TestResult.parsePossiblyEmpty(TestResult.java:228)
at hudson.tasks.junit.TestResult.parse(TestResult.java:163)
at hudson.tasks.junit.TestResult.parse(TestResult.java:146)
at hudson.tasks.junit.TestResult.<init>(TestResult.java:122)
at hudson.tasks.junit.JUnitParser$ParseResultCallable.invoke(JUnitParser.java:119)
at hudson.tasks.junit.JUnitParser$ParseResultCallable.invoke(JUnitParser.java:93)
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2719)
at hudson.remoting.UserRequest.perform(UserRequest.java:120)
at hudson.remoting.UserRequest.perform(UserRequest.java:48)
at hudson.remoting.Request$2.run(Request.java:326)
at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:68)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
FAILED: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestLazyPersistReplicaRecovery.testDnRestartWithSavedReplicas
Error Message:
Expected: is <DISK>
but: was <RAM_DISK>
Stack Trace:
java.lang.AssertionError:
Expected: is <DISK>
but: was <RAM_DISK>
at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:20)
at org.junit.Assert.assertThat(Assert.java:865)
at org.junit.Assert.assertThat(Assert.java:832)
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase.ensureFileReplicasOnStorageType(LazyPersistTestCase.java:141)
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestLazyPersistReplicaRecovery.testDnRestartWithSavedReplicas(TestLazyPersistReplicaRecovery.java:53)
FAILED: org.apache.hadoop.hdfs.server.namenode.TestEditLog.testBatchedSyncWithClosedLogs[1]
Error Message:
logging edit without syncing should do not affect txid expected:<1> but was:<2>
Stack Trace:
java.lang.AssertionError: logging edit without syncing should do not affect txid expected:<1> but was:<2>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:743)
at org.junit.Assert.assertEquals(Assert.java:118)
at org.junit.Assert.assertEquals(Assert.java:555)
at org.apache.hadoop.hdfs.server.namenode.TestEditLog.testBatchedSyncWithClosedLogs(TestEditLog.java:594)
FAILED: org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode.testName
Error Message:
Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":38878;
Stack Trace:
java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":38878;
at sun.security.krb5.KdcComm.send(KdcComm.java:250)
at sun.security.krb5.KdcComm.send(KdcComm.java:191)
at sun.security.krb5.KrbTgsReq.send(KrbTgsReq.java:187)
at sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:202)
at sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:311)
at sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:115)
at sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:449)
at sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:641)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:248)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179)
at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:193)
at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:411)
at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:617)
at org.apache.hadoop.ipc.Client$Connection.access$2000(Client.java:417)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:799)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:795)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:794)
at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:417)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1547)
at org.apache.hadoop.ipc.Client.call(Client.java:1394)
at org.apache.hadoop.ipc.Client.call(Client.java:1358)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:241)
at com.sun.proxy.$Proxy19.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:569)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:257)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
at com.sun.proxy.$Proxy20.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2302)
at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2277)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1079)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1076)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1069)
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1908)
at org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode.testName(TestSecureNameNode.java:65)
FAILED: org.apache.hadoop.hdfs.server.namenode.ha.TestDFSUpgradeWithHA.testRollbackWithJournalNodes
Error Message:
null
Stack Trace:
java.lang.AssertionError: null
at org.junit.Assert.fail(Assert.java:86)
at org.junit.Assert.assertTrue(Assert.java:41)
at org.junit.Assert.assertTrue(Assert.java:52)
at org.apache.hadoop.hdfs.server.namenode.ha.TestDFSUpgradeWithHA.testRollbackWithJournalNodes(TestDFSUpgradeWithHA.java:687)
FAILED: org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testMissingPropertiesWithSecureHDFS
Error Message:
Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":42390;
Stack Trace:
java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":42390;
at sun.security.krb5.KdcComm.send(KdcComm.java:250)
at sun.security.krb5.KdcComm.send(KdcComm.java:191)
at sun.security.krb5.KrbTgsReq.send(KrbTgsReq.java:187)
at sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:202)
at sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:311)
at sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:115)
at sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:449)
at sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:641)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:248)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179)
at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:193)
at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:411)
at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:617)
at org.apache.hadoop.ipc.Client$Connection.access$2000(Client.java:417)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:799)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:795)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:794)
at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:417)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1547)
at org.apache.hadoop.ipc.Client.call(Client.java:1394)
at org.apache.hadoop.ipc.Client.call(Client.java:1358)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:241)
at com.sun.proxy.$Proxy25.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:569)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:257)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
at com.sun.proxy.$Proxy27.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2302)
at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2277)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1079)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1076)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1069)
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1908)
at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.createDirectoriesSecurely(TestRollingFileSystemSinkWithSecureHdfs.java:192)
at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testMissingPropertiesWithSecureHDFS(TestRollingFileSystemSinkWithSecureHdfs.java:146)
FAILED: org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testWithSecureHDFS
Error Message:
Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":56424;
Stack Trace:
java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":56424;
at sun.security.krb5.KdcComm.send(KdcComm.java:250)
at sun.security.krb5.KdcComm.send(KdcComm.java:191)
at sun.security.krb5.KrbTgsReq.send(KrbTgsReq.java:187)
at sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:202)
at sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:311)
at sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:115)
at sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:449)
at sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:641)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:248)
at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179)
at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:193)
at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:411)
at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:617)
at org.apache.hadoop.ipc.Client$Connection.access$2000(Client.java:417)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:799)
at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:795)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:794)
at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:417)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1547)
at org.apache.hadoop.ipc.Client.call(Client.java:1394)
at org.apache.hadoop.ipc.Client.call(Client.java:1358)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:241)
at com.sun.proxy.$Proxy25.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:569)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:257)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
at com.sun.proxy.$Proxy27.mkdirs(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2302)
at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2277)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1079)
at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1076)
at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1069)
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1908)
at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.createDirectoriesSecurely(TestRollingFileSystemSinkWithSecureHdfs.java:206)
at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testWithSecureHDFS(TestRollingFileSystemSinkWithSecureHdfs.java:95)
FAILED: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache.testDataXceiverCleansUpSlotsOnFailure
Error Message:
expected:<1> but was:<2>
Stack Trace:
java.lang.AssertionError: expected:<1> but was:<2>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:743)
at org.junit.Assert.assertEquals(Assert.java:118)
at org.junit.Assert.assertEquals(Assert.java:555)
at org.junit.Assert.assertEquals(Assert.java:542)
at org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache$17.accept(TestShortCircuitCache.java:633)
at org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.visit(ShortCircuitRegistry.java:403)
at org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache.checkNumberOfSegmentsAndSlots(TestShortCircuitCache.java:628)
at org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache.testDataXceiverCleansUpSlotsOnFailure(TestShortCircuitCache.java:682)