Posted to hdfs-dev@hadoop.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2016/05/12 18:04:40 UTC

Hadoop-Hdfs-trunk - Build # 3133 - Still Failing

See https://builds.apache.org/job/Hadoop-Hdfs-trunk/3133/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 8391 lines...]
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ hadoop-hdfs-project ---
[INFO] Deleting /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target
[INFO] 
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks

main:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO] 
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-site-plugin:3.5:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO] 
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [05:22 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [  01:27 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [  0.139 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:32 h
[INFO] Finished at: 2016-05-12T18:04:38+00:00
[INFO] Final Memory: 72M/801M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-hdfs
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: hdfs-dev@hadoop.apache.org
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
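
The surefire message above ("There was a timeout or other error in the fork") means the forked test JVM hung or died; the module compiled fine, and the failure details are in the per-test report below. Maven's own hints apply when reproducing locally: resume from the failing module with mvn <goals> -rf :hadoop-hdfs, adding -e for full stack traces or -X for debug logging. The concrete goals Jenkins invoked are in the truncated part of the console above, so any specific goal (e.g. test) would be an assumption.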



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncAPI

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at org.apache.hadoop.hdfs.DataStreamer.waitForAckedSeqno(DataStreamer.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream.flushInternal(DFSOutputStream.java:697)
	at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:778)
	at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:755)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
	at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:430)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:379)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:372)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:365)
	at org.apache.hadoop.hdfs.TestAsyncDFSRename.internalTestConcurrentAsyncAPI(TestAsyncDFSRename.java:328)
	at org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncAPI(TestAsyncDFSRename.java:289)


FAILED:  org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncRenameWithOverwrite

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at org.apache.hadoop.hdfs.DataStreamer.waitForAckedSeqno(DataStreamer.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream.flushInternal(DFSOutputStream.java:697)
	at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:778)
	at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:755)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
	at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:430)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:379)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:372)
	at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:365)
	at org.apache.hadoop.hdfs.TestAsyncDFSRename.internalTestConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:226)
	at org.apache.hadoop.hdfs.TestAsyncDFSRename.testAggressiveConcurrentAsyncRenameWithOverwrite(TestAsyncDFSRename.java:199)
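
Both timeouts above fire inside DataStreamer.waitForAckedSeqno while a test file is being closed, i.e. the client never received pipeline acks within the 60-second budget, so the traces show a stalled HDFS write rather than a bug in the rename path itself. In JUnit 4 that budget is declared on the test method; a minimal sketch of the pattern (class and method names below are illustrative, not the actual TestAsyncDFSRename code):

    import org.junit.Test;

    public class TimeoutSketch {
      // JUnit 4 runs the method on a watched thread and fails it with
      // "test timed out after 60000 milliseconds" once the budget expires;
      // the reported stack trace is whatever the stalled thread was doing.
      @Test(timeout = 60000)
      public void testSlowWrite() throws Exception {
        // ... exercise the code under test; if it blocks (for example
        // waiting on pipeline acks, as in the traces above), JUnit
        // interrupts it and raises the timeout exception.
      }
    }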


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testMoveWithTargetPortEmpty

Error Message:
Problem binding to [localhost:9820] java.net.BindException: Address already in use; For more details see:  http://wiki.apache.org/hadoop/BindException

Stack Trace:
java.net.BindException: Problem binding to [localhost:9820] java.net.BindException: Address already in use; For more details see:  http://wiki.apache.org/hadoop/BindException
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.apache.hadoop.ipc.Server.bind(Server.java:530)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:793)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:2592)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:958)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server.<init>(ProtobufRpcEngine.java:563)
	at org.apache.hadoop.ipc.ProtobufRpcEngine.getServer(ProtobufRpcEngine.java:538)
	at org.apache.hadoop.ipc.RPC$Builder.build(RPC.java:800)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.<init>(NameNodeRpcServer.java:426)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createRpcServer(NameNode.java:783)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:924)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:903)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1620)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNode(MiniDFSCluster.java:1247)
	at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1016)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:891)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:823)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:482)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:441)
	at org.apache.hadoop.hdfs.TestDFSShell.testMoveWithTargetPortEmpty(TestDFSShell.java:567)
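
This one is environmental rather than a product bug: the test brings up a NameNode on the default RPC port (9820 on trunk) and something else on the build slave already holds it. TestDFSShell.testMoveWithTargetPortEmpty appears to need the default port deliberately, since it exercises target URIs with no port component, which makes it inherently racy on a shared Jenkins host. Tests without that requirement avoid the race by letting MiniDFSCluster pick an ephemeral port; a minimal sketch, assuming a plain Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Port 0 asks the OS for any free port, so concurrent builds on
        // the same host cannot collide the way fixed port 9820 did above.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nameNodePort(0)
            .numDataNodes(1)
            .build();
        try {
          // ... run the test against cluster.getFileSystem() ...
        } finally {
          cluster.shutdown();
        }
      }
    }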


FAILED:  org.apache.hadoop.hdfs.TestFileAppend.testMultipleAppends

Error Message:
Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:35341,DS-35dfa487-c50f-46d8-bd79-7d20c7780d35,DISK], DatanodeInfoWithStorage[127.0.0.1:41516,DS-ae64448e-6434-4a70-a23c-b8e8a5c6bda5,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:41516,DS-ae64448e-6434-4a70-a23c-b8e8a5c6bda5,DISK], DatanodeInfoWithStorage[127.0.0.1:35341,DS-35dfa487-c50f-46d8-bd79-7d20c7780d35,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.

Stack Trace:
java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try. (Nodes: current=[DatanodeInfoWithStorage[127.0.0.1:35341,DS-35dfa487-c50f-46d8-bd79-7d20c7780d35,DISK], DatanodeInfoWithStorage[127.0.0.1:41516,DS-ae64448e-6434-4a70-a23c-b8e8a5c6bda5,DISK]], original=[DatanodeInfoWithStorage[127.0.0.1:41516,DS-ae64448e-6434-4a70-a23c-b8e8a5c6bda5,DISK], DatanodeInfoWithStorage[127.0.0.1:35341,DS-35dfa487-c50f-46d8-bd79-7d20c7780d35,DISK]]). The current failed datanode replacement policy is DEFAULT, and a client may configure this via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
	at org.apache.hadoop.hdfs.DataStreamer.findNewDatanode(DataStreamer.java:1166)
	at org.apache.hadoop.hdfs.DataStreamer.addDatanode2ExistingPipeline(DataStreamer.java:1236)
	at org.apache.hadoop.hdfs.DataStreamer.handleDatanodeReplacement(DataStreamer.java:1427)
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1342)
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1325)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:603)
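
The error text names the relevant client knob itself. With only the two datanodes visible in the message, the DEFAULT replacement policy has nowhere to find a substitute when a pipeline node fails mid-append, so the write is failed instead. A client (or a small test cluster) that would rather keep writing on the shrunken pipeline can relax the policy; a minimal sketch using the stock Configuration API (NEVER is one of the documented policy values DEFAULT/ALWAYS/NEVER):

    import org.apache.hadoop.conf.Configuration;

    public class ReplacePolicySketch {
      public static Configuration relaxedClientConf() {
        Configuration conf = new Configuration();
        // Do not try to replace a failed datanode in the write pipeline;
        // keep writing to the remaining nodes. Reasonable for tiny test
        // clusters, a durability trade-off in production.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy",
            "NEVER");
        return conf;
      }
    }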


FAILED:  org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testBlockReportQueueing

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
	at org.junit.Assert.fail(Assert.java:86)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.junit.Assert.assertTrue(Assert.java:52)
	at org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.testBlockReportQueueing(TestBlockManager.java:1074)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestNameNodeMetadataConsistency.testGenerationStampInFuture

Error Message:
expected:<17> but was:<0>

Stack Trace:
java.lang.AssertionError: expected:<17> but was:<0>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.junit.Assert.assertEquals(Assert.java:542)
	at org.apache.hadoop.hdfs.server.namenode.TestNameNodeMetadataConsistency.testGenerationStampInFuture(TestNameNodeMetadataConsistency.java:113)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode.testName

Error Message:
Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":58382; 

Stack Trace:
java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":58382; 
	at sun.security.krb5.KdcComm.send(KdcComm.java:250)
	at sun.security.krb5.KdcComm.send(KdcComm.java:191)
	at sun.security.krb5.KrbTgsReq.send(KrbTgsReq.java:187)
	at sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:202)
	at sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:311)
	at sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:115)
	at sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:449)
	at sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:641)
	at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:248)
	at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179)
	at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:193)
	at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:411)
	at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:617)
	at org.apache.hadoop.ipc.Client$Connection.access$2000(Client.java:417)
	at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:799)
	at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:795)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:794)
	at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:417)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1547)
	at org.apache.hadoop.ipc.Client.call(Client.java:1394)
	at org.apache.hadoop.ipc.Client.call(Client.java:1358)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:241)
	at com.sun.proxy.$Proxy19.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:582)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:257)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy20.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2302)
	at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2277)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1119)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1116)
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1116)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1108)
	at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1909)
	at org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode.testName(TestSecureNameNode.java:65)


FAILED:  org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testMissingPropertiesWithSecureHDFS

Error Message:
Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":47390; 

Stack Trace:
java.io.IOException: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Cannot get a KDC reply)]; Host Details : local host is: "asf903.gq1.ygridcore.net/67.195.81.147"; destination host is: "localhost":47390; 
	at sun.security.krb5.KdcComm.send(KdcComm.java:250)
	at sun.security.krb5.KdcComm.send(KdcComm.java:191)
	at sun.security.krb5.KrbTgsReq.send(KrbTgsReq.java:187)
	at sun.security.krb5.KrbTgsReq.sendAndGetCreds(KrbTgsReq.java:202)
	at sun.security.krb5.internal.CredentialsUtil.serviceCreds(CredentialsUtil.java:311)
	at sun.security.krb5.internal.CredentialsUtil.acquireServiceCreds(CredentialsUtil.java:115)
	at sun.security.krb5.Credentials.acquireServiceCreds(Credentials.java:449)
	at sun.security.jgss.krb5.Krb5Context.initSecContext(Krb5Context.java:641)
	at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:248)
	at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179)
	at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:193)
	at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:411)
	at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:617)
	at org.apache.hadoop.ipc.Client$Connection.access$2000(Client.java:417)
	at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:799)
	at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:795)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1755)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:794)
	at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:417)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1547)
	at org.apache.hadoop.ipc.Client.call(Client.java:1394)
	at org.apache.hadoop.ipc.Client.call(Client.java:1358)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:241)
	at com.sun.proxy.$Proxy25.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:582)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:257)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy27.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2302)
	at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2277)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1119)
	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1116)
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1116)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1108)
	at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1909)
	at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.createDirectoriesSecurely(TestRollingFileSystemSinkWithSecureHdfs.java:192)
	at org.apache.hadoop.metrics2.sink.TestRollingFileSystemSinkWithSecureHdfs.testMissingPropertiesWithSecureHDFS(TestRollingFileSystemSinkWithSecureHdfs.java:146)
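
The last two failures share a root cause: the SASL/GSS handshake never gets a KDC reply, so they point at the test's Kerberos environment rather than at HDFS. Secure tests like these typically stand up an embedded KDC from hadoop-minikdc in their setup; a minimal sketch of that setup, with directory, keytab, and principal names purely illustrative:

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    public class MiniKdcSketch {
      public static void main(String[] args) throws Exception {
        Properties kdcConf = MiniKdc.createConf();
        File workDir = new File("target/kdc");           // illustrative path
        MiniKdc kdc = new MiniKdc(kdcConf, workDir);
        kdc.start();                                     // KDC listens on kdc.getPort()
        // Principal and keytab for the test NameNode to log in with.
        File keytab = new File(workDir, "test.keytab");  // illustrative name
        kdc.createPrincipal(keytab, "hdfs/localhost");   // illustrative principal
        // ... run the secure test; "Cannot get a KDC reply" in the traces
        // above means this KDC (or the krb5 config pointing at it) was
        // unreachable from the client.
        kdc.stop();
      }
    }

On a loaded build slave the embedded KDC can simply be too slow to answer, so failures of this shape are often environmental flakes rather than regressions.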