Posted to jira@kafka.apache.org by "wade wu (JIRA)" <ji...@apache.org> on 2019/03/22 17:36:00 UTC

[jira] [Created] (KAFKA-8149) ERROR Disk error while writing to recovery point file

wade wu created KAFKA-8149:
------------------------------

             Summary: ERROR Disk error while writing to recovery point file
                 Key: KAFKA-8149
                 URL: https://issues.apache.org/jira/browse/KAFKA-8149
             Project: Kafka
          Issue Type: Bug
          Components: core
    Affects Versions: 1.1.1
         Environment: Windows
            Reporter: wade wu


[2019-03-17 02:55:14,458] ERROR Disk error while writing to recovery point file in directory I:\data\Kafka\kafka-datalogs (kafka.server.LogDirFailureChannel)
java.nio.file.AccessDeniedException: H:\data\Kafka\kafka-datalogs\AzPubSubCompactTestNew1-0\00000000000001170892.snapshot
 at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:83)
 at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97)
 at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:102)
 at sun.nio.fs.WindowsFileSystemProvider.implDelete(WindowsFileSystemProvider.java:269)
 at sun.nio.fs.AbstractFileSystemProvider.deleteIfExists(AbstractFileSystemProvider.java:108)
 at java.nio.file.Files.deleteIfExists(Files.java:1165)
 at kafka.log.ProducerStateManager$$anonfun$kafka$log$ProducerStateManager$$deleteSnapshotFiles$2.apply(ProducerStateManager.scala:458)
 at kafka.log.ProducerStateManager$$anonfun$kafka$log$ProducerStateManager$$deleteSnapshotFiles$2.apply(ProducerStateManager.scala:457)
 at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
 at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:35)
 at kafka.log.ProducerStateManager$.kafka$log$ProducerStateManager$$deleteSnapshotFiles(ProducerStateManager.scala:457)
 at kafka.log.ProducerStateManager$.deleteSnapshotsBefore(ProducerStateManager.scala:454)
 at kafka.log.ProducerStateManager.deleteSnapshotsBefore(ProducerStateManager.scala:763)
 at kafka.log.Log.deleteSnapshotsAfterRecoveryPointCheckpoint(Log.scala:1461)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1$$anonfun$apply$29$$anonfun$apply$31.apply(LogManager.scala:577)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1$$anonfun$apply$29$$anonfun$apply$31.apply(LogManager.scala:577)
 at scala.collection.immutable.List.foreach(List.scala:392)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1$$anonfun$apply$29.apply(LogManager.scala:577)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1$$anonfun$apply$29.apply(LogManager.scala:573)
 at scala.Option.foreach(Option.scala:257)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1.apply(LogManager.scala:573)
 at kafka.log.LogManager$$anonfun$kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir$1.apply(LogManager.scala:572)
 at scala.Option.foreach(Option.scala:257)
 at kafka.log.LogManager.kafka$log$LogManager$$checkpointLogRecoveryOffsetsInDir(LogManager.scala:572)
 at kafka.log.LogManager$$anonfun$checkpointLogRecoveryOffsets$1.apply(LogManager.scala:556)
 at kafka.log.LogManager$$anonfun$checkpointLogRecoveryOffsets$1.apply(LogManager.scala:556)
 at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
 at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
 at kafka.log.LogManager.checkpointLogRecoveryOffsets(LogManager.scala:556)
 at kafka.log.LogManager.truncateTo(LogManager.scala:520)
 at kafka.cluster.Partition$$anonfun$truncateTo$1.apply$mcV$sp(Partition.scala:665)
 at kafka.cluster.Partition$$anonfun$truncateTo$1.apply(Partition.scala:665)
 at kafka.cluster.Partition$$anonfun$truncateTo$1.apply(Partition.scala:665)
 at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:250)
 at kafka.utils.CoreUtils$.inReadLock(CoreUtils.scala:256)
 at kafka.cluster.Partition.truncateTo(Partition.scala:664)
 at kafka.server.ReplicaFetcherThread$$anonfun$maybeTruncate$1.apply(ReplicaFetcherThread.scala:320)
 at kafka.server.ReplicaFetcherThread$$anonfun$maybeTruncate$1.apply(ReplicaFetcherThread.scala:301)
 at scala.collection.immutable.Map$Map2.foreach(Map.scala:137)
 at kafka.server.ReplicaFetcherThread.maybeTruncate(ReplicaFetcherThread.scala:301)
 at kafka.server.AbstractFetcherThread$$anonfun$maybeTruncate$1.apply$mcV$sp(AbstractFetcherThread.scala:133)
 at kafka.server.AbstractFetcherThread$$anonfun$maybeTruncate$1.apply(AbstractFetcherThread.scala:130)
 at kafka.server.AbstractFetcherThread$$anonfun$maybeTruncate$1.apply(AbstractFetcherThread.scala:130)
 at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:250)
 at kafka.server.AbstractFetcherThread.maybeTruncate(AbstractFetcherThread.scala:130)
 at kafka.server.AbstractFetcherThread.doWork(AbstractFetcherThread.scala:100)
 at kafka.utils.ShutdownableThread.run(ShutdownableThread.scala:82)
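
For context, a minimal, hypothetical Java sketch (not Kafka code) of one common way java.nio.file.Files.deleteIfExists can raise java.nio.file.AccessDeniedException on Windows: the file still has an active memory mapping (or another handle opened without delete sharing) at the moment the delete runs, as in the deleteSnapshotFiles call above. Whether that is the exact trigger for this report is an assumption; the class name MappedDeleteRepro and the temp-file name are made up for illustration.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.AccessDeniedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Hypothetical repro sketch, not Kafka code: deleting a file that still has an
// active memory mapping typically fails on Windows with AccessDeniedException,
// the same exception type shown in the stack trace above.
public class MappedDeleteRepro {
    public static void main(String[] args) throws IOException {
        Path snapshot = Files.createTempFile("snapshot", ".snapshot");
        try (FileChannel channel = FileChannel.open(snapshot,
                StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            channel.write(ByteBuffer.wrap(new byte[] {1, 2, 3}));
            // Map the file; the mapping keeps the file open until the buffer is
            // garbage-collected, even if the channel itself is closed first.
            MappedByteBuffer mapped = channel.map(FileChannel.MapMode.READ_ONLY, 0, 3);
            try {
                Files.deleteIfExists(snapshot);
                System.out.println("deleted (non-Windows, or mapping already released)");
            } catch (AccessDeniedException e) {
                // On Windows, ERROR_ACCESS_DENIED is translated to
                // java.nio.file.AccessDeniedException by the NIO provider.
                System.out.println("delete failed: " + e);
            }
            // Keep the mapping reachable until after the delete attempt.
            System.out.println("first mapped byte: " + mapped.get(0));
        }
    }
}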


