Posted to dev@kafka.apache.org by "Kevin Tippenhauer (JIRA)" <ji...@apache.org> on 2017/01/25 08:25:26 UTC

[jira] [Created] (KAFKA-4695) Can't start Kafka server after Blue Screen

Kevin Tippenhauer created KAFKA-4695:
----------------------------------------

             Summary: Can't start Kafka server after Blue Screen
                 Key: KAFKA-4695
                 URL: https://issues.apache.org/jira/browse/KAFKA-4695
             Project: Kafka
          Issue Type: Bug
          Components: log
    Affects Versions: 0.10.1.1
         Environment: Windows 10
            Reporter: Kevin Tippenhauer
            Priority: Critical


After a blue screen (my laptop crashes like that quite often) I can't start my Kafka server anymore: startup fails with {{java.io.IOException: Das Argument ist ungültig}} ("The argument is invalid", i.e. EINVAL). Removing the *.index files as mentioned on [KAFKA-1554|https://issues.apache.org/jira/browse/KAFKA-1554] didn't work.
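
For reference, this is roughly what the cleanup looked like. It is only my sketch of the KAFKA-1554 workaround, run with the broker stopped; the class name and the hard-coded path are my own, and it also removes the *.timeindex files that the log messages below mention:
{code:title=DeleteIndexFiles.java|borderStyle=solid}
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

// Sketch of the KAFKA-1554 workaround: with the broker stopped, delete all
// *.index and *.timeindex files under log.dirs so Kafka rebuilds them on the
// next startup.
public class DeleteIndexFiles {
    public static void main(String[] args) throws IOException {
        Path logDirs = Paths.get("/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs");
        try (Stream<Path> files = Files.walk(logDirs)) {
            files.filter(p -> p.toString().endsWith(".index")
                           || p.toString().endsWith(".timeindex"))
                 .forEach(p -> {
                     System.out.println("deleting " + p);
                     try {
                         Files.delete(p);
                     } catch (IOException e) {
                         throw new RuntimeException(e); // abort on the first failed delete
                     }
                 });
        }
    }
}
{code}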

Log output from a normal startup attempt:
{code:title=server.log|borderStyle=solid}
kafka_2.11-0.10.1.1# cat logs/server.log
[2017-01-25 06:54:33,819] INFO KafkaConfig values:
        advertised.host.name = null
        advertised.listeners = null
        advertised.port = null
        authorizer.class.name =
        auto.create.topics.enable = true
        auto.leader.rebalance.enable = true
        background.threads = 10
        broker.id = 0
        broker.id.generation.enable = true
        broker.rack = null
        compression.type = producer
        connections.max.idle.ms = 600000
        controlled.shutdown.enable = true
        controlled.shutdown.max.retries = 3
        controlled.shutdown.retry.backoff.ms = 5000
        controller.socket.timeout.ms = 30000
        default.replication.factor = 1
        delete.topic.enable = false
        fetch.purgatory.purge.interval.requests = 1000
        group.max.session.timeout.ms = 300000
        group.min.session.timeout.ms = 6000
        host.name =
        inter.broker.protocol.version = 0.10.1-IV2
        leader.imbalance.check.interval.seconds = 300
        leader.imbalance.per.broker.percentage = 10
        listeners = PLAINTEXT://127.0.0.1:9092
        log.cleaner.backoff.ms = 15000
        log.cleaner.dedupe.buffer.size = 134217728
        log.cleaner.delete.retention.ms = 86400000
        log.cleaner.enable = true
        log.cleaner.io.buffer.load.factor = 0.9
        log.cleaner.io.buffer.size = 524288
        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
        log.cleaner.min.cleanable.ratio = 0.5
        log.cleaner.min.compaction.lag.ms = 0
        log.cleaner.threads = 1
        log.cleanup.policy = [delete]
        log.dir = /tmp/kafka-logs
        log.dirs = /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs
        log.flush.interval.messages = 9223372036854775807
        log.flush.interval.ms = null
        log.flush.offset.checkpoint.interval.ms = 60000
        log.flush.scheduler.interval.ms = 9223372036854775807
        log.index.interval.bytes = 4096
        log.index.size.max.bytes = 10485760
        log.message.format.version = 0.10.1-IV2
        log.message.timestamp.difference.max.ms = 9223372036854775807
        log.message.timestamp.type = CreateTime
        log.preallocate = false
        log.retention.bytes = -1
        log.retention.check.interval.ms = 300000
        log.retention.hours = 168
        log.retention.minutes = null
        log.retention.ms = null
        log.roll.hours = 168
        log.roll.jitter.hours = 0
        log.roll.jitter.ms = null
        log.roll.ms = null
        log.segment.bytes = 1073741824
        log.segment.delete.delay.ms = 60000
        max.connections.per.ip = 2147483647
        max.connections.per.ip.overrides =
        message.max.bytes = 1000012
        metric.reporters = []
        metrics.num.samples = 2
        metrics.sample.window.ms = 30000
        min.insync.replicas = 1
        num.io.threads = 8
        num.network.threads = 3
        num.partitions = 1
        num.recovery.threads.per.data.dir = 1
        num.replica.fetchers = 1
        offset.metadata.max.bytes = 4096
        offsets.commit.required.acks = -1
        offsets.commit.timeout.ms = 5000
        offsets.load.buffer.size = 5242880
        offsets.retention.check.interval.ms = 600000
        offsets.retention.minutes = 1440
        offsets.topic.compression.codec = 0
        offsets.topic.num.partitions = 50
        offsets.topic.replication.factor = 3
        offsets.topic.segment.bytes = 104857600
        port = 9092
        principal.builder.class = class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder
        producer.purgatory.purge.interval.requests = 1000
        queued.max.requests = 500
        quota.consumer.default = 9223372036854775807
        quota.producer.default = 9223372036854775807
        quota.window.num = 11
        quota.window.size.seconds = 1
        replica.fetch.backoff.ms = 1000
        replica.fetch.max.bytes = 1048576
        replica.fetch.min.bytes = 1
        replica.fetch.response.max.bytes = 10485760
        replica.fetch.wait.max.ms = 500
        replica.high.watermark.checkpoint.interval.ms = 5000
        replica.lag.time.max.ms = 10000
        replica.socket.receive.buffer.bytes = 65536
        replica.socket.timeout.ms = 30000
        replication.quota.window.num = 11
        replication.quota.window.size.seconds = 1
        request.timeout.ms = 30000
        reserved.broker.max.id = 1000
        sasl.enabled.mechanisms = [GSSAPI]
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.principal.to.local.rules = [DEFAULT]
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.mechanism.inter.broker.protocol = GSSAPI
        security.inter.broker.protocol = PLAINTEXT
        socket.receive.buffer.bytes = 102400
        socket.request.max.bytes = 104857600
        socket.send.buffer.bytes = 102400
        ssl.cipher.suites = null
        ssl.client.auth = none
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = null
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        unclean.leader.election.enable = true
        zookeeper.connect = localhost:2181
        zookeeper.connection.timeout.ms = 6000
        zookeeper.session.timeout.ms = 6000
        zookeeper.set.acl = false
        zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2017-01-25 06:54:33,980] INFO starting (kafka.server.KafkaServer)
[2017-01-25 06:54:33,995] INFO [ThrottledRequestReaper-Fetch], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 06:54:33,995] INFO [ThrottledRequestReaper-Produce], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 06:54:34,024] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)
[2017-01-25 06:54:34,336] INFO Cluster ID = rjpZT8_9RbCewZGKtw6pCg (kafka.server.KafkaServer)
[2017-01-25 06:54:34,447] INFO Loading logs. (kafka.log.LogManager)
[2017-01-25 06:54:34,581] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 06:54:34,597] ERROR There was an error in one of the threads during logs loading: java.io.IOException: Das Argument ist ungültig (kafka.log.LogManager)
[2017-01-25 06:54:34,598] FATAL Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
java.io.IOException: Das Argument ist ungültig
        at java.io.RandomAccessFile.setLength(Native Method)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:115)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:106)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.resize(AbstractIndex.scala:106)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply$mcV$sp(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.trimToValidSize(AbstractIndex.scala:159)
        at kafka.log.LogSegment.recover(LogSegment.scala:236)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:218)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:179)
        at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
        at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
        at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
        at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
        at kafka.log.Log.loadSegments(Log.scala:179)
        at kafka.log.Log.<init>(Log.scala:108)
        at kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$3$$anonfun$apply$10$$anonfun$apply$1.apply$mcV$sp(LogManager.scala:151)
        at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:58)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
[2017-01-25 06:54:34,598] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 06:54:34,618] INFO shutting down (kafka.server.KafkaServer)
[2017-01-25 06:54:34,630] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 06:54:34,644] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 06:54:34,648] INFO shut down completed (kafka.server.KafkaServer)
[2017-01-25 06:54:34,648] FATAL Fatal error during KafkaServerStartable startup. Prepare to shutdown (kafka.server.KafkaServerStartable)
java.io.IOException: Das Argument ist ungültig
        [stack trace identical to the KafkaServer trace above]
[2017-01-25 06:54:34,648] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/__consumer_offsets-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/__consumer_offsets-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/__consumer_offsets-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 06:54:34,667] INFO shutting down (kafka.server.KafkaServer)
{code}
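
The stack trace above points at {{RandomAccessFile.setLength()}} being called while the index file is still memory-mapped ({{AbstractIndex.resize}} via {{trimToValidSize}}). Below is a minimal standalone sketch of that pattern, assuming this is indeed the trigger; on plain Linux the truncate succeeds, but on Windows, and apparently on WSL {{/mnt/*}} drvfs mounts like my {{log.dirs}}, it can fail with exactly this {{IOException}}:
{code:title=MmapTruncateDemo.java|borderStyle=solid}
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

// Sketch of the suspected failure mode: shrink a file while a read-write
// memory mapping of it is still live, as AbstractIndex.resize() does.
public class MmapTruncateDemo {
    public static void main(String[] args) throws IOException {
        File f = File.createTempFile("index-demo", ".index");
        f.deleteOnExit();
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
            raf.setLength(10 * 1024 * 1024); // preallocate, like a fresh index file
            MappedByteBuffer mmap = raf.getChannel()
                    .map(FileChannel.MapMode.READ_WRITE, 0, raf.length());
            // Truncate while the mapping is alive. On Windows/WSL drvfs this
            // setLength call is where I would expect
            // "java.io.IOException: Das Argument ist ungültig" (EINVAL).
            raf.setLength(4096);
            System.out.println("setLength succeeded; mapped " + mmap.capacity() + " bytes");
        }
    }
}
{code}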

Log output from later startup attempts, including runs after the *.index files had been removed:
{code:title=server.log|borderStyle=solid}
[2017-01-25 07:11:53,348] INFO KafkaConfig values: [identical to the first excerpt above; omitted] (kafka.server.KafkaConfig)
[2017-01-25 07:11:53,490] INFO starting (kafka.server.KafkaServer)
[2017-01-25 07:11:53,512] INFO [ThrottledRequestReaper-Fetch], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 07:11:53,517] INFO [ThrottledRequestReaper-Produce], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 07:11:53,529] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)
[2017-01-25 07:11:53,749] INFO Cluster ID = rjpZT8_9RbCewZGKtw6pCg (kafka.server.KafkaServer)
[2017-01-25 07:11:53,863] INFO Loading logs. (kafka.log.LogManager)
[2017-01-25 07:11:53,911] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:11:53,923] ERROR There was an error in one of the threads during logs loading: java.io.IOException: Das Argument ist ungültig (kafka.log.LogManager)
[2017-01-25 07:11:53,927] FATAL Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
java.io.IOException: Das Argument ist ungültig
        at java.io.RandomAccessFile.setLength(Native Method)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:115)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:106)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.resize(AbstractIndex.scala:106)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply$mcV$sp(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.trimToValidSize(AbstractIndex.scala:159)
        at kafka.log.LogSegment.recover(LogSegment.scala:236)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:222)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:179)
        at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
        at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
        at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
        at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
        at kafka.log.Log.loadSegments(Log.scala:179)
        at kafka.log.Log.<init>(Log.scala:108)
        at kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$3$$anonfun$apply$10$$anonfun$apply$1.apply$mcV$sp(LogManager.scala:151)
        at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:58)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
[2017-01-25 07:11:53,931] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:11:53,942] INFO shutting down (kafka.server.KafkaServer)
[2017-01-25 07:11:53,955] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:11:53,960] INFO shut down completed (kafka.server.KafkaServer)
[2017-01-25 07:11:53,961] FATAL Fatal error during KafkaServerStartable startup. Prepare to shutdown (kafka.server.KafkaServerStartable)
java.io.IOException: Das Argument ist ungültig
        [stack trace identical to the KafkaServer trace above]
[2017-01-25 07:11:53,969] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:11:53,975] INFO shutting down (kafka.server.KafkaServer)
[2017-01-25 07:21:08,725] INFO KafkaConfig values: [identical to the first excerpt above; omitted] (kafka.server.KafkaConfig)
[2017-01-25 07:21:08,835] INFO starting (kafka.server.KafkaServer)
[2017-01-25 07:21:08,852] INFO [ThrottledRequestReaper-Fetch], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 07:21:08,860] INFO [ThrottledRequestReaper-Produce], Starting  (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2017-01-25 07:21:08,876] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)
[2017-01-25 07:21:09,143] INFO Cluster ID = rjpZT8_9RbCewZGKtw6pCg (kafka.server.KafkaServer)
[2017-01-25 07:21:09,224] INFO Loading logs. (kafka.log.LogManager)
[2017-01-25 07:21:09,265] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/my-replicated-topic-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,273] ERROR There was an error in one of the threads during logs loading: java.io.IOException: Das Argument ist ungültig (kafka.log.LogManager)
[2017-01-25 07:21:09,280] FATAL Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
java.io.IOException: Das Argument ist ungültig
        at java.io.RandomAccessFile.setLength(Native Method)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:115)
        at kafka.log.AbstractIndex$$anonfun$resize$1.apply(AbstractIndex.scala:106)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.resize(AbstractIndex.scala:106)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply$mcV$sp(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.log.AbstractIndex$$anonfun$trimToValidSize$1.apply(AbstractIndex.scala:160)
        at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
        at kafka.log.AbstractIndex.trimToValidSize(AbstractIndex.scala:159)
        at kafka.log.LogSegment.recover(LogSegment.scala:236)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:218)
        at kafka.log.Log$$anonfun$loadSegments$4.apply(Log.scala:179)
        at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
        at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
        at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
        at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
        at kafka.log.Log.loadSegments(Log.scala:179)
        at kafka.log.Log.<init>(Log.scala:108)
        at kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$3$$anonfun$apply$10$$anonfun$apply$1.apply$mcV$sp(LogManager.scala:151)
        at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:58)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
[2017-01-25 07:21:09,280] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-changelog-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,293] INFO shutting down (kafka.server.KafkaServer)
[2017-01-25 07:21:09,299] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-Counts-repartition-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,303] WARN Found a corrupted index file due to requirement failed: Corrupt index found, index file (/mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.index) has non-zero size but the last offset is 0 which is no larger than the base offset 0.}. deleting /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.timeindex, /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/streams-wordcount-output-0/00000000000000000000.index and rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,313] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/__consumer_offsets-0/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,313] INFO shut down completed (kafka.server.KafkaServer)
[2017-01-25 07:21:09,317] FATAL Fatal error during KafkaServerStartable startup. Prepare to shutdown (kafka.server.KafkaServerStartable)
java.io.IOException: Das Argument ist ungültig
        [stack trace identical to the KafkaServer trace above]
[2017-01-25 07:21:09,325] ERROR Could not find index file corresponding to log file /mnt/d/Programme/kafka_2.11-0.10.1.1/data/kafka-logs/__consumer_offsets-1/00000000000000000000.log, rebuilding index... (kafka.log.Log)
[2017-01-25 07:21:09,333] INFO shutting down (kafka.server.KafkaServer)
{code}


