Posted to users@kafka.apache.org by Jon Yeargers <jo...@cedexis.com> on 2017/01/01 19:27:44 UTC

0.10.2.0-SNAPSHOT - rocksdb exception(s)

2017-01-01 18:19:13,206 [StreamThread-1] ERROR o.a.k.c.c.i.ConsumerCoordinator - User provided listener org.apache.kafka.streams.processor.internals.StreamThread$1 for group RtDetailBreakoutProcessor failed on partition assignment

org.apache.kafka.streams.errors.ProcessorStateException: Error opening store table_stream-201701011700 at location /mnt/RtDetailBreakoutProcessor/RtDetailBreakoutProcessor/0_1/table_stream/table_stream-201701011700
        at org.apache.kafka.streams.state.internals.RocksDBStore.openDB(RocksDBStore.java:187)
        at org.apache.kafka.streams.state.internals.RocksDBStore.openDB(RocksDBStore.java:156)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore$Segment.openDB(RocksDBWindowStore.java:72)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore.getOrCreateSegment(RocksDBWindowStore.java:388)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore.putInternal(RocksDBWindowStore.java:319)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore.access$000(RocksDBWindowStore.java:51)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore$1.restore(RocksDBWindowStore.java:206)
        at org.apache.kafka.streams.processor.internals.ProcessorStateManager.restoreActiveState(ProcessorStateManager.java:238)
        at org.apache.kafka.streams.processor.internals.ProcessorStateManager.register(ProcessorStateManager.java:201)
        at org.apache.kafka.streams.processor.internals.ProcessorContextImpl.register(ProcessorContextImpl.java:122)
        at org.apache.kafka.streams.state.internals.RocksDBWindowStore.init(RocksDBWindowStore.java:200)
        at org.apache.kafka.streams.state.internals.MeteredWindowStore.init(MeteredWindowStore.java:65)
        at org.apache.kafka.streams.state.internals.CachingWindowStore.init(CachingWindowStore.java:65)
        at org.apache.kafka.streams.processor.internals.AbstractTask.initializeStateStores(AbstractTask.java:86)
        at org.apache.kafka.streams.processor.internals.StreamTask.<init>(StreamTask.java:120)
        at org.apache.kafka.streams.processor.internals.StreamThread.createStreamTask(StreamThread.java:794)
        at org.apache.kafka.streams.processor.internals.StreamThread$TaskCreator.createTask(StreamThread.java:1222)
        at org.apache.kafka.streams.processor.internals.StreamThread$AbstractTaskCreator.retryWithBackoff(StreamThread.java:1195)
        at org.apache.kafka.streams.processor.internals.StreamThread.addStreamTasks(StreamThread.java:897)
        at org.apache.kafka.streams.processor.internals.StreamThread.access$500(StreamThread.java:71)
        at org.apache.kafka.streams.processor.internals.StreamThread$1.onPartitionsAssigned(StreamThread.java:240)
        at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.onJoinComplete(ConsumerCoordinator.java:230)
        at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.joinGroupIfNeeded(AbstractCoordinator.java:314)
        at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.ensureActiveGroup(AbstractCoordinator.java:278)
        at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.poll(ConsumerCoordinator.java:261)
        at org.apache.kafka.clients.consumer.KafkaConsumer.pollOnce(KafkaConsumer.java:1039)
        at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1004)
        at org.apache.kafka.streams.processor.internals.StreamThread.runLoop(StreamThread.java:570)
        at org.apache.kafka.streams.processor.internals.StreamThread.run(StreamThread.java:359)
Caused by: org.rocksdb.RocksDBException: IO error: lock /mnt/RtDetailBreakoutProcessor/RtDetailBreakoutProcessor/0_1/table_stream/table_stream-201701011700/LOCK: No locks available
        at org.rocksdb.RocksDB.open(Native Method)
        at org.rocksdb.RocksDB.open(RocksDB.java:184)
        at org.apache.kafka.streams.state.internals.RocksDBStore.openDB(RocksDBStore.java:180)
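
The segment path in the exception follows the usual Streams layout of <state.dir>/<application.id>/<task_id>/<store_name>/<segment>. For reference, here is a minimal sketch of the kind of windowed aggregation that produces such segments; the application id, state dir and store name are taken from the log above, while the input topic, serdes and window size are placeholders, not the actual topology:

import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.kstream.TimeWindows;

public class RtDetailBreakoutSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Names taken from the error above; broker and topic are placeholders.
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "RtDetailBreakoutProcessor");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.STATE_DIR_CONFIG, "/mnt/RtDetailBreakoutProcessor");

        KStreamBuilder builder = new KStreamBuilder();
        // A windowed aggregation materializes the "table_stream" store; the store
        // is segmented on disk, and each segment is its own RocksDB instance such
        // as .../0_1/table_stream/table_stream-201701011700 in the path above.
        builder.stream(Serdes.String(), Serdes.String(), "rtdetail-input")
               .groupByKey(Serdes.String(), Serdes.String())
               .count(TimeWindows.of(TimeUnit.HOURS.toMillis(1)), "table_stream");

        KafkaStreams streams = new KafkaStreams(builder, props);
        streams.start();
    }
}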

Re: 0.10.2.0-SNAPSHOT - rocksdb exception(s)

Posted by Guozhang Wang <wa...@gmail.com>.
Jon,

It is hard to determine the root cause of this scenario from the stack trace alone, without checking the logs. We have seen a similar issue before, and it has been fixed in the latest trunk head:
https://issues.apache.org/jira/browse/KAFKA-4509

Are you using the latest trunk head with 0.10.2.0-SNAPSHOT? Could you paste
the commit hash of your built jar?
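
If it helps, and assuming the jar came out of a git checkout, running 'git rev-parse HEAD' in that checkout gives the hash. The built jars also embed a commit id, which can be printed with something like the sketch below (AppInfoParser is an internal utility, so treat this as a convenience rather than a supported API):

import org.apache.kafka.common.utils.AppInfoParser;

public class PrintKafkaBuildInfo {
    public static void main(String[] args) {
        // Prints the version and commit id that the Kafka build bakes into the
        // jars on the classpath.
        System.out.println("version  = " + AppInfoParser.getVersion());
        System.out.println("commitId = " + AppInfoParser.getCommitId());
    }
}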


Guozhang


-- 
-- Guozhang